id (int64, 0–300k) | label (string, 1–74 chars, nullable) | text (string, 4k–8k chars) |
---|---|---|
900 |
synthesize
|
import re
from chevron import render as render_mustache
from .base import ResponseMicroService
from ..util import get_dict_defaults
class MustachAttrValue(object):
def __init__(self, attr_name, values):
self._attr_name = attr_name
self._values = values
if any(['@' in v for v in values]):
local_parts = []
domain_parts = []
scopes = dict()
for v in values:
(local_part, sep, domain_part) = v.partition('@')
# probably not needed now...
local_parts.append(local_part)
domain_parts.append(domain_part)
scopes[domain_part] = True
self._scopes = list(scopes.keys())
else:
self._scopes = None
def __str__(self):
return ";".join(self._values)
@property
def values(self):
return [{self._attr_name: v} for v in self._values]
@property
def value(self):
if len(self._values) == 1:
return self._values[0]
else:
return self._values
@property
def first(self):
if len(self._values) > 0:
return self._values[0]
else:
return ""
@property
def scope(self):
if self._scopes is not None:
return self._scopes[0]
return ""
class AddSyntheticAttributes(ResponseMicroService):
"""
A class that adds generated or synthetic attributes to a response set. Attribute
generation is done using mustache (http://mustache.github.io) templates. The
following example configuration illustrates the most common features:
```yaml
module: satosa.micro_services.attribute_generation.AddSyntheticAttributes
name: AddSyntheticAttributes
config:
synthetic_attributes:
requester1:
target_provider1:
eduPersonAffiliation: member;employee
default:
default:
schacHomeOrganization: {{eduPersonPrincipalName.scope}}
schacHomeOrganizationType: tomfoolery provider
```
The use of "" and 'default' is synonymous. Attribute rules are not
overloaded or inherited. For instance a response for "requester1"
from target_provider1 in the above config will generate a (static) attribute
set of 'member' and 'employee' for the eduPersonAffiliation attribute
and nothing else. Note that synthetic attributes override existing
attributes if present.
*Evaluating and interpreting templates*
Attribute values are split on combinations of ';' and newline so that
a template resulting in the following text:
```
a;
b;c
```
results in three attribute values: 'a','b' and 'c'. Templates are
evaluated with a single context that represents the response attributes
before the microservice is processed. De-referencing the attribute
name as in '{{name}}' results in a ';'-separated list of all attribute
values. This notation is useful when you know there is only a single
attribute value in the set.
*Special contexts*
For treating the values as a list - e.g. for iterating using mustache -
use the .values sub-context. For instance, to synthesize all first-last
name combinations, do this:
```
{{#givenName.values}}
{{#sn.values}}{{givenName}} {{sn}}{{/sn.values}}
{{/givenName.values}}
```
Note that the .values sub-context behaves as if it is an iterator
over single-value contexts with the same key name as the original
attribute name.
The .scope sub-context evaluates to the right-hand part of any '@'
sign. This is assumed to be single-valued.
The .first sub-context evaluates to the first value of a context,
which may be safer to use if the attribute is multivalued but
you don't care which value is used in a template.
"""
def __init__(self, config, *args, **kwargs):
super().__init__(*args, **kwargs)
self.synthetic_attributes = config["synthetic_attributes"]
def METHOD_NAME(self, attributes, requester, provider):
syn_attributes = dict()
context = dict()
for attr_name, values in attributes.items():
context[attr_name] = MustachAttrValue(
attr_name,
values
if values
and isinstance(values, list)
and all(isinstance(value, str) for value in values)
else [],
)
recipes = get_dict_defaults(self.synthetic_attributes, requester, provider)
for attr_name, fmt in recipes.items():
syn_attributes[attr_name] = [
value
for token in re.split("[;\n]+", render_mustache(fmt, context))
for value in [token.strip().strip(';')]
if value
]
return syn_attributes
def process(self, context, data):
data.attributes.update(self.METHOD_NAME(data.attributes, data.requester, data.auth_info.issuer))
return super().process(context, data)
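# Illustrative sketch (not part of the original microservice): how a single
# template is rendered against a MustachAttrValue context with chevron and then
# split on ';'/newline, mirroring the logic in METHOD_NAME above. The attribute
# name and value below are made-up assumptions for the example.
def _example_render_sketch():
    ctx = {
        "eduPersonPrincipalName": MustachAttrValue(
            "eduPersonPrincipalName", ["jdoe@example.org"]
        )
    }
    rendered = render_mustache("{{eduPersonPrincipalName.scope}};member", ctx)
    # rendered -> "example.org;member", which splits into ['example.org', 'member']
    return [
        value
        for token in re.split("[;\n]+", rendered)
        for value in [token.strip().strip(";")]
        if value
    ]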
|
901 |
test synchronize telemetry
|
"""Telemetry Worker tests."""
import unittest.mock as mock
import json
from splitio.sync.telemetry import TelemetrySynchronizer, InMemoryTelemetrySubmitter
from splitio.engine.telemetry import TelemetryEvaluationConsumer, TelemetryInitConsumer, TelemetryRuntimeConsumer, TelemetryStorageConsumer
from splitio.storage.inmemmory import InMemoryTelemetryStorage, InMemorySegmentStorage, InMemorySplitStorage
from splitio.models.splits import Split, Status
from splitio.models.segments import Segment
from splitio.models.telemetry import StreamingEvents
from splitio.api.telemetry import TelemetryAPI
class TelemetrySynchronizerTests(object):
"""Telemetry synchronizer test cases."""
@mock.patch('splitio.sync.telemetry.InMemoryTelemetrySubmitter.synchronize_config')
def test_synchronize_config(self, mocker):
telemetry_synchronizer = TelemetrySynchronizer(InMemoryTelemetrySubmitter(mocker.Mock(), mocker.Mock(), mocker.Mock(), mocker.Mock()))
telemetry_synchronizer.synchronize_config()
assert(mocker.called)
@mock.patch('splitio.sync.telemetry.InMemoryTelemetrySubmitter.synchronize_stats')
def test_synchronize_stats(self, mocker):
telemetry_synchronizer = TelemetrySynchronizer(InMemoryTelemetrySubmitter(mocker.Mock(), mocker.Mock(), mocker.Mock(), mocker.Mock()))
telemetry_synchronizer.synchronize_stats()
assert(mocker.called)
class TelemetrySubmitterTests(object):
"""Telemetry submitter test cases."""
def METHOD_NAME(self, mocker):
api = mocker.Mock(spec=TelemetryAPI)
telemetry_storage = InMemoryTelemetryStorage()
telemetry_consumer = TelemetryStorageConsumer(telemetry_storage)
split_storage = InMemorySplitStorage()
split_storage.put(Split('split1', 1234, 1, False, 'user', Status.ACTIVE, 123))
segment_storage = InMemorySegmentStorage()
segment_storage.put(Segment('segment1', [], 123))
telemetry_submitter = InMemoryTelemetrySubmitter(telemetry_consumer, split_storage, segment_storage, api)
telemetry_storage._counters._impressions_queued = 100
telemetry_storage._counters._impressions_deduped = 30
telemetry_storage._counters._impressions_dropped = 0
telemetry_storage._counters._events_queued = 20
telemetry_storage._counters._events_dropped = 10
telemetry_storage._counters._auth_rejections = 1
telemetry_storage._counters._token_refreshes = 3
telemetry_storage._counters._session_length = 3
telemetry_storage._counters._update_from_sse['sp'] = 3
telemetry_storage._method_exceptions._treatment = 10
telemetry_storage._method_exceptions._treatments = 1
telemetry_storage._method_exceptions._treatment_with_config = 5
telemetry_storage._method_exceptions._treatments_with_config = 1
telemetry_storage._method_exceptions._track = 3
telemetry_storage._last_synchronization._split = 5
telemetry_storage._last_synchronization._segment = 3
telemetry_storage._last_synchronization._impression = 10
telemetry_storage._last_synchronization._impression_count = 0
telemetry_storage._last_synchronization._event = 4
telemetry_storage._last_synchronization._telemetry = 0
telemetry_storage._last_synchronization._token = 3
telemetry_storage._http_sync_errors._split = {'500': 3, '501': 2}
telemetry_storage._http_sync_errors._segment = {'401': 1}
telemetry_storage._http_sync_errors._impression = {'500': 1}
telemetry_storage._http_sync_errors._impression_count = {'401': 5}
telemetry_storage._http_sync_errors._event = {'404': 10}
telemetry_storage._http_sync_errors._telemetry = {'501': 3}
telemetry_storage._http_sync_errors._token = {'505': 11}
telemetry_storage._streaming_events = StreamingEvents()
telemetry_storage._tags = ['tag1']
telemetry_storage._method_latencies._treatment = [1] + [0] * 22
telemetry_storage._method_latencies._treatments = [0] * 23
telemetry_storage._method_latencies._treatment_with_config = [0] * 23
telemetry_storage._method_latencies._treatments_with_config = [0] * 23
telemetry_storage._method_latencies._track = [0] * 23
telemetry_storage._http_latencies._split = [1] + [0] * 22
telemetry_storage._http_latencies._segment = [0] * 23
telemetry_storage._http_latencies._impression = [0] * 23
telemetry_storage._http_latencies._impression_count = [0] * 23
telemetry_storage._http_latencies._event = [0] * 23
telemetry_storage._http_latencies._telemetry = [0] * 23
telemetry_storage._http_latencies._token = [0] * 23
telemetry_storage.record_config({'operationMode': 'inmemory',
'storageType': None,
'streamingEnabled': True,
'impressionsQueueSize': 100,
'eventsQueueSize': 200,
'impressionsMode': 'DEBUG',
'impressionListener': None,
'featuresRefreshRate': 30,
'segmentsRefreshRate': 30,
'impressionsRefreshRate': 60,
'eventsPushRate': 60,
'metricsRefreshRate': 10,
'activeFactoryCount': 1,
'notReady': 0,
'timeUntilReady': 1
}, {}
)
self.formatted_config = ""
def record_init(*args, **kwargs):
self.formatted_config = args[0]
api.record_init.side_effect = record_init
telemetry_submitter.synchronize_config()
assert(self.formatted_config == telemetry_submitter._telemetry_init_consumer.get_config_stats())
def record_stats(*args, **kwargs):
self.formatted_stats = args[0]
api.record_stats.side_effect = record_stats
telemetry_submitter.synchronize_stats()
assert(self.formatted_stats == {
"iQ": 100,
"iDe": 30,
"iDr": 0,
"eQ": 20,
"eD": 10,
"lS": {"sp": 5, "se": 3, "im": 10, "ic": 0, "ev": 4, "te": 0, "to": 3},
"t": ["tag1"],
"hE": {"sp": {"500": 3, "501": 2}, "se": {"401": 1}, "im": {"500": 1}, "ic": {"401": 5}, "ev": {"404": 10}, "te": {"501": 3}, "to": {"505": 11}},
"hL": {"sp": [1] + [0] * 22, "se": [0] * 23, "im": [0] * 23, "ic": [0] * 23, "ev": [0] * 23, "te": [0] * 23, "to": [0] * 23},
"aR": 1,
"tR": 3,
"sE": [],
"sL": 3,
"mE": {"t": 10, "ts": 1, "tc": 5, "tcs": 1, "tr": 3},
"mL": {"t": [1] + [0] * 22, "ts": [0] * 23, "tc": [0] * 23, "tcs": [0] * 23, "tr": [0] * 23},
"spC": 1,
"seC": 1,
"skC": 0,
"ufs": {"sp": 3},
"t": ['tag1']
})
|
902 |
test in sequence assignment response
|
from datetime import datetime
from django.test.client import RequestFactory
from django.test.testcases import TestCase
from mediathread.djangosherd.api import DiscussionIndexResource, \
SherdNoteResource
from mediathread.djangosherd.models import DiscussionIndex
from mediathread.factories import MediathreadTestMixin, AssetFactory, \
SherdNoteFactory, ProjectNoteFactory
from mediathread.projects.tests.factories import ProjectSequenceAssetFactory
from mediathread.sequence.tests.factories import SequenceAssetFactory, \
SequenceMediaElementFactory
from mediathread.taxonomy.models import Term
class DiscussionIndexResourcesTest(MediathreadTestMixin, TestCase):
def test_render(self):
self.setup_sample_course()
self.create_discussion(self.sample_course, self.instructor_one)
indices = DiscussionIndex.objects.all()
request = RequestFactory().get('/')
request.course = self.sample_course
request.user = self.instructor_one
ctx = DiscussionIndexResource().render_list(request, indices)
self.assertTrue('references' in ctx)
self.assertEquals(ctx['references'][0]['title'],
'Sample Course Discussion')
self.assertEquals(ctx['references'][0]['type'], 'discussion')
class SherdNoteResourceTest(MediathreadTestMixin, TestCase):
def setUp(self):
self.setup_sample_course()
asset = AssetFactory(author=self.student_one, primary_source='image',
course=self.sample_course)
self.note1 = SherdNoteFactory(
asset=asset, author=self.student_one,
title='one', range1=116.25, range2=6.75)
self.note2 = SherdNoteFactory(
asset=asset, author=self.student_one,
title='two', range1=116.25, range2=6.75)
def test_in_selection_assignment_response(self):
res = SherdNoteResource()
self.assertFalse(res.in_selection_assignment_response(self.note1))
pn = ProjectNoteFactory(annotation=self.note1)
pn.project.date_submitted = datetime.today()
pn.project.save()
self.assertTrue(res.in_selection_assignment_response(self.note1))
request = RequestFactory().get('/?citable=true')
request.user = self.student_one
request.course = self.sample_course
bundle = SherdNoteResource().build_bundle(
obj=self.note1, request=request)
res.dehydrate(bundle)
self.assertEquals(bundle.data['editable'], False)
self.assertEquals(bundle.data['citable'], True)
def METHOD_NAME(self):
res = SherdNoteResource()
self.assertFalse(res.in_sequence_assignment_response(self.note1))
self.assertFalse(res.in_sequence_assignment_response(self.note2))
sa = SequenceAssetFactory(spine=self.note1)
psa = ProjectSequenceAssetFactory(sequence_asset=sa)
self.assertFalse(res.in_sequence_assignment_response(self.note1))
psa.project.date_submitted = datetime.today()
psa.project.save()
self.assertTrue(res.in_sequence_assignment_response(self.note1))
SequenceMediaElementFactory(sequence_asset=sa, media=self.note2)
self.assertTrue(res.in_sequence_assignment_response(self.note2))
request = RequestFactory().get('/?citable=true')
request.user = self.student_one
request.course = self.sample_course
bundle = SherdNoteResource().build_bundle(
obj=self.note1, request=request)
res.dehydrate(bundle)
self.assertEquals(bundle.data['editable'], False)
self.assertEquals(bundle.data['citable'], True)
def test_render_related_terms(self):
taxonomy = {
'Shapes': ['Square', 'Triangle'],
'Colors': ['Red', 'Blue', 'Green']
}
self.create_vocabularies(self.sample_course, taxonomy)
term = Term.objects.get(name='square')
self.create_term_relationship(self.note1, term)
term = Term.objects.get(name='triangle')
self.create_term_relationship(self.note1, term)
term = Term.objects.get(name='red')
self.create_term_relationship(self.note1, term)
res = SherdNoteResource()
request = RequestFactory().get('')
bundle = SherdNoteResource().build_bundle(
obj=self.note1, request=request)
values = res.render_related_terms(bundle)
self.assertEquals(len(values), 2)
self.assertEquals(values[0]['terms'][0]['display_name'], 'Square')
self.assertEquals(values[0]['terms'][1]['display_name'], 'Triangle')
self.assertEquals(values[1]['terms'][0]['display_name'], 'Red')
def test_dehydrate(self):
res = SherdNoteResource()
request = RequestFactory().get('/?citable=true')
request.user = self.student_one
bundle = SherdNoteResource().build_bundle(
obj=self.note1, request=request)
res.dehydrate(bundle)
self.assertEquals(bundle.data['vocabulary'], [])
self.assertEquals(bundle.data['is_null'], False)
self.assertEquals(bundle.data['editable'], True)
self.assertEquals(bundle.data['is_global_annotation'], False)
self.assertEquals(bundle.data['citable'], True)
|
903 |
pkg commit hash
|
# encoding: utf-8
"""
Utilities for getting information about IPython and the system it's running in.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import platform
import pprint
import sys
import subprocess
from IPython.core import release
from IPython.utils import _sysinfo, encoding
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def METHOD_NAME(pkg_path):
"""Get short form of commit hash given directory `pkg_path`
We get the commit hash from (in order of preference):
* IPython.utils._sysinfo.commit
* git output, if we are in a git repository
If these fail, we return a not-found placeholder tuple
Parameters
----------
pkg_path : str
directory containing package
only used for getting commit from active repo
Returns
-------
hash_from : str
Where we got the hash from - description
hash_str : str
short form of hash
"""
# Try and get commit from written commit text file
if _sysinfo.commit:
return "installation", _sysinfo.commit
# maybe we are in a repository
proc = subprocess.Popen('git rev-parse --short HEAD'.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=pkg_path)
repo_commit, _ = proc.communicate()
if repo_commit:
return 'repository', repo_commit.strip().decode('ascii')
return '(none found)', '<not found>'
def pkg_info(pkg_path):
"""Return dict describing the context of this package
Parameters
----------
pkg_path : str
path containing __init__.py for package
Returns
-------
context : dict
with named parameters of interest
"""
src, hsh = METHOD_NAME(pkg_path)
return dict(
ipython_version=release.version,
ipython_path=pkg_path,
commit_source=src,
commit_hash=hsh,
sys_version=sys.version,
sys_executable=sys.executable,
sys_platform=sys.platform,
platform=platform.platform(),
os_name=os.name,
default_encoding=encoding.DEFAULT_ENCODING,
)
def get_sys_info():
"""Return useful information about IPython and the system, as a dict."""
p = os.path
path = p.realpath(p.dirname(p.abspath(p.join(__file__, '..'))))
return pkg_info(path)
def sys_info():
"""Return useful information about IPython and the system, as a string.
Examples
--------
::
In [2]: print(sys_info())
{'commit_hash': '144fdae', # random
'commit_source': 'repository',
'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',
'ipython_version': '0.11.dev',
'os_name': 'posix',
'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',
'sys_executable': '/usr/bin/python',
'sys_platform': 'linux2',
'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'}
"""
return pprint.pformat(get_sys_info())
def num_cpus():
"""DEPRECATED
Return the effective number of CPUs in the system as an integer.
This cross-platform function makes an attempt at finding the total number of
available CPUs in the system, as returned by various underlying system and
python calls.
If it can't find a sensible answer, it returns 1 (though an error *may* make
it return a large positive number that's actually incorrect).
"""
import warnings
warnings.warn(
"`num_cpus` is deprecated since IPython 8.0. Use `os.cpu_count` instead.",
DeprecationWarning,
stacklevel=2,
)
return os.cpu_count() or 1
|
904 |
verify checklist
|
import pytest
import six
import ckan.tests.factories as factories
import ckan.model as model
import ckan.plugins.toolkit as tk
import ckanext.hdx_theme.tests.hdx_test_base as hdx_test_base
from ckanext.hdx_org_group.helpers.static_lists import ORGANIZATION_TYPE_LIST
config = tk.config
NotAuthorized = tk.NotAuthorized
class TestQAChecklist(hdx_test_base.HdxBaseTest):
NORMAL_USER = 'qa_checklist_completed_user'
SYSADMIN_USER = 'testsysadmin'
PACKAGE_ID = 'test_dataset_4_qa_checklist'
PACKAGE_ID2 = 'test_dataset_4_qa_checklist_2'
CHECKLIST_DICT = {
"version": 1,
"metadata": {"m13": True, "m12": True, "m15": True},
"resources": [
{"r5": True, "r6": True, "r7": True}
],
"dataProtection": {"dp5": True, "dp4": True, "dp3": True}
}
RESOURCE_ID = None
RESOURCE_ID2 = None
@classmethod
def _get_action(cls, action_name):
return tk.get_action(action_name)
@classmethod
def setup_class(cls):
super(TestQAChecklist, cls).setup_class()
factories.User(name=cls.NORMAL_USER, email='[email protected]')
factories.Organization(
name='org_name_4_qa_checklist',
title='ORG NAME FOR QA CHECKLIST',
users=[
{'name': cls.NORMAL_USER, 'capacity': 'admin'},
],
hdx_org_type=ORGANIZATION_TYPE_LIST[0][1],
org_url='https://hdx.hdxtest.org/'
)
dataset_dict = cls.create_packages_by_user(cls.PACKAGE_ID, cls.NORMAL_USER, True)
cls.RESOURCE_ID = dataset_dict['resources'][0]['id']
dataset_dict2 = cls.create_packages_by_user(cls.PACKAGE_ID2, cls.NORMAL_USER, False)
cls.RESOURCE_ID2 = dataset_dict2['resources'][0]['id']
@classmethod
def create_packages_by_user(cls, pkg_id, user, qa_checklist):
package = {
"package_creator": "test function",
"private": False,
"dataset_date": "[1960-01-01 TO 2012-12-31]",
"caveats": "These are the caveats",
"license_other": "TEST OTHER LICENSE",
"methodology": "This is a test methodology",
"dataset_source": "Test data",
"license_id": "hdx-other",
"name": pkg_id,
"notes": "This is a test dataset",
"title": "Test Dataset for QA Checklist " + pkg_id,
"owner_org": "org_name_4_qa_checklist",
"groups": [{"name": "roger"}],
"resources": [
{
'package_id': pkg_id,
'url': config.get('ckan.site_url', '') + '/storage/f/test_folder/hdx_test.csv',
'resource_type': 'file.upload',
'format': 'CSV',
'name': 'hdx_test.csv'
},
{
'package_id': pkg_id,
'url': config.get('ckan.site_url', '') + '/storage/f/test_folder/hdx_test.csv',
'resource_type': 'file.upload',
'format': 'CSV',
'name': 'hdx_test2.csv'
}
]
}
if qa_checklist:
package['qa_checklist'] = '{"test": "test"}'
context = {'model': model, 'session': model.Session, 'user': user}
return cls._get_action('package_create')(context, package)
def test_qa_checklist_not_on_dataset_creation(self):
'''
Tests that qa_checklist cannot be set on dataset creation / package_create()
'''
package_dict = self._get_action('package_show')({}, {'id': self.PACKAGE_ID})
assert "qa_checklist" not in package_dict
for resource_dict in package_dict.get('resources'):
assert "qa_checklist" not in resource_dict
assert "qa_checklist_num" not in resource_dict
def test_qa_checklist_not_reset_after_update(self):
'''
Tests that qa_checklist doesn't get reset by a normal dataset_update
'''
self._qa_checklist_update(self.PACKAGE_ID, self.RESOURCE_ID, self.SYSADMIN_USER)
self.METHOD_NAME(self.PACKAGE_ID)
self._change_description_of_package(self.PACKAGE_ID, self.NORMAL_USER)
self.METHOD_NAME(self.PACKAGE_ID)
self._change_description_of_package(self.PACKAGE_ID, self.SYSADMIN_USER,
new_description='modified by sysadmin for qa checklist')
self.METHOD_NAME(self.PACKAGE_ID)
def test_qa_checklist_reset_on_empty_checklist_push(self):
'''
Tests that qa_checklist gets reset when pushing empty checklist data
'''
self._qa_checklist_update(self.PACKAGE_ID, self.RESOURCE_ID, self.SYSADMIN_USER)
self.METHOD_NAME(self.PACKAGE_ID)
empty_checklist = {
"version": 1,
"resources": [
{}
],
"dataProtection": {},
"metadata": {}
}
self._qa_checklist_update(self.PACKAGE_ID, self.RESOURCE_ID, self.SYSADMIN_USER, data_dict=empty_checklist)
package_dict = self._get_action('package_show')({}, {'id': self.PACKAGE_ID})
assert "qa_checklist" not in package_dict
for resource_dict in package_dict.get('resources'):
assert "qa_checklist" not in resource_dict
assert "qa_checklist_num" not in resource_dict
def test_resource_qa_checklist_on_resource_delete(self):
'''
Tests that qa_checklist at the resource level gets deleted when the resource is deleted
'''
self._qa_checklist_update(self.PACKAGE_ID2, self.RESOURCE_ID2, self.SYSADMIN_USER)
self.METHOD_NAME(self.PACKAGE_ID2)
context = {'model': model, 'session': model.Session, 'user': self.NORMAL_USER}
self._get_action('resource_delete')(context, {'id': self.RESOURCE_ID2})
checklist = self._get_action('hdx_package_qa_checklist_show')({}, {'id': self.PACKAGE_ID2})
assert checklist.get('resources') != self.CHECKLIST_DICT.get('resources')
assert checklist.get('dataProtection') == self.CHECKLIST_DICT.get('dataProtection')
assert checklist.get('metadata') == self.CHECKLIST_DICT.get('metadata')
def METHOD_NAME(self, package_id):
checklist = self._get_action('hdx_package_qa_checklist_show')({}, {'id': package_id})
for resource_info in checklist.get('resources'):
resource_info.pop('id', None)
assert checklist.get('resources') == self.CHECKLIST_DICT.get('resources')
assert checklist.get('dataProtection') == self.CHECKLIST_DICT.get('dataProtection')
assert checklist.get('metadata') == self.CHECKLIST_DICT.get('metadata')
def _qa_checklist_update(self, package_id, resource_id, user, data_dict=None):
context = {'model': model, 'session': model.Session, 'user': user}
d = {
"id": package_id,
}
if data_dict is None:
d.update(self.CHECKLIST_DICT)
else:
d.update(data_dict)
d['resources'][0]['id'] = resource_id
return self._get_action('hdx_package_qa_checklist_update')(context, d)
def _change_description_of_package(self, package_id, user, new_description='modified for qa checklist'):
context = {'model': model, 'session': model.Session, 'user': user}
return self._get_action('package_patch')(context, {'id': package_id, 'notes': new_description})
|
905 |
test localtime epoch utc daylight false
|
import datetime
from email import utils
import test.support
import time
import unittest
import sys
import os.path
class DateTimeTests(unittest.TestCase):
datestring = 'Sun, 23 Sep 2001 20:10:55'
dateargs = (2001, 9, 23, 20, 10, 55)
offsetstring = ' -0700'
utcoffset = datetime.timedelta(hours=-7)
tz = datetime.timezone(utcoffset)
naive_dt = datetime.datetime(*dateargs)
aware_dt = datetime.datetime(*dateargs, tzinfo=tz)
def test_naive_datetime(self):
self.assertEqual(utils.format_datetime(self.naive_dt),
self.datestring + ' -0000')
def test_aware_datetime(self):
self.assertEqual(utils.format_datetime(self.aware_dt),
self.datestring + self.offsetstring)
def test_usegmt(self):
utc_dt = datetime.datetime(*self.dateargs,
tzinfo=datetime.timezone.utc)
self.assertEqual(utils.format_datetime(utc_dt, usegmt=True),
self.datestring + ' GMT')
def test_usegmt_with_naive_datetime_raises(self):
with self.assertRaises(ValueError):
utils.format_datetime(self.naive_dt, usegmt=True)
def test_usegmt_with_non_utc_datetime_raises(self):
with self.assertRaises(ValueError):
utils.format_datetime(self.aware_dt, usegmt=True)
def test_parsedate_to_datetime(self):
self.assertEqual(
utils.parsedate_to_datetime(self.datestring + self.offsetstring),
self.aware_dt)
def test_parsedate_to_datetime_naive(self):
self.assertEqual(
utils.parsedate_to_datetime(self.datestring + ' -0000'),
self.naive_dt)
class LocaltimeTests(unittest.TestCase):
def test_localtime_is_tz_aware_daylight_true(self):
test.support.patch(self, time, 'daylight', True)
t = utils.localtime()
self.assertIsNotNone(t.tzinfo)
def test_localtime_is_tz_aware_daylight_false(self):
test.support.patch(self, time, 'daylight', False)
t = utils.localtime()
self.assertIsNotNone(t.tzinfo)
def test_localtime_daylight_true_dst_false(self):
test.support.patch(self, time, 'daylight', True)
t0 = datetime.datetime(2012, 3, 12, 1, 1)
t1 = utils.localtime(t0, isdst=-1)
t2 = utils.localtime(t1)
self.assertEqual(t1, t2)
def test_localtime_daylight_false_dst_false(self):
test.support.patch(self, time, 'daylight', False)
t0 = datetime.datetime(2012, 3, 12, 1, 1)
t1 = utils.localtime(t0, isdst=-1)
t2 = utils.localtime(t1)
self.assertEqual(t1, t2)
def test_localtime_daylight_true_dst_true(self):
test.support.patch(self, time, 'daylight', True)
t0 = datetime.datetime(2012, 3, 12, 1, 1)
t1 = utils.localtime(t0, isdst=1)
t2 = utils.localtime(t1)
self.assertEqual(t1, t2)
def test_localtime_daylight_false_dst_true(self):
test.support.patch(self, time, 'daylight', False)
t0 = datetime.datetime(2012, 3, 12, 1, 1)
t1 = utils.localtime(t0, isdst=1)
t2 = utils.localtime(t1)
self.assertEqual(t1, t2)
@test.support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
def test_localtime_epoch_utc_daylight_true(self):
test.support.patch(self, time, 'daylight', True)
t0 = datetime.datetime(1990, 1, 1, tzinfo = datetime.timezone.utc)
t1 = utils.localtime(t0)
t2 = t0 - datetime.timedelta(hours=5)
t2 = t2.replace(tzinfo = datetime.timezone(datetime.timedelta(hours=-5)))
self.assertEqual(t1, t2)
@test.support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
def METHOD_NAME(self):
test.support.patch(self, time, 'daylight', False)
t0 = datetime.datetime(1990, 1, 1, tzinfo = datetime.timezone.utc)
t1 = utils.localtime(t0)
t2 = t0 - datetime.timedelta(hours=5)
t2 = t2.replace(tzinfo = datetime.timezone(datetime.timedelta(hours=-5)))
self.assertEqual(t1, t2)
def test_localtime_epoch_notz_daylight_true(self):
test.support.patch(self, time, 'daylight', True)
t0 = datetime.datetime(1990, 1, 1)
t1 = utils.localtime(t0)
t2 = utils.localtime(t0.replace(tzinfo=None))
self.assertEqual(t1, t2)
def test_localtime_epoch_notz_daylight_false(self):
test.support.patch(self, time, 'daylight', False)
t0 = datetime.datetime(1990, 1, 1)
t1 = utils.localtime(t0)
t2 = utils.localtime(t0.replace(tzinfo=None))
self.assertEqual(t1, t2)
# XXX: Need a more robust test for Olson's tzdata
@unittest.skipIf(sys.platform.startswith('win'),
"Windows does not use Olson's TZ database")
@unittest.skipUnless(os.path.exists('/usr/share/zoneinfo') or
os.path.exists('/usr/lib/zoneinfo'),
"Can't find the Olson's TZ database")
@test.support.run_with_tz('Europe/Kiev')
def test_variable_tzname(self):
t0 = datetime.datetime(1984, 1, 1, tzinfo=datetime.timezone.utc)
t1 = utils.localtime(t0)
self.assertEqual(t1.tzname(), 'MSK')
t0 = datetime.datetime(1994, 1, 1, tzinfo=datetime.timezone.utc)
t1 = utils.localtime(t0)
self.assertEqual(t1.tzname(), 'EET')
# Issue #24836: The timezone files are out of date (pre 2011k)
# on Mac OS X Snow Leopard.
@test.support.requires_mac_ver(10, 7)
class FormatDateTests(unittest.TestCase):
@test.support.run_with_tz('Europe/Minsk')
def test_formatdate(self):
timeval = time.mktime((2011, 12, 1, 18, 0, 0, 4, 335, 0))
string = utils.formatdate(timeval, localtime=False, usegmt=False)
self.assertEqual(string, 'Thu, 01 Dec 2011 15:00:00 -0000')
string = utils.formatdate(timeval, localtime=False, usegmt=True)
self.assertEqual(string, 'Thu, 01 Dec 2011 15:00:00 GMT')
@test.support.run_with_tz('Europe/Minsk')
def test_formatdate_with_localtime(self):
timeval = time.mktime((2011, 1, 1, 18, 0, 0, 6, 1, 0))
string = utils.formatdate(timeval, localtime=True)
self.assertEqual(string, 'Sat, 01 Jan 2011 18:00:00 +0200')
# Minsk moved from +0200 (with DST) to +0300 (without DST) in 2011
timeval = time.mktime((2011, 12, 1, 18, 0, 0, 4, 335, 0))
string = utils.formatdate(timeval, localtime=True)
self.assertEqual(string, 'Thu, 01 Dec 2011 18:00:00 +0300')
if __name__ == '__main__':
unittest.main()
|
906 |
wget output path
|
__package__ = 'archivebox.extractors'
import re
from pathlib import Path
from typing import Optional
from datetime import datetime, timezone
from ..index.schema import Link, ArchiveResult, ArchiveOutput, ArchiveError
from ..system import run, chmod_file
from ..util import (
enforce_types,
without_fragment,
without_query,
path,
domain,
urldecode,
)
from ..config import (
WGET_ARGS,
TIMEOUT,
SAVE_WGET,
SAVE_WARC,
WGET_BINARY,
WGET_VERSION,
RESTRICT_FILE_NAMES,
CHECK_SSL_VALIDITY,
SAVE_WGET_REQUISITES,
WGET_AUTO_COMPRESSION,
WGET_USER_AGENT,
COOKIES_FILE,
)
from ..logging_util import TimedProgress
@enforce_types
def should_save_wget(link: Link, out_dir: Optional[Path]=None, overwrite: Optional[bool]=False) -> bool:
output_path = METHOD_NAME(link)
out_dir = out_dir or Path(link.link_dir)
if not overwrite and output_path and (out_dir / output_path).exists():
return False
return SAVE_WGET
@enforce_types
def save_wget(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult:
"""download full site using wget"""
out_dir = out_dir or Path(link.link_dir)
if SAVE_WARC:
warc_dir = out_dir / "warc"
warc_dir.mkdir(exist_ok=True)
warc_path = warc_dir / str(int(datetime.now(timezone.utc).timestamp()))
# WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html
output: ArchiveOutput = None
cmd = [
WGET_BINARY,
# '--server-response', # print headers for better error parsing
*WGET_ARGS,
'--timeout={}'.format(timeout),
*(['--restrict-file-names={}'.format(RESTRICT_FILE_NAMES)] if RESTRICT_FILE_NAMES else []),
*(['--warc-file={}'.format(str(warc_path))] if SAVE_WARC else []),
*(['--page-requisites'] if SAVE_WGET_REQUISITES else []),
*(['--user-agent={}'.format(WGET_USER_AGENT)] if WGET_USER_AGENT else []),
*(['--load-cookies', str(COOKIES_FILE)] if COOKIES_FILE else []),
*(['--compression=auto'] if WGET_AUTO_COMPRESSION else []),
*([] if SAVE_WARC else ['--timestamping']),
*([] if CHECK_SSL_VALIDITY else ['--no-check-certificate', '--no-hsts']),
link.url,
]
status = 'succeeded'
timer = TimedProgress(timeout, prefix=' ')
try:
result = run(cmd, cwd=str(out_dir), timeout=timeout)
output = METHOD_NAME(link)
# parse out number of files downloaded from last line of stderr:
# "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)"
output_tail = [
line.strip()
for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:]
if line.strip()
]
files_downloaded = (
int(output_tail[-1].strip().split(' ', 2)[1] or 0)
if 'Downloaded:' in output_tail[-1]
else 0
)
hints = (
'Got wget response code: {}.'.format(result.returncode),
*output_tail,
)
# Check for common failure cases
if (result.returncode > 0 and files_downloaded < 1) or output is None:
if b'403: Forbidden' in result.stderr:
raise ArchiveError('403 Forbidden (try changing WGET_USER_AGENT)', hints)
if b'404: Not Found' in result.stderr:
raise ArchiveError('404 Not Found', hints)
if b'ERROR 500: Internal Server Error' in result.stderr:
raise ArchiveError('500 Internal Server Error', hints)
raise ArchiveError('Wget failed or got an error from the server', hints)
if (out_dir / output).exists():
chmod_file(output, cwd=str(out_dir))
else:
print(f' {out_dir}/{output}')
raise ArchiveError('Failed to find wget output after running', hints)
except Exception as err:
status = 'failed'
output = err
finally:
timer.end()
return ArchiveResult(
cmd=cmd,
pwd=str(out_dir),
cmd_version=WGET_VERSION,
output=output,
status=status,
**timer.stats,
)
@enforce_types
def METHOD_NAME(link: Link) -> Optional[str]:
"""calculate the path to the wgetted .html file, since wget may
adjust some paths to be different than the base_url path.
See docs on wget --adjust-extension (-E)
"""
# Wget downloads can save in a number of different ways depending on the url:
# https://example.com
# > example.com/index.html
# https://example.com?v=zzVa_tX1OiI
# > example.com/index.html?v=zzVa_tX1OiI.html
# https://www.example.com/?v=zzVa_tX1OiI
# > example.com/index.html?v=zzVa_tX1OiI.html
# https://example.com/abc
# > example.com/abc.html
# https://example.com/abc/
# > example.com/abc/index.html
# https://example.com/abc?v=zzVa_tX1OiI.html
# > example.com/abc?v=zzVa_tX1OiI.html
# https://example.com/abc/?v=zzVa_tX1OiI.html
# > example.com/abc/index.html?v=zzVa_tX1OiI.html
# https://example.com/abc/test.html
# > example.com/abc/test.html
# https://example.com/abc/test?v=zzVa_tX1OiI
# > example.com/abc/test?v=zzVa_tX1OiI.html
# https://example.com/abc/test/?v=zzVa_tX1OiI
# > example.com/abc/test/index.html?v=zzVa_tX1OiI.html
# There's also lots of complexity around how the urlencoding and renaming
# is done for pages with query and hash fragments or extensions like shtml / htm / php / etc
# Since the wget algorithm for -E (appending .html) is incredibly complex
# and there's no way to get the computed output path from wget
# in order to avoid having to reverse-engineer how they calculate it,
# we just look in the output folder and read the filename wget used from the filesystem
full_path = without_fragment(without_query(path(link.url))).strip('/')
search_dir = Path(link.link_dir) / domain(link.url).replace(":", "+") / urldecode(full_path)
for _ in range(4):
if search_dir.exists():
if search_dir.is_dir():
html_files = [
f for f in search_dir.iterdir()
if re.search(".+\\.[Ss]?[Hh][Tt][Mm][Ll]?$", str(f), re.I | re.M)
]
if html_files:
return str(html_files[0].relative_to(link.link_dir))
# sometimes wget'd URLs have no ext and return non-html content
# e.g. /some/example/rss/all -> (some RSS XML content)
# /some/other/url.o4g -> (some binary file with an unrecognized ext)
# test this with archivebox add --depth=1 https://getpocket.com/users/nikisweeting/feed/all
last_part_of_url = urldecode(full_path.rsplit('/', 1)[-1])
for file_present in search_dir.iterdir():
if file_present.name == last_part_of_url:
return str(file_present.relative_to(link.link_dir))
# Move up one directory level
search_dir = search_dir.parent
if str(search_dir) == link.link_dir:
break
# check for literally any file present that isn't an empty folder
domain_dir = Path(domain(link.url).replace(":", "+"))
files_within = list((Path(link.link_dir) / domain_dir).glob('**/*.*'))
if files_within:
return str((domain_dir / files_within[-1]).relative_to(link.link_dir))
# fallback to just the domain dir
search_dir = Path(link.link_dir) / domain(link.url).replace(":", "+")
if search_dir.is_dir():
return domain(link.url).replace(":", "+")
return None
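# --- Illustrative sketch only (not part of archivebox) ----------------------
# The function above deliberately avoids re-implementing wget's
# --adjust-extension naming rules and instead inspects what wget wrote to disk.
# A minimal standalone version of that "find the html file wget produced" step
# could look like this (directory layout is an assumption):
def _example_find_wget_html(search_dir: Path) -> Optional[Path]:
    """Return the first .htm/.html/.shtml file directly inside search_dir, if any."""
    if not search_dir.is_dir():
        return None
    for candidate in sorted(search_dir.iterdir()):
        if re.search(r"\.[sS]?[hH][tT][mM][lL]?$", candidate.name):
            return candidate
    return None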
|
907 |
run command
|
import argparse
import concurrent.futures
import json
import logging
import os
import subprocess
import sys
import time
from enum import Enum
from pathlib import Path
from typing import Any, List, NamedTuple, Optional
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
def METHOD_NAME(
args: List[str],
*,
timeout: int,
) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
capture_output=True,
shell=IS_WINDOWS, # So batch scripts are found.
timeout=timeout,
check=True,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def run_command(
args: List[str],
*,
retries: int,
timeout: int,
) -> "subprocess.CompletedProcess[bytes]":
remaining_retries = retries
while True:
try:
return METHOD_NAME(args, timeout=timeout)
except subprocess.TimeoutExpired as err:
if remaining_retries == 0:
raise err
remaining_retries -= 1
logging.warning(
"(%s/%s) Retrying because command failed with: %r",
retries - remaining_retries,
retries,
err,
)
time.sleep(1)
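# Usage sketch (illustrative only; the binary path and file name are assumptions):
# clang-format prints the formatted file to stdout, so a caller can compare it
# against the original bytes to decide whether a lint message is needed, e.g.:
#
#   proc = run_command(["clang-format", "--style=file", "example.cpp"],
#                      retries=3, timeout=90)
#   formatted = proc.stdout.decode("utf-8")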
def check_file(
filename: str,
binary: str,
retries: int,
timeout: int,
) -> List[LintMessage]:
try:
with open(filename, "rb") as f:
original = f.read()
proc = run_command(
[binary, filename],
retries=retries,
timeout=timeout,
)
except subprocess.TimeoutExpired:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ERROR,
name="timeout",
original=None,
replacement=None,
description=(
"clang-format timed out while trying to process a file. "
"Please report an issue in pytorch/pytorch with the "
"label 'module: lint'"
),
)
]
except (OSError, subprocess.CalledProcessError) as err:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ADVICE,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.decode("utf-8").strip() or "(empty)",
stdout=err.stdout.decode("utf-8").strip() or "(empty)",
)
),
)
]
replacement = proc.stdout
if original == replacement:
return []
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.WARNING,
name="format",
original=original.decode("utf-8"),
replacement=replacement.decode("utf-8"),
description="See https://clang.llvm.org/docs/ClangFormat.html.\nRun `lintrunner -a` to apply this patch.",
)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="Format files with clang-format.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--binary",
required=True,
help="clang-format binary path",
)
parser.add_argument(
"--retries",
default=3,
type=int,
help="times to retry timed out clang-format",
)
parser.add_argument(
"--timeout",
default=90,
type=int,
help="seconds to wait for clang-format",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
binary = os.path.normpath(args.binary) if IS_WINDOWS else args.binary
if not Path(binary).exists():
lint_message = LintMessage(
path=None,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ERROR,
name="init-error",
original=None,
replacement=None,
description=(
f"Could not find clang-format binary at {binary}, "
"did you forget to run `lintrunner init`?"
),
)
print(json.dumps(lint_message._asdict()), flush=True)
sys.exit(0)
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {
executor.submit(check_file, x, binary, args.retries, args.timeout): x
for x in args.filenames
}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
|
908 |
test 1 1
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.text}.
"""
from io import StringIO
from twisted.python import text
from twisted.trial import unittest
sampleText = """Every attempt to employ mathematical methods in the study of chemical
questions must be considered profoundly irrational and contrary to the
spirit of chemistry ... If mathematical analysis should ever hold a
prominent place in chemistry - an aberration which is happily almost
impossible - it would occasion a rapid and widespread degeneration of that
science.
-- Auguste Comte, Philosophie Positive, Paris, 1838
"""
class WrapTests(unittest.TestCase):
"""
Tests for L{text.greedyWrap}.
"""
def setUp(self) -> None:
self.lineWidth = 72
self.sampleSplitText = sampleText.split()
self.output = text.wordWrap(sampleText, self.lineWidth)
def test_wordCount(self) -> None:
"""
Compare the number of words.
"""
words = []
for line in self.output:
words.extend(line.split())
wordCount = len(words)
sampleTextWordCount = len(self.sampleSplitText)
self.assertEqual(wordCount, sampleTextWordCount)
def test_wordMatch(self) -> None:
"""
Compare the lists of words.
"""
words = []
for line in self.output:
words.extend(line.split())
# Using assertEqual here prints out some
# rather too long lists.
self.assertTrue(self.sampleSplitText == words)
def test_lineLength(self) -> None:
"""
Check the length of the lines.
"""
failures = []
for line in self.output:
if not len(line) <= self.lineWidth:
failures.append(len(line))
if failures:
self.fail(
"%d of %d lines were too long.\n"
"%d < %s" % (len(failures), len(self.output), self.lineWidth, failures)
)
def test_doubleNewline(self) -> None:
"""
Allow paragraphs delimited by two \ns.
"""
sampleText = "et\n\nphone\nhome."
result = text.wordWrap(sampleText, self.lineWidth)
self.assertEqual(result, ["et", "", "phone home.", ""])
class LineTests(unittest.TestCase):
"""
Tests for L{isMultiline} and L{endsInNewline}.
"""
def test_isMultiline(self) -> None:
"""
L{text.isMultiline} returns C{True} if the string has a newline in it.
"""
s = 'This code\n "breaks."'
m = text.isMultiline(s)
self.assertTrue(m)
s = 'This code does not "break."'
m = text.isMultiline(s)
self.assertFalse(m)
def test_endsInNewline(self) -> None:
"""
L{text.endsInNewline} returns C{True} if the string ends in a newline.
"""
s = "newline\n"
m = text.endsInNewline(s)
self.assertTrue(m)
s = "oldline"
m = text.endsInNewline(s)
self.assertFalse(m)
class StringyStringTests(unittest.TestCase):
"""
Tests for L{text.stringyString}.
"""
def test_tuple(self) -> None:
"""
Tuple elements are displayed on separate lines.
"""
s = ("a", "b")
m = text.stringyString(s)
self.assertEqual(m, "(a,\n b,)\n")
def test_dict(self) -> None:
"""
Dicts elements are displayed using C{str()}.
"""
s = {"a": 0}
m = text.stringyString(s)
self.assertEqual(m, "{a: 0}")
def test_list(self) -> None:
"""
List elements are displayed on separate lines using C{str()}.
"""
s = ["a", "b"]
m = text.stringyString(s)
self.assertEqual(m, "[a,\n b,]\n")
class SplitTests(unittest.TestCase):
"""
Tests for L{text.splitQuoted}.
"""
def test_oneWord(self) -> None:
"""
Splitting strings with one-word phrases.
"""
s = 'This code "works."'
r = text.splitQuoted(s)
self.assertEqual(["This", "code", "works."], r)
def test_multiWord(self) -> None:
s = 'The "hairy monkey" likes pie.'
r = text.splitQuoted(s)
self.assertEqual(["The", "hairy monkey", "likes", "pie."], r)
# Some of the many tests that would fail:
# def test_preserveWhitespace(self):
# phrase = '"MANY SPACES"'
# s = 'With %s between.' % (phrase,)
# r = text.splitQuoted(s)
# self.assertEqual(['With', phrase, 'between.'], r)
# def test_escapedSpace(self):
# s = r"One\ Phrase"
# r = text.splitQuoted(s)
# self.assertEqual(["One Phrase"], r)
class StrFileTests(unittest.TestCase):
def setUp(self) -> None:
self.io = StringIO("this is a test string")
def tearDown(self) -> None:
pass
def test_1_f(self) -> None:
self.assertFalse(text.strFile("x", self.io))
def METHOD_NAME(self) -> None:
self.assertTrue(text.strFile("t", self.io))
def test_1_2(self) -> None:
self.assertTrue(text.strFile("h", self.io))
def test_1_3(self) -> None:
self.assertTrue(text.strFile("i", self.io))
def test_1_4(self) -> None:
self.assertTrue(text.strFile("s", self.io))
def test_1_5(self) -> None:
self.assertTrue(text.strFile("n", self.io))
def test_1_6(self) -> None:
self.assertTrue(text.strFile("g", self.io))
def test_3_1(self) -> None:
self.assertTrue(text.strFile("thi", self.io))
def test_3_2(self) -> None:
self.assertTrue(text.strFile("his", self.io))
def test_3_3(self) -> None:
self.assertTrue(text.strFile("is ", self.io))
def test_3_4(self) -> None:
self.assertTrue(text.strFile("ing", self.io))
def test_3_f(self) -> None:
self.assertFalse(text.strFile("bla", self.io))
def test_large_1(self) -> None:
self.assertTrue(text.strFile("this is a test", self.io))
def test_large_2(self) -> None:
self.assertTrue(text.strFile("is a test string", self.io))
def test_large_f(self) -> None:
self.assertFalse(text.strFile("ds jhfsa k fdas", self.io))
def test_overlarge_f(self) -> None:
self.assertFalse(
text.strFile("djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew", self.io)
)
def test_self(self) -> None:
self.assertTrue(text.strFile("this is a test string", self.io))
def test_insensitive(self) -> None:
self.assertTrue(text.strFile("ThIs is A test STRING", self.io, False))
|
909 |
eval observables
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Evaluator of auxiliary operators for algorithms."""
from __future__ import annotations
import numpy as np
from qiskit import QuantumCircuit
from qiskit.opflow import (
CircuitSampler,
ListOp,
StateFn,
OperatorBase,
ExpectationBase,
)
from qiskit.providers import Backend
from qiskit.quantum_info import Statevector
from qiskit.utils import QuantumInstance
from qiskit.utils.deprecation import deprecate_func
from .list_or_dict import ListOrDict
@deprecate_func(
additional_msg=(
"Instead, use the function "
"``qiskit.algorithms.observables_evaluator.estimate_observables``. See "
"https://qisk.it/algo_migration for a migration guide."
),
since="0.24.0",
)
def METHOD_NAME(
quantum_instance: QuantumInstance | Backend,
quantum_state: Statevector | QuantumCircuit | OperatorBase,
observables: ListOrDict[OperatorBase],
expectation: ExpectationBase,
threshold: float = 1e-12,
) -> ListOrDict[tuple[complex, complex]]:
"""
Deprecated: Accepts a list or a dictionary of operators and calculates
their expectation values - means
and standard deviations. They are calculated with respect to a quantum state provided. A user
can optionally provide a threshold value which filters mean values falling below the threshold.
This function has been superseded by the
:func:`qiskit.algorithms.observables_evaluator.estimate_observables` function.
It is deprecated and will be removed in a future release.
Args:
quantum_instance: A quantum instance used for calculations.
quantum_state: An unparametrized quantum circuit representing a quantum state that
expectation values are computed against.
observables: A list or a dictionary of operators whose expectation values are to be
calculated.
expectation: An instance of ExpectationBase which defines a method for calculating
expectation values.
threshold: A threshold value that defines which mean values should be neglected (helpful for
ignoring numerical instabilities close to 0).
Returns:
A list or a dictionary of tuples (mean, standard deviation).
Raises:
ValueError: If a ``quantum_state`` with free parameters is provided.
"""
if (
isinstance(
quantum_state, (QuantumCircuit, OperatorBase)
) # Statevector cannot be parametrized
and len(quantum_state.parameters) > 0
):
raise ValueError(
"A parametrized representation of a quantum_state was provided. It is not "
"allowed - it cannot have free parameters."
)
# Create new CircuitSampler to avoid breaking existing one's caches.
sampler = CircuitSampler(quantum_instance)
list_op = _prepare_list_op(quantum_state, observables)
observables_expect = expectation.convert(list_op)
observables_expect_sampled = sampler.convert(observables_expect)
# compute means
values = np.real(observables_expect_sampled.eval())
# compute standard deviations
# We use sampler.quantum_instance to take care of case in which quantum_instance is Backend
std_devs = _compute_std_devs(
observables_expect_sampled, observables, expectation, sampler.quantum_instance
)
# Discard values below threshold
observables_means = values * (np.abs(values) > threshold)
# zip means and standard deviations into tuples
observables_results = list(zip(observables_means, std_devs))
# Return None eigenvalues for None operators if observables is a list.
# None operators are already dropped in compute_minimum_eigenvalue if observables is a dict.
return _prepare_result(observables_results, observables)
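# Note on the threshold filtering above (illustrative numbers): multiplying by a
# boolean mask zeroes out near-zero means instead of dropping entries, e.g.
#
#   values = np.array([0.5, 1e-14, -0.3])
#   values * (np.abs(values) > 1e-12)   # -> [0.5, 0.0, -0.3]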
def _prepare_list_op(
quantum_state: Statevector | QuantumCircuit | OperatorBase,
observables: ListOrDict[OperatorBase],
) -> ListOp:
"""
Accepts a list or a dictionary of operators and converts them to a ``ListOp``.
Args:
quantum_state: An unparametrized quantum circuit representing a quantum state that
expectation values are computed against.
observables: A list or a dictionary of operators.
Returns:
A ``ListOp`` that includes all provided observables.
"""
if isinstance(observables, dict):
observables = list(observables.values())
if not isinstance(quantum_state, StateFn):
quantum_state = StateFn(quantum_state)
return ListOp([StateFn(obs, is_measurement=True).compose(quantum_state) for obs in observables])
def _prepare_result(
observables_results: list[tuple[complex, complex]],
observables: ListOrDict[OperatorBase],
) -> ListOrDict[tuple[complex, complex]]:
"""
Prepares a list or a dictionary of eigenvalues from ``observables_results`` and
``observables``.
Args:
observables_results: A list of tuples (mean, standard deviation).
observables: A list or a dictionary of operators whose expectation values are to be
calculated.
Returns:
A list or a dictionary of tuples (mean, standard deviation).
"""
if isinstance(observables, list):
observables_eigenvalues: ListOrDict[tuple[complex, complex]] = [None] * len(observables)
key_value_iterator = enumerate(observables_results)
else:
observables_eigenvalues = {}
key_value_iterator = zip(observables.keys(), observables_results)
for key, value in key_value_iterator:
if observables[key] is not None:
observables_eigenvalues[key] = value
return observables_eigenvalues
def _compute_std_devs(
observables_expect_sampled: OperatorBase,
observables: ListOrDict[OperatorBase],
expectation: ExpectationBase,
quantum_instance: QuantumInstance | Backend,
) -> list[complex]:
"""
Calculates a list of standard deviations from expectation values of observables provided.
Args:
observables_expect_sampled: Expected values of observables.
observables: A list or a dictionary of operators whose expectation values are to be
calculated.
expectation: An instance of ExpectationBase which defines a method for calculating
expectation values.
quantum_instance: A quantum instance used for calculations.
Returns:
A list of standard deviations.
"""
variances = np.real(expectation.compute_variance(observables_expect_sampled))
if not isinstance(variances, np.ndarray) and variances == 0.0:
# when `variances` is a single value equal to 0., our expectation value is exact and we
# manually ensure the variances to be a list of the correct length
variances = np.zeros(len(observables), dtype=float)
# TODO: this will crash if quantum_instance is a backend
std_devs = np.sqrt(variances / quantum_instance.run_config.shots)
return std_devs
|
910 |
get bias
|
import torch
import numpy as np
from federatedml.util import consts
from federatedml.secureprotol.paillier_tensor import PaillierTensor
class NumpyDenseLayer(object):
"""
NumpyDenseLayer is designed for PaillierTensor computation
"""
def __init__(self):
self.input = None
self.model_weight = None
self.model_shape = None
self.bias = None
self.lr = 1.0
self.role = None
self.is_empty_model = False
self.activation_input = None
self.input_cached = np.array([])
self.activation_cached = np.array([])
self.do_backward_selective_strategy = False
self.batch_size = None
def set_backward_selective_strategy(self):
self.do_backward_selective_strategy = True
def set_batch(self, batch_size):
self.batch_size = batch_size
def build(self, torch_linear: torch.nn.Linear):
if torch_linear is None:
if self.role == "host":
raise ValueError("host input is empty!")
self.is_empty_model = True
return
assert isinstance(
torch_linear, torch.nn.Linear), 'must use a torch Linear to build this class, but got {}' .format(torch_linear)
self.model_weight = torch_linear.weight.cpu().detach().numpy().transpose()
if torch_linear.bias is not None:
self.bias = torch_linear.bias.cpu().detach().numpy()
def export_model(self):
if self.is_empty_model:
return "".encode()
layer_weights = [self.model_weight]
return layer_weights
def get_selective_activation_input(self):
self.activation_input = self.activation_cached[: self.batch_size]
self.activation_cached = self.activation_cached[self.batch_size:]
return self.activation_input
def get_weight(self):
return self.model_weight.transpose()
def METHOD_NAME(self):
return self.bias
def set_learning_rate(self, lr):
self.lr = lr
def forward(self, x, **kwargs):
pass
def get_weight_gradient(self, delta):
pass
def restore_model(self, model_bytes):
pass
def update_weight(self, delta):
pass
def update_bias(self, delta):
pass
@property
def empty(self):
return self.is_empty_model
@property
def output_shape(self):
return self.model_weight.shape[1:]
def __repr__(self):
return 'model weights: {}, model bias {}'.format(
self.model_weight, self.bias)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
class NumpyDenseLayerGuest(NumpyDenseLayer):
def __init__(self):
super(NumpyDenseLayerGuest, self).__init__()
self.role = consts.GUEST
def forward(self, x):
if self.empty:
return None
self.input = x
output = np.matmul(x, self.model_weight)
if self.bias is not None:
output += self.bias
return output
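# Shape convention for the guest forward pass above (illustrative):
#   x: (batch, in_dim), model_weight: (in_dim, out_dim)  ->  output: (batch, out_dim)
# i.e. output = x @ W (+ bias), which is exactly the np.matmul call above.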
def select_backward_sample(self, selective_ids):
if self.input_cached.shape[0] == 0:
self.input_cached = self.input[selective_ids]
else:
self.input_cached = np.vstack(
(self.input_cached, self.input[selective_ids])
)
def get_input_gradient(self, delta):
if self.empty:
return None
error = np.matmul(delta, self.model_weight.T)
return error
def get_weight_gradient(self, delta):
if self.empty:
return None
if self.do_backward_selective_strategy:
self.input = self.input_cached[: self.batch_size]
self.input_cached = self.input_cached[self.batch_size:]
delta_w = np.matmul(delta.T, self.input)
return delta_w
def update_weight(self, delta):
if self.empty:
return None
self.model_weight -= self.lr * delta.T
def update_bias(self, delta):
if self.bias is not None:
self.bias -= np.sum(delta, axis=0) * self.lr
class NumpyDenseLayerHost(NumpyDenseLayer):
"""
This dense layer can directly compute the PaillierTensor forward pass
"""
def __init__(self):
super(NumpyDenseLayerHost, self).__init__()
self.role = consts.HOST
def select_backward_sample(self, selective_ids):
cached_shape = self.input_cached.shape[0]
offsets = [i + cached_shape for i in range(len(selective_ids))]
id_map = dict(zip(selective_ids, offsets))
if cached_shape == 0:
self.input_cached = (
self.input.get_obj()
.filter(lambda k, v: k in id_map)
.map(lambda k, v: (id_map[k], v))
)
self.input_cached = PaillierTensor(self.input_cached)
self.activation_cached = self.activation_input[selective_ids]
else:
selective_input = (
self.input.get_obj()
.filter(lambda k, v: k in id_map)
.map(lambda k, v: (id_map[k], v))
)
self.input_cached = PaillierTensor(
self.input_cached.get_obj().union(selective_input)
)
self.activation_cached = np.vstack(
(self.activation_cached, self.activation_input[selective_ids])
)
def forward(self, x, encoder=None):
self.input = x
if encoder is not None:
output = x * encoder.encode(self.model_weight)
else:
output = x * self.model_weight
if self.bias is not None:
if encoder is not None:
output += encoder.encode(self.bias)
else:
output += self.bias
return output
def get_input_gradient(self, delta, acc_noise, encoder=None):
if not encoder:
error = delta * self.model_weight.T + delta * acc_noise.T
else:
error = delta.encode(encoder) * (self.model_weight + acc_noise).T
return error
def get_weight_gradient(self, delta, encoder=None):
if self.do_backward_selective_strategy:
batch_size = self.batch_size
self.input = PaillierTensor(
self.input_cached.get_obj().filter(lambda k, v: k < batch_size)
)
self.input_cached = PaillierTensor(
self.input_cached.get_obj()
.filter(lambda k, v: k >= batch_size)
.map(lambda k, v: (k - batch_size, v))
)
if encoder:
delta_w = self.input.fast_matmul_2d(encoder.encode(delta))
else:
delta_w = self.input.fast_matmul_2d(delta)
return delta_w
def update_weight(self, delta):
self.model_weight -= delta * self.lr
def update_bias(self, delta):
if self.bias is not None:
self.bias -= np.sum(delta, axis=0) * self.lr
|
911 |
test create page with slugified url
|
from django.contrib.flatpages.models import FlatPage
from django.test import TestCase
from django.urls import reverse
from oscar.apps.dashboard.pages.forms import PageUpdateForm
from oscar.test.testcases import WebTestCase
class TestPageDashboard(WebTestCase):
is_anonymous = False
is_staff = True
def setUp(self):
self.flatpage_1 = FlatPage.objects.create(
title="title1", url="/url1/", content="some content"
)
self.flatpage_2 = FlatPage.objects.create(
title="title2", url="/url2/", content="other content"
)
super().setUp()
def test_dashboard_index_is_for_staff_only(self):
response = self.get(reverse("dashboard:page-list"))
self.assertTrue("Password" not in response.content.decode("utf8"))
def test_dashboard_page_list(self):
response = self.get(reverse("dashboard:page-list"))
objects = response.context[-1]["object_list"]
self.assertTrue(self.flatpage_1 in objects)
self.assertTrue(self.flatpage_2 in objects)
def test_dashboard_delete_pages(self):
page = self.get(reverse("dashboard:page-list"))
delete_page = page.click(linkid="delete_page_%s" % self.flatpage_1.id)
response = delete_page.form.submit()
self.assertIsRedirect(response)
self.assertEqual(FlatPage.objects.count(), 1)
def test_dashboard_create_page_with_slugified_url(self):
page = self.get(reverse("dashboard:page-create"))
form = page.form
form["title"] = "test"
form["content"] = "my content here"
response = form.submit()
self.assertIsRedirect(response)
def test_dashboard_create_page_with_duplicate_slugified_url_fails(self):
page = self.get(reverse("dashboard:page-create"))
form = page.form
form["title"] = "url1" # This will slugify to url1
form["content"] = "my content here"
response = form.submit()
self.assertEqual(200, response.status_code)
def test_default_site_added_for_new_pages(self):
page = self.get(reverse("dashboard:page-create"))
form = page.form
form["title"] = "test"
form["url"] = "/hello-world/"
form.submit()
p = FlatPage.objects.get(url="/hello-world/")
self.assertEqual(p.sites.count(), 1)
class DashboardPageUpdateFormTestCase(TestCase):
def setUp(self):
self.flatpage_1 = FlatPage.objects.create(
title="title1", url="/url1/", content="some content"
)
self.flatpage_2 = FlatPage.objects.create(
title="title2", url="/url2/", content="other content"
)
def test_doesnt_allow_existing_pages_to_be_clobbered(self):
form = PageUpdateForm(
data={
"title": "test",
"url": "/dashboard/pages/",
}
)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors["url"], ["Specified page already exists!"])
def test_allows_page_to_be_created(self):
form = PageUpdateForm(
data={"title": "test", "url": "/my-new-url/", "content": "my content here"}
)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(FlatPage.objects.count(), 3)
def METHOD_NAME(self):
form = PageUpdateForm(data={"title": "test", "content": "my content here"})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(FlatPage.objects.count(), 3)
def test_create_page_with_existing_url_does_not_work(self):
form = PageUpdateForm(
data={
"title": "test",
"url": "/url1/", # already exists
"content": "my content here",
}
)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors["url"], ["Specified page already exists!"])
def test_update_page_valid_url(self):
form = PageUpdateForm(
instance=self.flatpage_1,
data={"title": "test", "url": "/new/url/", "content": "my content here"},
)
form.save()
self.flatpage_1.refresh_from_db()
page = self.flatpage_1
self.assertEqual(page.title, "test")
self.assertEqual(page.url, "/new/url/")
self.assertEqual(page.content, "my content here")
def test_invalid_chars_in_url(self):
form = PageUpdateForm(
data={
"url": "/%* /",
"title": "Title",
"content": "Content",
}
)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["url"],
[
"This value must contain only letters, numbers, dots, underscores, dashes, slashes or tildes."
],
)
def test_invalid_url_length(self):
form = PageUpdateForm(
data={
"url": "/this_url_is_more_than_100_characters_long_which_is_invalid"
"_because_the_model_field_has_a_max_length_of_100",
"title": "Title",
"content": "Content",
}
)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["url"],
["Ensure this value has at most 100 characters (it has 107)."],
)
|
912 |
test check status fail
|
import json
import os
import pytest
from mock import patch
from datadog_checks.base import AgentCheck
from datadog_checks.unifi_console.check import UnifiConsoleCheck
from datadog_checks.unifi_console.errors import APIConnectionError
from datadog_checks.unifi_console.types import ControllerInfo, Count, Gauge, Rate
from tests.common import HERE
@pytest.mark.usefixtures("mock_api")
def test_metrics_submission(aggregator, dd_run_check, instance):
"""This test asserts that the same api content always produces the same metrics."""
check = UnifiConsoleCheck("unifi", {}, [instance])
dd_run_check(check)
value_files = ["device_metrics_values.json", "client_metrics_values.json"]
for file in value_files:
fixture_file = os.path.join(HERE, "fixtures", file)
with open(fixture_file, "r") as f:
data = json.load(f)
for metric in data:
aggregator.assert_metric(
metric["name"],
metric.get("value"),
hostname=metric.get("hostname"),
tags=metric.get("tags"),
)
aggregator.assert_metric('unifi_console.healthy', metric_type=aggregator.GAUGE)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("mock_api")
def test__initiate_api_connection(instance):
with patch("datadog_checks.unifi_console.check.Unifi.login") as mock_connect:
check = UnifiConsoleCheck("unifi", {}, [instance])
mock_connect.side_effect = APIConnectionError()
with pytest.raises(APIConnectionError):
check._initiate_api_connection()
@pytest.mark.usefixtures("mock_api")
def METHOD_NAME(aggregator, dd_run_check, instance):
with patch("datadog_checks.unifi_console.check.Unifi.status") as mock_status:
check = UnifiConsoleCheck("unifi", {}, [instance])
mock_status.side_effect = Exception()
with pytest.raises(Exception):
dd_run_check(check)
aggregator.assert_service_check("unifi_console.can_connect", AgentCheck.CRITICAL, tags=check._config.tags)
aggregator.assert_service_check("unifi_console.healthy", AgentCheck.CRITICAL, tags=check._config.tags)
aggregator.assert_metric('unifi_console.healthy', 0, metric_type=aggregator.GAUGE)
@pytest.mark.usefixtures("mock_api")
def test_check_status_pass(aggregator, dd_run_check, instance):
check = UnifiConsoleCheck("unifi", {}, [instance])
dd_run_check(check)
aggregator.assert_service_check("unifi_console.can_connect", AgentCheck.OK, tags=check._config.tags)
aggregator.assert_service_check("unifi_console.healthy", AgentCheck.OK, tags=check._config.tags)
aggregator.assert_metric('unifi_console.healthy', 1, metric_type=aggregator.GAUGE)
@pytest.mark.usefixtures("mock_api")
def test_get_devices_info_fails(aggregator, dd_run_check, instance):
with patch("datadog_checks.unifi_console.check.Unifi.get_devices_info") as mock_get_devices_info:
check = UnifiConsoleCheck("unifi", {}, [instance])
mock_get_devices_info.side_effect = Exception()
with pytest.raises(Exception):
dd_run_check(check)
@pytest.mark.usefixtures("mock_api")
def test_get_clients_info_fails(aggregator, dd_run_check, instance):
with patch("datadog_checks.unifi_console.check.Unifi.get_clients_info") as mock_get_clients_info:
check = UnifiConsoleCheck("unifi", {}, [instance])
mock_get_clients_info.side_effect = Exception()
with pytest.raises(Exception):
dd_run_check(check)
@pytest.mark.usefixtures("mock_api")
def test__submit_healthy_metrics(aggregator, instance):
check = UnifiConsoleCheck("unifi", {}, [instance])
info = "test"
check._submit_healthy_metrics(info, check._config.tags)
aggregator.assert_service_check("unifi_console.healthy", AgentCheck.CRITICAL, tags=check._config.tags)
aggregator.assert_metric('unifi_console.healthy', 0, metric_type=aggregator.GAUGE)
with open(os.path.join(HERE, "fixtures", "status_valid.json")) as f:
check._submit_healthy_metrics(ControllerInfo(json.load(f)), check._config.tags)
aggregator.assert_service_check("unifi_console.healthy", AgentCheck.OK, tags=check._config.tags)
aggregator.assert_metric('unifi_console.healthy', 1, metric_type=aggregator.GAUGE)
aggregator.reset()
with open(os.path.join(HERE, "fixtures", "status_invalid.json")) as f:
check._submit_healthy_metrics(ControllerInfo(json.load(f)), check._config.tags)
aggregator.assert_service_check("unifi_console.healthy", AgentCheck.CRITICAL, tags=check._config.tags)
aggregator.assert_metric('unifi_console.healthy', 0, metric_type=aggregator.GAUGE)
@pytest.mark.parametrize(
"metric, expected_type",
[
(Gauge('test', 1, []), 0),
(Count('test', 1, []), 2),
(Rate('test', 1, []), 1),
],
)
@pytest.mark.usefixtures("mock_api")
def test__submit_metrics(aggregator, instance, metric, expected_type):
check = UnifiConsoleCheck("unifi", {}, [instance])
metrics = [metric]
check._submit_metrics(metrics)
aggregator.assert_metric('unifi_console.test', 1, metric_type=expected_type)
|
913 |
explore array
|
import random
from typing import Any, Dict, List, Union
from ludwig.schema.metadata.parameter_metadata import ExpectedImpact
# base types for ludwig config parameters.
ParameterBaseTypes = Union[str, float, int, bool, None]
def handle_property_type(
property_type: str, item: Dict[str, Any], expected_impact: ExpectedImpact = ExpectedImpact.HIGH
) -> List[Union[ParameterBaseTypes, List[ParameterBaseTypes]]]:
"""Return possible parameter values for a parameter type.
Args:
property_type: type of the parameter (e.g. array, number, etc.)
item: dictionary containing details on the parameter such as default, min and max values.
expected_impact: threshold expected impact that we'd like to include.
"""
parameter_metadata = item.get("parameter_metadata", None)
if not parameter_metadata:
return []
# don't explore internal only parameters.
if parameter_metadata.get("internal_only", True):
return []
# don't explore parameters that have expected impact less than HIGH.
if parameter_metadata.get("expected_impact", ExpectedImpact.LOW) < expected_impact:
return []
if property_type == "number":
return explore_number(item)
elif property_type == "integer":
return explore_integer(item)
elif property_type == "string":
return explore_string(item)
elif property_type == "boolean":
return explore_boolean()
elif property_type == "null":
return explore_null()
elif property_type == "array":
return METHOD_NAME(item)
else:
return []
def METHOD_NAME(item: Dict[str, Any]) -> List[List[ParameterBaseTypes]]:
"""Return possible parameter values for the `array` parameter type.
Args:
item: dictionary containing details on the parameter such as default, min and max values.
"""
candidates = []
if "default" in item and item["default"]:
candidates.append(item["default"])
item_choices = []
maxlen = 0
# In the case where the length of the array isn't defined.
if not isinstance(item["items"], list):
return []
for item_of in item["items"]:
choices = handle_property_type(item_of["type"], item_of)
maxlen = max(maxlen, len(choices))
item_choices.append(choices)
# pad to same length
for i in range(len(item_choices)):
item_choices[i] = maxlen * item_choices[i]
item_choices[i] = item_choices[i][:maxlen]
merged = list(zip(*item_choices)) + candidates
return [list(tup) for tup in merged]
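# Illustrative note (added, not in the original): the padding step above repeats each
# per-item choice list up to the longest one and then zips positionally. For a hypothetical
# item_choices == [[1, 2], [True]] (maxlen == 2) it yields [[1, True], [2, True]],
# plus the default value if one was appended to `candidates`.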
def explore_number(item: Dict[str, Any]) -> List[ParameterBaseTypes]:
"""Return possible parameter values for the `number` parameter type.
Args:
item: dictionary containing details on the parameter such as default, min and max values.
TODO(Wael): Improve logic.
"""
minimum, maximum = 0, 1
if "default" not in item or item["default"] is None:
candidates = []
else:
candidates = [1, 2, item["default"], 2 * (item["default"] + 1), item["default"] // 2, -1 * item["default"]]
if "minimum" in item:
minimum = item["minimum"]
candidates = [num for num in candidates if num > minimum]
if "maximum" in item:
maximum = item["maximum"]
candidates = [num for num in candidates if num < maximum]
return candidates + [random.random() * 0.99 * maximum]
def explore_integer(item: Dict[str, Any]) -> List[ParameterBaseTypes]:
"""Return possible parameter values for the `integer` parameter type.
Args:
item: dictionary containing details on the parameter such as default, min and max values.
TODO(Wael): Improve logic.
"""
minimum, maximum = 0, 10
if "default" not in item or item["default"] is None:
candidates = []
else:
candidates = [item["default"], 2 * (item["default"] + 1), item["default"] // 2, -1 * item["default"]]
if "minimum" in item:
minimum = item["minimum"]
candidates = [num for num in candidates if num >= item["minimum"]]
if "maximum" in item:
maximum = item["maximum"]
candidates = [num for num in candidates if num <= item["maximum"]]
return candidates + [random.randint(minimum, maximum)]
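# Illustrative note (added, not in the original): for a hypothetical
# item == {"default": 4, "minimum": 1, "maximum": 100}, the candidates above start as
# [4, 10, 2, -4]; the bounds filters keep [4, 10, 2], and one extra random integer
# drawn from [1, 100] is appended.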
def explore_string(item: Dict[str, Any]) -> List[ParameterBaseTypes]:
"""Return possible parameter values for the `string` parameter type.
Args:
item: dictionary containing details on the parameter such as default, min and max values.
"""
if "enum" in item:
return item["enum"]
return [item["default"]]
def explore_boolean() -> List[bool]:
"""Return possible parameter values for the `boolean` parameter type (i.e. [True, False])"""
return [True, False]
def explore_null() -> List[None]:
"""Return possible parameter values for the `null` parameter type (i.e. [None])"""
return [None]
|
914 |
should clause
|
"""
Available Queries
-----------------
Queries are used for actual searching - things like relevancy scores,
Levenstein distance, and partial matches.
View the `elasticsearch documentation <query_docs>`_ to see what other options
are available, and put 'em here if you end up using any of 'em.
.. _`query_docs`: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-queries.html
"""
import re
from .filters import date_range, range_filter
MUST = "must"
MUST_NOT = "must_not"
SHOULD = "should"
BOOL = "bool"
DISTANCE_UNITS = ["miles", "yards", "feet", "inch", "kilometers", "meters",
"centimeters", "millimeters", "nauticalmiles"]
def BOOL_CLAUSE(query, **kwargs):
return _CLAUSE(BOOL, query, **kwargs)
def MUST_CLAUSE(query, **kwargs):
return _CLAUSE(MUST, query, **kwargs)
def MUST_NOT_CLAUSE(query, **kwargs):
return _CLAUSE(MUST_NOT, query, **kwargs)
def METHOD_NAME(query, **kwargs):
return _CLAUSE(SHOULD, query, **kwargs)
def _CLAUSE(clause, query, **kwargs):
clause = {clause: query}
clause.update(kwargs)
return clause
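# Illustrative example (added, not in the original module): METHOD_NAME wraps a query in a
# "should" clause, e.g. METHOD_NAME({"match_all": {}}, minimum_should_match=1)
# returns {"should": {"match_all": {}}, "minimum_should_match": 1}.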
CLAUSES = {
MUST: MUST_CLAUSE,
MUST_NOT: MUST_NOT_CLAUSE,
SHOULD: METHOD_NAME,
BOOL: BOOL_CLAUSE
}
def match_all():
"""No-op query used because a default must be specified"""
return {"match_all": {}}
def search_string_query(search_string, default_fields):
"""
All input defaults to doing an infix search for each term.
(This may later change to some kind of fuzzy matching).
This is also available via the main ESQuery class.
"""
if not search_string:
return match_all()
# Parse user input into individual search terms
r = re.compile(r'\w+')
tokens = r.findall(search_string)
query_string = "*{}*".format("* *".join(tokens))
# TODO: add support for searching date ranges.
return {
"query_string": {
"query": query_string,
"default_operator": "AND",
"fields": default_fields,
}
}
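# Illustrative example (added, not in the original module): for a hypothetical call
# search_string_query("john smi", ["name"]), the tokens become the wildcard string
# "*john* *smi*", so the function returns
# {"query_string": {"query": "*john* *smi*", "default_operator": "AND", "fields": ["name"]}}.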
def ids_query(doc_ids):
return {"ids": {"values": doc_ids}}
def match(search_string, field, operator=None):
if operator not in [None, 'and', 'or']:
raise ValueError(" 'operator' argument should be one of: 'and', 'or' ")
return {
"match": {
field: {
"query": search_string,
# OR is the accepted default for the operator on an ES match query
"operator": 'and' if operator == 'and' else 'or',
"fuzziness": "0",
}
}
}
def fuzzy(search_string, field, fuzziness="AUTO"):
return {
"fuzzy": {
field: {
"value": f"{search_string}".lower(),
"fuzziness": fuzziness,
"max_expansions": 100
}
}
}
def nested(path, query, *args, **kwargs):
"""
Creates a nested query for use with nested documents
Keyword arguments such as score_mode and others can be added.
"""
nested = {
"path": path,
"query": query
}
nested.update(kwargs)
return {
"nested": nested
}
def nested_filter(path, filter_, *args, **kwargs):
"""
Creates a nested filter for use with nested documents
Keyword arguments such as score_mode and others can be added.
"""
nested = {
"path": path,
"filter": filter_
}
nested.update(kwargs)
return {
"nested": nested
}
def filtered(query, filter_):
"""
Filtered query for performing both filtering and querying at once
"""
return {
"bool": {
"filter": [filter_],
"must": query
}
}
def regexp(field, regex):
return {
'regexp': {
field: {
'value': regex,
}
}
}
def geo_distance(field, geopoint, **kwargs):
"""Filters cases to those within a certain distance of the provided geopoint
eg: geo_distance('gps_location', GeoPoint(-33.1, 151.8), kilometers=100)
"""
if len(kwargs) != 1 or not all(k in DISTANCE_UNITS for k in kwargs):
raise ValueError("'geo_distance' requires exactly one distance kwarg, "
f"options are {', '.join(DISTANCE_UNITS)}")
unit, distance = kwargs.popitem()
return {
'geo_distance': {
field: geopoint.lat_lon,
'distance': f"{distance}{unit}",
}
}
range_query = range_filter
date_range = date_range
|
915 |
e tag
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
A private endpoint connection for a project.
"""
def __init__(__self__, METHOD_NAME=None, id=None, name=None, properties=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", METHOD_NAME)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="eTag")
def METHOD_NAME(self) -> Optional[str]:
"""
For optimistic concurrency control.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def id(self) -> str:
"""
Path reference to this private endpoint connection. /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/assessmentProjects/{projectName}/privateEndpointConnections/{privateEndpointConnectionName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the private endpoint connection.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.PrivateEndpointConnectionPropertiesResponse':
"""
Properties of the private endpoint connection.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the object = [Microsoft.Migrate/assessmentProjects/privateEndpointConnections].
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
METHOD_NAME=self.METHOD_NAME,
id=self.id,
name=self.name,
properties=self.properties,
type=self.type)
def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str] = None,
project_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
Get information related to a specific private endpoint connection in the project. Returns a json object of type 'privateEndpointConnections' as specified in the models section.
:param str private_endpoint_connection_name: Unique name of a private endpoint connection within a project.
:param str project_name: Name of the Azure Migrate project.
:param str resource_group_name: Name of the Azure Resource Group that project is part of.
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['projectName'] = project_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:migrate/v20191001:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
METHOD_NAME=pulumi.get(__ret__, 'e_tag'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
project_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
Get information related to a specific private endpoint connection in the project. Returns a json object of type 'privateEndpointConnections' as specified in the models section.
:param str private_endpoint_connection_name: Unique name of a private endpoint connection within a project.
:param str project_name: Name of the Azure Migrate project.
:param str resource_group_name: Name of the Azure Resource Group that project is part of.
"""
...
|
916 |
view range changed
|
import numpy as np
from .. import functions as fn
from .. import getConfigOption
from ..Point import Point
from ..Qt import QtCore, QtGui
from .UIGraphicsItem import UIGraphicsItem
__all__ = ['GridItem']
class GridItem(UIGraphicsItem):
"""
**Bases:** :class:`UIGraphicsItem <pyqtgraph.UIGraphicsItem>`
Displays a rectangular grid of lines indicating major divisions within a coordinate system.
Automatically determines what divisions to use.
"""
def __init__(self, pen='default', textPen='default'):
UIGraphicsItem.__init__(self)
#QtWidgets.QGraphicsItem.__init__(self, *args)
#self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemClipsToShape)
#self.setCacheMode(QtWidgets.QGraphicsItem.CacheMode.DeviceCoordinateCache)
self.opts = {}
self.setPen(pen)
self.setTextPen(textPen)
self.setTickSpacing(x=[None, None, None], y=[None, None, None])
def setPen(self, *args, **kwargs):
"""Set the pen used to draw the grid."""
if kwargs == {} and (args == () or args == ('default',)):
self.opts['pen'] = fn.mkPen(getConfigOption('foreground'))
else:
self.opts['pen'] = fn.mkPen(*args, **kwargs)
self.picture = None
self.update()
def setTextPen(self, *args, **kwargs):
"""Set the pen used to draw the texts."""
if kwargs == {} and (args == () or args == ('default',)):
self.opts['textPen'] = fn.mkPen(getConfigOption('foreground'))
else:
if args == (None,):
self.opts['textPen'] = None
else:
self.opts['textPen'] = fn.mkPen(*args, **kwargs)
self.picture = None
self.update()
def setTickSpacing(self, x=None, y=None):
"""
Set the grid tick spacing to use.
Tick spacing for each axis shall be specified as an array of
descending values, one for each tick scale. When the value
is set to None, grid line distance is chosen automatically
for this particular level.
Example:
Default setting of 3 scales for each axis:
setTickSpacing(x=[None, None, None], y=[None, None, None])
Single scale with distance of 1.0 for X axis, Two automatic
scales for Y axis:
setTickSpacing(x=[1.0], y=[None, None])
Single scale with distance of 1.0 for X axis, Two scales
for Y axis, one with spacing of 1.0, other one automatic:
setTickSpacing(x=[1.0], y=[1.0, None])
"""
self.opts['tickSpacing'] = (x or self.opts['tickSpacing'][0],
y or self.opts['tickSpacing'][1])
self.grid_depth = max([len(s) for s in self.opts['tickSpacing']])
self.picture = None
self.update()
def METHOD_NAME(self):
UIGraphicsItem.METHOD_NAME(self)
self.picture = None
#UIGraphicsItem.viewRangeChanged(self)
#self.update()
def paint(self, p, opt, widget):
#p.setPen(QtGui.QPen(QtGui.QColor(100, 100, 100)))
#p.drawRect(self.boundingRect())
#UIGraphicsItem.paint(self, p, opt, widget)
### draw picture
if self.picture is None:
#print "no pic, draw.."
self.generatePicture()
p.drawPicture(QtCore.QPointF(0, 0), self.picture)
#p.setPen(QtGui.QPen(QtGui.QColor(255,0,0)))
#p.drawLine(0, -100, 0, 100)
#p.drawLine(-100, 0, 100, 0)
#print "drawing Grid."
def generatePicture(self):
self.picture = QtGui.QPicture()
p = QtGui.QPainter()
p.begin(self.picture)
vr = self.getViewWidget().rect()
unit = self.pixelWidth(), self.pixelHeight()
dim = [vr.width(), vr.height()]
lvr = self.boundingRect()
ul = np.array([lvr.left(), lvr.top()])
br = np.array([lvr.right(), lvr.bottom()])
texts = []
if ul[1] > br[1]:
x = ul[1]
ul[1] = br[1]
br[1] = x
lastd = [None, None]
for i in range(self.grid_depth - 1, -1, -1):
dist = br-ul
nlTarget = 10.**i
d = 10. ** np.floor(np.log10(np.abs(dist/nlTarget))+0.5)
for ax in range(0,2):
ts = self.opts['tickSpacing'][ax]
try:
if ts[i] is not None:
d[ax] = ts[i]
except IndexError:
pass
lastd[ax] = d[ax]
ul1 = np.floor(ul / d) * d
br1 = np.ceil(br / d) * d
dist = br1-ul1
nl = (dist / d) + 0.5
for ax in range(0,2): ## Draw grid for both axes
if i >= len(self.opts['tickSpacing'][ax]):
continue
if d[ax] < lastd[ax]:
continue
ppl = dim[ax] / nl[ax]
c = int(fn.clip_scalar(5 * (ppl-3), 0, 50))
linePen = self.opts['pen']
lineColor = self.opts['pen'].color()
lineColor.setAlpha(c)
linePen.setColor(lineColor)
textPen = self.opts['textPen']
if textPen is not None:
textColor = self.opts['textPen'].color()
textColor.setAlpha(c * 2)
textPen.setColor(textColor)
bx = (ax+1) % 2
for x in range(0, int(nl[ax])):
linePen.setCosmetic(True)
p.setPen(linePen)
p1 = np.array([0.,0.])
p2 = np.array([0.,0.])
p1[ax] = ul1[ax] + x * d[ax]
p2[ax] = p1[ax]
p1[bx] = ul[bx]
p2[bx] = br[bx]
## don't draw lines that are out of bounds.
if p1[ax] < min(ul[ax], br[ax]) or p1[ax] > max(ul[ax], br[ax]):
continue
p.drawLine(QtCore.QPointF(p1[0], p1[1]), QtCore.QPointF(p2[0], p2[1]))
if i < 2 and textPen is not None:
if ax == 0:
x = p1[0] + unit[0]
y = ul[1] + unit[1] * 8.
else:
x = ul[0] + unit[0]*3
y = p1[1] + unit[1]
texts.append((QtCore.QPointF(x, y), "%g"%p1[ax]))
tr = self.deviceTransform()
p.setWorldTransform(fn.invertQTransform(tr))
if textPen is not None and len(texts) > 0:
# if there is at least one text, then c is set
textColor.setAlpha(c * 2)
p.setPen(QtGui.QPen(textColor))
for t in texts:
x = tr.map(t[0]) + Point(0.5, 0.5)
p.drawText(x, t[1])
p.end()
|
917 |
extract data
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.AppPlatform/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.appplatform.v2022_03_01_preview.AppPlatformManagementClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.OperationDetail"]:
"""Lists all of the available REST API operations of the Microsoft.AppPlatform provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationDetail or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2022_03_01_preview.models.OperationDetail]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop(
"api_version", _params.pop("api-version", self._api_version or "2022-03-01-preview")
)
cls: ClsType[_models.AvailableOperations] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def METHOD_NAME(pipeline_response):
deserialized = self._deserialize("AvailableOperations", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, METHOD_NAME)
list.metadata = {"url": "/providers/Microsoft.AppPlatform/operations"}
|
918 |
test captured
|
# -*- coding: utf-8 -*-
import sys
from contextlib import contextmanager
from pybind11_tests import iostream as m
try:
# Python 3
from io import StringIO
except ImportError:
# Python 2
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
# Python 3.4
from contextlib import redirect_stdout
except ImportError:
@contextmanager
def redirect_stdout(target):
original = sys.stdout
sys.stdout = target
yield
sys.stdout = original
try:
# Python 3.5
from contextlib import redirect_stderr
except ImportError:
@contextmanager
def redirect_stderr(target):
original = sys.stderr
sys.stderr = target
yield
sys.stderr = original
def METHOD_NAME(capsys):
msg = "I've been redirected to Python, I hope!"
m.captured_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
m.captured_err(msg)
stdout, stderr = capsys.readouterr()
assert stdout == ""
assert stderr == msg
def test_captured_large_string(capsys):
# Make this bigger than the buffer used on the C++ side: 1024 chars
msg = "I've been redirected to Python, I hope!"
msg = msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_2byte_offset0(capsys):
msg = "\u07FF"
msg = "" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_2byte_offset1(capsys):
msg = "\u07FF"
msg = "1" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_3byte_offset0(capsys):
msg = "\uFFFF"
msg = "" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_3byte_offset1(capsys):
msg = "\uFFFF"
msg = "1" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_3byte_offset2(capsys):
msg = "\uFFFF"
msg = "12" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset0(capsys):
msg = "\U0010FFFF"
msg = "" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset1(capsys):
msg = "\U0010FFFF"
msg = "1" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset2(capsys):
msg = "\U0010FFFF"
msg = "12" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset3(capsys):
msg = "\U0010FFFF"
msg = "123" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_guard_capture(capsys):
msg = "I've been redirected to Python, I hope!"
m.guard_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_series_captured(capture):
with capture:
m.captured_output("a")
m.captured_output("b")
assert capture == "ab"
def test_flush(capfd):
msg = "(not flushed)"
msg2 = "(flushed)"
with m.ostream_redirect():
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == ""
m.noisy_function(msg2, flush=True)
stdout, stderr = capfd.readouterr()
assert stdout == msg + msg2
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == msg
def test_not_captured(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ""
assert stream.getvalue() == ""
stream = StringIO()
with redirect_stdout(stream):
m.captured_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == ""
assert stream.getvalue() == msg
def test_err(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stderr(stream):
m.raw_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == msg
assert stream.getvalue() == ""
stream = StringIO()
with redirect_stderr(stream):
m.captured_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == ""
assert stream.getvalue() == msg
def test_multi_captured(capfd):
stream = StringIO()
with redirect_stdout(stream):
m.captured_output("a")
m.raw_output("b")
m.captured_output("c")
m.raw_output("d")
stdout, stderr = capfd.readouterr()
assert stdout == "bd"
assert stream.getvalue() == "ac"
def test_dual(capsys):
m.captured_dual("a", "b")
stdout, stderr = capsys.readouterr()
assert stdout == "a"
assert stderr == "b"
def test_redirect(capfd):
msg = "Should not be in log!"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ""
stream = StringIO()
with redirect_stdout(stream):
with m.ostream_redirect():
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stream.getvalue() == msg
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ""
def test_redirect_err(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
with redirect_stderr(stream):
with m.ostream_redirect(stdout=False):
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ""
assert stream.getvalue() == msg2
def test_redirect_both(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
stream2 = StringIO()
with redirect_stdout(stream):
with redirect_stderr(stream2):
with m.ostream_redirect():
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == ""
assert stream.getvalue() == msg
assert stream2.getvalue() == msg2
def test_threading():
with m.ostream_redirect(stdout=True, stderr=False):
# start some threads
threads = []
# start some threads
for _j in range(20):
threads.append(m.TestThread())
# give the threads some time to fail
threads[0].sleep()
# stop all the threads
for t in threads:
t.stop()
for t in threads:
t.join()
# if a thread segfaults, we don't get here
assert True
|
919 |
copysign
|
"""
mathematical functions.
Descriptions taken from:
https://raw.githubusercontent.com/micropython/micropython/master/docs/library/math.rst.
=====================================
.. module:: math
:synopsis: mathematical functions
|see_cpython_module| :mod:`python:math`.
The ``math`` module provides some basic mathematical functions for
working with floating-point numbers.
*Note:* On the pyboard, floating-point numbers have 32-bit precision.
Availability: not available on WiPy. Floating point support required
for this module.
"""
__author__ = "Howard C Lovatt"
__copyright__ = "Howard C Lovatt, 2020 onwards."
__license__ = "MIT https://opensource.org/licenses/MIT (as used by MicroPython)."
__version__ = "7.3.9" # Version set by https://github.com/hlovatt/tag2ver
from typing import SupportsFloat, Final
def acos(x: SupportsFloat, /) -> float:
"""
Return the inverse cosine of ``x``.
"""
def acosh(x: SupportsFloat, /) -> float:
"""
Return the inverse hyperbolic cosine of ``x``.
"""
def asin(x: SupportsFloat, /) -> float:
"""
Return the inverse sine of ``x``.
"""
def asinh(x: SupportsFloat, /) -> float:
"""
Return the inverse hyperbolic sine of ``x``.
"""
def atan(x: SupportsFloat, /) -> float:
"""
Return the inverse tangent of ``x``.
"""
def atan2(y: SupportsFloat, x: SupportsFloat, /) -> float:
"""
Return the principal value of the inverse tangent of ``y/x``.
"""
def atanh(x: SupportsFloat, /) -> float:
"""
Return the inverse hyperbolic tangent of ``x``.
"""
def ceil(x: SupportsFloat, /) -> int:
"""
Return an integer, being ``x`` rounded towards positive infinity.
"""
def METHOD_NAME(x: SupportsFloat, y: SupportsFloat, /) -> float:
"""
Return ``x`` with the sign of ``y``.
"""
def cos(x: SupportsFloat, /) -> float:
"""
Return the cosine of ``x``.
"""
def cosh(x: SupportsFloat, /) -> float:
"""
Return the hyperbolic cosine of ``x``.
"""
def degrees(x: SupportsFloat, /) -> float:
"""
Return radians ``x`` converted to degrees.
"""
def erf(x: SupportsFloat, /) -> float:
"""
Return the error function of ``x``.
"""
def erfc(x: SupportsFloat, /) -> float:
"""
Return the complementary error function of ``x``.
"""
def exp(x: SupportsFloat, /) -> float:
"""
Return the exponential of ``x``.
"""
def expm1(x: SupportsFloat, /) -> float:
"""
Return ``exp(x) - 1``.
"""
def fabs(x: SupportsFloat, /) -> float:
"""
Return the absolute value of ``x``.
"""
def floor(x: SupportsFloat, /) -> int:
"""
Return an integer, being ``x`` rounded towards negative infinity.
"""
def fmod(x: SupportsFloat, y: SupportsFloat, /) -> float:
"""
Return the remainder of ``x/y``.
"""
def frexp(x: SupportsFloat, /) -> tuple[float, int]:
"""
Decomposes a floating-point number into its mantissa and exponent.
The returned value is the tuple ``(m, e)`` such that ``x == m * 2**e``
exactly. If ``x == 0`` then the function returns ``(0.0, 0)``, otherwise
the relation ``0.5 <= abs(m) < 1`` holds.
"""
def gamma(x: SupportsFloat, /) -> float:
"""
Return the gamma function of ``x``.
"""
def isfinite(x: SupportsFloat, /) -> bool:
"""
Return ``True`` if ``x`` is finite.
"""
def isinf(x: SupportsFloat, /) -> bool:
"""
Return ``True`` if ``x`` is infinite.
"""
def isnan(x: SupportsFloat, /) -> bool:
"""
Return ``True`` if ``x`` is not-a-number (NaN).
"""
# noinspection PyShadowingNames
def ldexp(x: SupportsFloat, exp: int, /) -> float:
"""
Return ``x * (2**exp)``.
"""
def lgamma(x: SupportsFloat, /) -> float:
"""
Return the natural logarithm of the gamma function of ``x``.
"""
def log(x: SupportsFloat, /) -> float:
"""
Return the natural logarithm of ``x``.
"""
def log10(x: SupportsFloat, /) -> float:
"""
Return the base-10 logarithm of ``x``.
"""
def log2(x: SupportsFloat, /) -> float:
"""
Return the base-2 logarithm of ``x``.
"""
def modf(x: SupportsFloat, /) -> tuple[float, float]:
"""
Return a tuple of two floats, being the fractional and integral parts of
``x``. Both return values have the same sign as ``x``.
"""
def pow(x: SupportsFloat, y: SupportsFloat, /) -> float:
"""
Returns ``x`` to the power of ``y``.
"""
def radians(x: SupportsFloat, /) -> float:
"""
Return degrees ``x`` converted to radians.
"""
def sin(x: SupportsFloat, /) -> float:
"""
Return the sine of ``x``.
"""
def sinh(x: SupportsFloat, /) -> float:
"""
Return the hyperbolic sine of ``x``.
"""
def sqrt(x: SupportsFloat, /) -> float:
"""
Return the square root of ``x``.
"""
def tan(x: SupportsFloat, /) -> float:
"""
Return the tangent of ``x``.
"""
def tanh(x: SupportsFloat, /) -> float:
"""
Return the hyperbolic tangent of ``x``.
"""
def trunc(x: SupportsFloat, /) -> int:
"""
Return an integer, being ``x`` rounded towards 0.
"""
e: Final[float] = ...
"""
base of the natural logarithm
"""
pi: Final[float] = ...
"""
the ratio of a circle's circumference to its diameter
"""
|
920 |
connection string
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetMECRoleResult',
'AwaitableGetMECRoleResult',
'get_mec_role',
'get_mec_role_output',
]
@pulumi.output_type
class GetMECRoleResult:
"""
MEC role.
"""
def __init__(__self__, METHOD_NAME=None, controller_endpoint=None, id=None, kind=None, name=None, resource_unique_id=None, role_status=None, system_data=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'connection_string' to be a dict")
pulumi.set(__self__, "connection_string", METHOD_NAME)
if controller_endpoint and not isinstance(controller_endpoint, str):
raise TypeError("Expected argument 'controller_endpoint' to be a str")
pulumi.set(__self__, "controller_endpoint", controller_endpoint)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_unique_id and not isinstance(resource_unique_id, str):
raise TypeError("Expected argument 'resource_unique_id' to be a str")
pulumi.set(__self__, "resource_unique_id", resource_unique_id)
if role_status and not isinstance(role_status, str):
raise TypeError("Expected argument 'role_status' to be a str")
pulumi.set(__self__, "role_status", role_status)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="connectionString")
def METHOD_NAME(self) -> Optional['outputs.AsymmetricEncryptedSecretResponse']:
"""
Activation key of the MEC.
"""
return pulumi.get(self, "connection_string")
@property
@pulumi.getter(name="controllerEndpoint")
def controller_endpoint(self) -> Optional[str]:
"""
Controller Endpoint.
"""
return pulumi.get(self, "controller_endpoint")
@property
@pulumi.getter
def id(self) -> str:
"""
The path ID that uniquely identifies the object.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Role type.
Expected value is 'MEC'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceUniqueId")
def resource_unique_id(self) -> Optional[str]:
"""
Unique Id of the Resource.
"""
return pulumi.get(self, "resource_unique_id")
@property
@pulumi.getter(name="roleStatus")
def role_status(self) -> str:
"""
Role status.
"""
return pulumi.get(self, "role_status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of Role
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
class AwaitableGetMECRoleResult(GetMECRoleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetMECRoleResult(
METHOD_NAME=self.METHOD_NAME,
controller_endpoint=self.controller_endpoint,
id=self.id,
kind=self.kind,
name=self.name,
resource_unique_id=self.resource_unique_id,
role_status=self.role_status,
system_data=self.system_data,
type=self.type)
def get_mec_role(device_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMECRoleResult:
"""
Gets a specific role by name.
:param str device_name: The device name.
:param str name: The role name.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['deviceName'] = device_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20230101preview:getMECRole', __args__, opts=opts, typ=GetMECRoleResult).value
return AwaitableGetMECRoleResult(
METHOD_NAME=pulumi.get(__ret__, 'connection_string'),
controller_endpoint=pulumi.get(__ret__, 'controller_endpoint'),
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
resource_unique_id=pulumi.get(__ret__, 'resource_unique_id'),
role_status=pulumi.get(__ret__, 'role_status'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_mec_role)
def get_mec_role_output(device_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMECRoleResult]:
"""
Gets a specific role by name.
:param str device_name: The device name.
:param str name: The role name.
:param str resource_group_name: The resource group name.
"""
...
|
921 |
run
|
import json
from provider import qemu_img_utils as img_utils
from virttest import qemu_storage
from virttest import data_dir
def METHOD_NAME(test, params, env):
"""
Commit with explicit backing file specification.
1. create snapshot chain as image1->sn1->sn2
2. commit sn2 to base
3. check to see that sn2 is not emptied and the temp file in the corresponding
snapshot remains intact.
"""
def prepare_images_from_params(images, params):
"""Parse params to initialize a QImage list."""
return [qemu_storage.QemuImg(params.object_params(tag), root_dir, tag)
for tag in images]
def verify_backing_chain(info):
"""Verify image's backing chain."""
for image, img_info in zip(images, reversed(info)):
base_image = None
if image.base_tag:
base_params = params.object_params(image.base_tag)
base_image = qemu_storage.get_image_repr(image.base_tag,
base_params, root_dir)
base_image_from_info = img_info.get("full-backing-filename")
if base_image != base_image_from_info:
test.fail(("backing chain check for image %s failed, backing"
" file from info is %s, which should be %s.") %
(image.image_filename, base_image_from_info,
base_image))
images = params.get("image_chain", "").split()
if len(images) < 3:
test.cancel("Snapshot chain must at least contains three images")
params["image_name_%s" % images[0]] = params["image_name"]
params["image_format_%s" % images[0]] = params["image_format"]
root_dir = data_dir.get_data_dir()
images = prepare_images_from_params(images, params)
base, active_layer = images[0], images[-1]
md5sum_bin = params.get("md5sum_bin", "md5sum")
sync_bin = params.get("sync_bin", "sync")
hashes = {}
for image in images:
if image is not base:
test.log.debug("Create snapshot %s based on %s",
image.image_filename, image.base_image_filename)
image.create(image.params)
vm = img_utils.boot_vm_with_images(test, params, env, (image.tag,))
guest_file = params["guest_tmp_filename"] % image.tag
test.log.debug("Create tmp file %s in image %s", guest_file,
image.image_filename)
img_utils.save_random_file_to_vm(vm, guest_file, 2048 * 100, sync_bin)
session = vm.wait_for_login()
test.log.debug("Get md5 value fo the temporary file")
hashes[guest_file] = img_utils.check_md5sum(guest_file,
md5sum_bin, session)
session.close()
vm.destroy()
test.log.debug("Hashes of temporary files:\n%s", hashes)
test.log.debug("Verify the snapshot chain")
info = json.loads(active_layer.info(output="json"))
active_layer_size_before = info[0]["actual-size"]
verify_backing_chain(info)
test.log.debug("Commit image")
active_layer.commit(base=base.tag)
test.log.debug("Verify the snapshot chain after commit")
info = json.loads(active_layer.info(output="json"))
active_layer_size_after = info[0]["actual-size"]
test.log.debug("%s file size before commit: %s, after commit: %s",
active_layer.image_filename, active_layer_size_before,
active_layer_size_after)
if active_layer_size_after < active_layer_size_before:
test.fail("image %s is emptied after commit with explicit base" %
active_layer.image_filename)
verify_backing_chain(info)
test.log.debug("Verify hashes of temporary files")
vm = img_utils.boot_vm_with_images(test, params, env, (base.tag,))
session = vm.wait_for_login()
for tmpfile, hashval in hashes.items():
img_utils.check_md5sum(tmpfile, md5sum_bin, session,
md5_value_to_check=hashval)
for image in images:
if image is not base:
image.remove()
|
922 |
chinese text preprocessing
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unicodedata
from builtins import str as unicode
from typing import List, Tuple
__all__ = [
"chinese_text_preprocessing",
"english_text_preprocessing",
"any_locale_text_preprocessing",
"spanish_text_preprocessing",
"any_locale_word_tokenize",
"english_word_tokenize",
"LATIN_CHARS_ALL",
"normalize_unicode_text",
]
# Derived from LJSpeech
_synoglyphs = {
"'": ['’'],
'"': ['”', '“'],
}
SYNOGLYPH2ASCII = {g: asc for asc, glyphs in _synoglyphs.items() for g in glyphs}
# Example of parsing by groups via _WORDS_RE_EN.
# Regular expression pattern groups:
# 1st group -- valid english words,
# 2nd group -- any substring starts from | to | (mustn't be nested), useful when you want to leave sequence unchanged,
# 3rd group -- punctuation marks or whitespaces.
# Text (first line) and mask of groups for every char (second line).
# config file must contain |EY1 EY1|, B, C, D, E, F, and G.
# define char set based on https://en.wikipedia.org/wiki/List_of_Unicode_characters
LATIN_ALPHABET_BASIC = "A-Za-z"
ACCENTED_CHARS = "À-ÖØ-öø-ÿ"
LATIN_CHARS_ALL = f"{LATIN_ALPHABET_BASIC}{ACCENTED_CHARS}"
_WORDS_RE_EN = re.compile(
fr"([{LATIN_ALPHABET_BASIC}]+(?:[{LATIN_ALPHABET_BASIC}\-']*[{LATIN_ALPHABET_BASIC}]+)*)|(\|[^|]*\|)|([^{LATIN_ALPHABET_BASIC}|]+)"
)
_WORDS_RE_ANY_LOCALE = re.compile(
fr"([{LATIN_CHARS_ALL}]+(?:[{LATIN_CHARS_ALL}\-']*[{LATIN_CHARS_ALL}]+)*)|(\|[^|]*\|)|([^{LATIN_CHARS_ALL}|]+)"
)
def english_text_preprocessing(text, lower=True):
text = unicode(text)
text = ''.join(char for char in unicodedata.normalize('NFD', text) if unicodedata.category(char) != 'Mn')
text = ''.join(char if char not in SYNOGLYPH2ASCII else SYNOGLYPH2ASCII[char] for char in text)
if lower:
text = text.lower()
return text
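# Illustrative behaviour (example added for clarity, not part of the original module):
# diacritics are stripped via NFD plus removal of combining marks, curly quotes are folded
# to their ASCII look-alikes, and the text is lower-cased by default, e.g.
#     english_text_preprocessing('Café “Déjà Vu”')  # -> 'cafe "deja vu"'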
def any_locale_text_preprocessing(text: str) -> str:
"""
    Normalize unicode text with "NFC", and convert the right single quotation mark (U+2019, decimal 8217) to an apostrophe.
Args:
text (str): the original input sentence.
Returns: normalized text (str).
"""
res = []
for c in normalize_unicode_text(text):
if c in ['’']: # right single quotation mark (U+2019, decimal 8217) as an apostrophe
res.append("'")
else:
res.append(c)
return ''.join(res)
def normalize_unicode_text(text: str) -> str:
"""
    TODO @xueyang: Applying the NFC form may be too aggressive since it would ignore some accented characters that do not
    exist in the predefined German alphabet (nemo.collections.common.tokenizers.text_to_speech.ipa_lexicon.IPA_CHARACTER_SETS),
    such as 'é'. This is not expected. A better solution is to add an extra normalization with NFD to discard the
    diacritics and consider that 'é' and 'e' produce similar pronunciations.
Note that the tokenizer needs to run `unicodedata.normalize("NFC", x)` before calling `encode` function,
especially for the characters that have diacritics, such as 'ö' in the German alphabet. 'ö' can be encoded as
b'\xc3\xb6' (one char) as well as b'o\xcc\x88' (two chars). Without the normalization of composing two chars
together and without a complete predefined set of diacritics, when the tokenizer reads the input sentence
char-by-char, it would skip the combining diaeresis b'\xcc\x88', resulting in indistinguishable pronunciations
for 'ö' and 'o'.
Args:
text (str): the original input sentence.
Returns:
NFC normalized sentence (str).
"""
# normalize word with NFC form
if not unicodedata.is_normalized("NFC", text):
text = unicodedata.normalize("NFC", text)
return text
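# Worked example (added for illustration): NFC composes a decomposed character back into a
# single code point, so both spellings of 'ö' below become byte-identical after normalization.
#     decomposed = "o\u0308"                          # 'o' + combining diaeresis (two code points)
#     normalize_unicode_text(decomposed) == "\u00f6"  # True: single composed code point 'ö'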
def _word_tokenize(words: List[Tuple[str, str, str]], is_lower: bool = False) -> List[Tuple[List[str], bool]]:
"""
    Process a list of words and attach indicators showing whether each word is unchangeable or not. Each word representation
    can be one of: a valid word, a substring enclosed between '|' characters (an unchangeable word), or punctuation marks
    including whitespaces. This function will split unchangeable strings by whitespaces and return them as `List[str]`. For example,
.. code-block:: python
[
('Hello', '', ''), # valid word
('', '', ' '), # punctuation mark
('World', '', ''), # valid word
('', '', ' '), # punctuation mark
('', '|NVIDIA unchanged|', ''), # unchangeable word
('', '', '!') # punctuation mark
]
will be converted into,
.. code-block:: python
[
(["Hello"], False),
([" "], False),
(["World"], False),
([" "], False),
(["NVIDIA", "unchanged"], True),
(["!"], False)
]
Args:
words (List[str]): a list of tuples like `(maybe_word, maybe_without_changes, maybe_punct)` where each element
corresponds to a non-overlapping match of either `_WORDS_RE_EN` or `_WORDS_RE_ANY_LOCALE`.
is_lower (bool): a flag to trigger lowercase all words. By default, it is False.
Returns: List[Tuple[List[str], bool]], a list of tuples like `(a list of words, is_unchanged)`.
"""
result = []
for word in words:
maybe_word, maybe_without_changes, maybe_punct = word
without_changes = False
if maybe_word != '':
if is_lower:
token = [maybe_word.lower()]
else:
token = [maybe_word]
elif maybe_punct != '':
token = [maybe_punct]
elif maybe_without_changes != '':
without_changes = True
token = maybe_without_changes[1:-1].split(" ")
else:
raise ValueError(
f"This is not expected. Found empty string: <{word}>. "
f"Please validate your regular expression pattern '_WORDS_RE_EN' or '_WORDS_RE_ANY_LOCALE'."
)
result.append((token, without_changes))
return result
def english_word_tokenize(text: str) -> List[Tuple[List[str], bool]]:
words = _WORDS_RE_EN.findall(text)
return _word_tokenize(words, is_lower=True)
def any_locale_word_tokenize(text: str) -> List[Tuple[List[str], bool]]:
words = _WORDS_RE_ANY_LOCALE.findall(text)
return _word_tokenize(words)
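# Usage sketch (added for illustration; mirrors the _word_tokenize docstring above):
#     any_locale_word_tokenize("Hello World |NVIDIA unchanged|!")
#     # -> [(['Hello'], False), ([' '], False), (['World'], False), ([' '], False),
#     #     (['NVIDIA', 'unchanged'], True), (['!'], False)]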
def spanish_text_preprocessing(text: str) -> str:
return text.lower()
def METHOD_NAME(text: str) -> str:
return text
|
923 |
defer run cleanups
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import sys
from io import StringIO
from twisted.python import log
from twisted.trial.unittest import TestCase
import buildbot
from buildbot.process.buildstep import BuildStep
class PatcherMixin:
"""
Mix this in to get a few special-cased patching methods
"""
def patch_os_uname(self, replacement):
# twisted's 'patch' doesn't handle the case where an attribute
# doesn't exist..
if hasattr(os, 'uname'):
self.patch(os, 'uname', replacement)
else:
def cleanup():
del os.uname
self.addCleanup(cleanup)
os.uname = replacement
class StdoutAssertionsMixin:
"""
Mix this in to be able to assert on stdout during the test
"""
def setUpStdoutAssertions(self):
self.stdout = StringIO()
self.patch(sys, 'stdout', self.stdout)
def assertWasQuiet(self):
self.assertEqual(self.stdout.getvalue(), '')
def assertInStdout(self, exp):
self.assertIn(exp, self.stdout.getvalue())
def getStdout(self):
return self.stdout.getvalue().strip()
class TimeoutableTestCase(TestCase):
# The addCleanup in current Twisted does not time out any functions
# registered via addCleanups. Until we can depend on fixed Twisted, use
# TimeoutableTestCase whenever test failure may cause it to block and not
# report anything.
def METHOD_NAME(self, ignored, result):
self._deferRunCleanupResult = result
d = self._run('deferRunCleanupsTimeoutable', result)
d.addErrback(self._ebGotMaybeTimeout, result)
return d
def _ebGotMaybeTimeout(self, failure, result):
result.addError(self, failure)
def deferRunCleanupsTimeoutable(self):
return super().METHOD_NAME(None, self._deferRunCleanupResult)
def encodeExecutableAndArgs(executable, args, encoding="utf-8"):
"""
Encode executable and arguments from unicode to bytes.
This avoids a deprecation warning when calling reactor.spawnProcess()
"""
if isinstance(executable, str):
executable = executable.encode(encoding)
argsBytes = []
for arg in args:
if isinstance(arg, str):
arg = arg.encode(encoding)
argsBytes.append(arg)
return (executable, argsBytes)
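# Example use (added for illustration): both the executable and every argument come back as
# bytes, ready for reactor.spawnProcess() without deprecation warnings, e.g.
#     encodeExecutableAndArgs('git', ['clone', 'repo'])  # -> (b'git', [b'clone', b'repo'])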
def enable_trace(case, trace_exclusions=None, f=sys.stdout):
"""This function can be called to enable tracing of the execution
"""
if trace_exclusions is None:
trace_exclusions = [
"twisted", "worker_transition.py", "util/tu", "util/path",
"log.py", "/mq/", "/db/", "buildbot/data/", "fake/reactor.py"
]
bbbase = os.path.dirname(buildbot.__file__)
state = {'indent': 0}
def tracefunc(frame, event, arg):
if frame.f_code.co_filename.startswith(bbbase):
if not any(te in frame.f_code.co_filename for te in trace_exclusions):
if event == "call":
state['indent'] += 2
print("-" * state['indent'], frame.f_code.co_filename.replace(bbbase, ""),
frame.f_code.co_name, frame.f_code.co_varnames, file=f)
if event == "return":
state['indent'] -= 2
return tracefunc
sys.settrace(tracefunc)
case.addCleanup(sys.settrace, lambda _a, _b, _c: None)
class DebugIntegrationLogsMixin:
def setupDebugIntegrationLogs(self):
# to ease debugging we display the error logs in the test log
origAddCompleteLog = BuildStep.addCompleteLog
def addCompleteLog(self, name, _log):
if name.endswith("err.text"):
log.msg("got error log!", name, _log)
return origAddCompleteLog(self, name, _log)
self.patch(BuildStep, "addCompleteLog", addCompleteLog)
if 'BBTRACE' in os.environ:
enable_trace(self)
class BuildDictLookAlike:
""" a class whose instances compares to any build dict that this reporter is supposed to send
out"""
def __init__(self, extra_keys=None, expected_missing_keys=None, **assertions):
self.keys = [
'builder', 'builderid', 'buildid', 'buildrequest', 'buildrequestid',
'buildset', 'complete', 'complete_at', 'masterid', 'number',
'parentbuild', 'parentbuilder', 'properties', 'results',
'started_at', 'state_string', 'url', 'workerid'
]
if extra_keys:
self.keys.extend(extra_keys)
if expected_missing_keys is not None:
for key in expected_missing_keys:
self.keys.remove(key)
self.keys.sort()
self.assertions = assertions
def __eq__(self, b):
if sorted(b.keys()) != self.keys:
raise AssertionError('BuildDictLookAlike is not equal to build: '
f'Extra keys: {set(b.keys()) - set(self.keys)} '
f'Missing keys: {set(self.keys) - set(b.keys())}')
for k, v in self.assertions.items():
if b[k] != v:
return False
return True
def __ne__(self, b):
return not self == b
def __repr__(self):
return "{ any build }"
|
924 |
generate curve
|
# Alfonso del Carre
import numpy as np
import sharpy.utils.algebra as algebra
class Element(object):
"""
This class stores all the required data for the definition of
a linear or quadratic beam element.
"""
ordering = [0, 2, 1]
max_nodes_elem = 3
def __init__(self,
ielem,
n_nodes,
global_connectivities,
coordinates,
frame_of_reference_delta,
structural_twist,
num_mem,
stiff_index,
mass_index):
# store info in instance
# global element number
self.ielem = ielem
# number of nodes per elem
self.n_nodes = n_nodes
if self.max_nodes_elem < self.n_nodes:
raise AttributeError('Elements with more than 3 nodes are not allowed')
# global connectivities (global node numbers)
self.global_connectivities = global_connectivities
self.reordered_global_connectivities = global_connectivities[self.ordering]
# coordinates of the nodes in a-frame (body-fixed frame)
self.coordinates_def = coordinates.copy()
# frame of reference points
self.frame_of_reference_delta = frame_of_reference_delta
# structural twist
self.structural_twist = structural_twist
# number in memory (for fortran routines)
self.num_mem = num_mem
# stiffness and mass matrices indices (stored in parent beam class)
self.stiff_index = stiff_index
self.mass_index = mass_index
# placeholder for RBMass
self.rbmass = None # np.zeros((self.max_nodes_elem, 6, 6))
self.update(self.coordinates_def)
def update(self, coordinates_def, psi_def=None):
self.coordinates_def = coordinates_def.copy()
if psi_def is not None:
# element orientation
self.psi_def = psi_def.copy()
# element length
self.calculate_length()
if psi_def is None: # ini conditions, initial crv has to be calculated
# we need to define the FoR z direction for every beam element
v1, v2, v3 = self.get_triad()
self.psi_ini = algebra.triad2crv_vec(v1, v2, v3)
self.psi_def = self.psi_ini.copy()
# copy all the info to _ini fields
self.coordinates_ini = self.coordinates_def.copy()
def calculate_length(self):
# TODO implement length based on integration
self.length = np.linalg.norm(self.coordinates_def[0, :] - self.coordinates_def[1, :])
def add_attributes(self, dictionary):
for key, value in dictionary.items():
setattr(self, key, value)
def METHOD_NAME(self, n_elem_curve, defor=False):
curve = np.zeros((n_elem_curve, 3))
t_vec = np.linspace(0, 2, n_elem_curve)
for i in range(n_elem_curve):
t = t_vec[i]
for idim in range(3):
if defor:
polyfit, _, _ = algebra.get_polyfit(self.coordinates_def, self.ordering)
else:
polyfit, _, _ = algebra.get_polyfit(self.coordinates_ini, self.ordering)
polyf = np.poly1d(polyfit[idim])
curve[i, idim] = (polyf(t))
return curve
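    # Usage sketch (added for illustration; assumes an already initialised Element `elem`):
    # sample the polynomial fit through the element nodes at 20 points along the element,
    # in either the initial (defor=False) or the deformed (defor=True) configuration.
    #     curve_ini = elem.METHOD_NAME(20)              # ndarray of shape (20, 3)
    #     curve_def = elem.METHOD_NAME(20, defor=True)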
def get_triad(self):
"""
Generates two unit vectors in body FoR that define the local FoR for
a beam element. These vectors are calculated using `frame_of_reference_delta`
:return:
"""
# now, calculate tangent vector (and coefficients of the polynomial
# fit just in case)
tangent, polyfit = algebra.tangent_vector(
self.coordinates_def,
Element.ordering)
normal = np.zeros_like(tangent)
binormal = np.zeros_like(tangent)
# v_vector is the vector with origin the FoR node and delta
# equals frame_of_reference_delta
for inode in range(self.n_nodes):
v_vector = self.frame_of_reference_delta[inode, :]
normal[inode, :] = algebra.unit_vector(np.cross(
tangent[inode, :],
v_vector
)
)
binormal[inode, :] = -algebra.unit_vector(np.cross(
tangent[inode, :],
normal[inode, :]
)
)
# we apply twist now
for inode in range(self.n_nodes):
if not self.structural_twist[inode] == 0.0:
rotation_mat = algebra.rotation_matrix_around_axis(tangent[inode, :],
self.structural_twist[inode])
normal[inode, :] = np.dot(rotation_mat, normal[inode, :])
binormal[inode, :] = np.dot(rotation_mat, binormal[inode, :])
return tangent, binormal, normal
def deformed_triad(self, psi=None):
if psi is None:
return algebra.crv2triad_vec(self.psi_def)
else:
return algebra.crv2triad_vec(psi)
|
925 |
test accepts iso 8601 local
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import logging
from io import StringIO
import botocore.session
from botocore.client import ClientError
from botocore.exceptions import EndpointConnectionError
from tests import unittest
# This is really a combination of testing the debug logging mechanism
# as well as the response wire log, which theoretically could be
# implemented in any number of modules, which makes it hard to pick
# which integration test module this code should live in, so I picked
# the client module.
class TestResponseLog(unittest.TestCase):
def test_debug_log_contains_headers_and_body(self):
# This test just verifies that the response headers/body
# are in the debug log. It's an integration test so that
# we can refactor the code however we want, as long as we don't
# lose this feature.
session = botocore.session.get_session()
client = session.create_client('s3', region_name='us-west-2')
debug_log = StringIO()
session.set_stream_logger('', logging.DEBUG, debug_log)
client.list_buckets()
debug_log_contents = debug_log.getvalue()
self.assertIn('Response headers', debug_log_contents)
self.assertIn('Response body', debug_log_contents)
class TestAcceptedDateTimeFormats(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('emr', 'us-west-2')
def test_accepts_datetime_object(self):
response = self.client.list_clusters(
CreatedAfter=datetime.datetime.now()
)
self.assertIn('Clusters', response)
def test_accepts_epoch_format(self):
response = self.client.list_clusters(CreatedAfter=0)
self.assertIn('Clusters', response)
def test_accepts_iso_8601_unaware(self):
response = self.client.list_clusters(
CreatedAfter='2014-01-01T00:00:00'
)
self.assertIn('Clusters', response)
def test_accepts_iso_8601_utc(self):
response = self.client.list_clusters(
CreatedAfter='2014-01-01T00:00:00Z'
)
self.assertIn('Clusters', response)
def METHOD_NAME(self):
response = self.client.list_clusters(
CreatedAfter='2014-01-01T00:00:00-08:00'
)
self.assertIn('Clusters', response)
class TestClientErrors(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_region_mentioned_in_invalid_region(self):
client = self.session.create_client(
'cloudformation', region_name='us-east-999'
)
with self.assertRaisesRegex(
EndpointConnectionError, 'Could not connect to the endpoint URL'
):
client.list_stacks()
def test_client_modeled_exception(self):
client = self.session.create_client(
'dynamodb', region_name='us-west-2'
)
with self.assertRaises(client.exceptions.ResourceNotFoundException):
client.describe_table(TableName="NonexistentTable")
def test_client_modeleded_exception_with_differing_code(self):
client = self.session.create_client('iam', region_name='us-west-2')
# The NoSuchEntityException should be raised on NoSuchEntity error
# code.
with self.assertRaises(client.exceptions.NoSuchEntityException):
client.get_role(RoleName="NonexistentIAMRole")
def test_raises_general_client_error_for_non_modeled_exception(self):
client = self.session.create_client('ec2', region_name='us-west-2')
try:
client.describe_regions(DryRun=True)
except client.exceptions.ClientError as e:
self.assertIs(e.__class__, ClientError)
def test_can_catch_client_exceptions_across_two_different_clients(self):
client = self.session.create_client(
'dynamodb', region_name='us-west-2'
)
client2 = self.session.create_client(
'dynamodb', region_name='us-west-2'
)
with self.assertRaises(client2.exceptions.ResourceNotFoundException):
client.describe_table(TableName="NonexistentTable")
class TestClientMeta(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_region_name_on_meta(self):
client = self.session.create_client('s3', 'us-west-2')
self.assertEqual(client.meta.region_name, 'us-west-2')
def test_endpoint_url_on_meta(self):
client = self.session.create_client(
's3', 'us-west-2', endpoint_url='https://foo'
)
self.assertEqual(client.meta.endpoint_url, 'https://foo')
class TestClientInjection(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_can_inject_client_methods(self):
def extra_client_method(self, name):
return name
def inject_client_method(class_attributes, **kwargs):
class_attributes['extra_client_method'] = extra_client_method
self.session.register('creating-client-class.s3', inject_client_method)
client = self.session.create_client('s3', 'us-west-2')
# We should now have access to the extra_client_method above.
self.assertEqual(client.extra_client_method('foo'), 'foo')
class TestMixedEndpointCasing(unittest.TestCase):
def setUp(self):
self.url = 'https://EC2.US-WEST-2.amazonaws.com/'
self.session = botocore.session.get_session()
self.client = self.session.create_client(
'ec2', 'us-west-2', endpoint_url=self.url
)
def test_sigv4_is_correct_when_mixed_endpoint_casing(self):
res = self.client.describe_regions()
status_code = res['ResponseMetadata']['HTTPStatusCode']
self.assertEqual(status_code, 200)
|
926 |
product on basis
|
# sage.doctest: needs sage.modules
r"""
Examples of graded connected Hopf algebras with basis
"""
# ****************************************************************************
# Copyright (C) 2015 Jean-Baptiste Priez <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.categories.graded_hopf_algebras_with_basis import GradedHopfAlgebrasWithBasis
from sage.combinat.free_module import CombinatorialFreeModule
from sage.arith.misc import binomial
from sage.misc.cachefunc import cached_method
from sage.sets.non_negative_integers import NonNegativeIntegers
class GradedConnectedCombinatorialHopfAlgebraWithPrimitiveGenerator(CombinatorialFreeModule):
r"""
This class illustrates an implementation of a graded Hopf algebra
with basis that has one primitive generator of degree 1 and basis
elements indexed by non-negative integers.
This Hopf algebra example differs from what topologists refer to as
a graded Hopf algebra because the twist operation in the tensor rule
satisfies
.. MATH::
(\mu \otimes \mu) \circ (id \otimes \tau \otimes id) \circ
(\Delta \otimes \Delta) = \Delta \circ \mu
where `\tau(x\otimes y) = y\otimes x`.
"""
def __init__(self, base_ring):
"""
EXAMPLES::
sage: H = GradedHopfAlgebrasWithBasis(QQ).Connected().example()
sage: TestSuite(H).run()
"""
CombinatorialFreeModule.__init__(self, base_ring, NonNegativeIntegers(),
category=GradedHopfAlgebrasWithBasis(base_ring).Connected())
@cached_method
def one_basis(self):
"""
        Returns 0, which indexes the unit of the Hopf algebra.
OUTPUT:
- the non-negative integer 0
EXAMPLES::
sage: H = GradedHopfAlgebrasWithBasis(QQ).Connected().example()
sage: H.one_basis()
0
sage: H.one()
P0
"""
return self.basis().keys()(0)
def degree_on_basis(self, i):
"""
        The degree of a non-negative integer is the integer itself.
INPUT:
- ``i`` -- a non-negative integer
OUTPUT:
- a non-negative integer
TESTS::
sage: H = GradedHopfAlgebrasWithBasis(QQ).Connected().example()
sage: H.degree_on_basis(45)
45
"""
return i
def _repr_(self):
"""
Representation of the graded connected Hopf algebra
EXAMPLES::
sage: GradedHopfAlgebrasWithBasis(QQ).Connected().example()
An example of a graded connected Hopf algebra with basis over Rational Field
"""
return "An example of a graded connected Hopf algebra with basis over %s" % self.base_ring()
def _repr_term(self, i):
"""
Representation for the basis element indexed by the integer ``i``.
EXAMPLES::
sage: H = GradedHopfAlgebrasWithBasis(QQ).Connected().example()
sage: H._repr_term(45)
'P45'
"""
return 'P' + repr(i)
def METHOD_NAME(self, i, j):
"""
The product of two basis elements.
The product of elements of degree ``i`` and ``j`` is an element
of degree ``i+j``.
INPUT:
- ``i``, ``j`` -- non-negative integers
OUTPUT:
- a basis element indexed by ``i+j``
TESTS::
sage: H = GradedHopfAlgebrasWithBasis(QQ).Connected().example()
sage: H.monomial(4) * H.monomial(5)
P9
"""
return self.monomial(i+j)
def coproduct_on_basis(self, i):
r"""
The coproduct of a basis element.
.. MATH::
\Delta(P_i) = \sum_{j=0}^i P_{i-j} \otimes P_j
INPUT:
- ``i`` -- a non-negative integer
OUTPUT:
- an element of the tensor square of ``self``
TESTS::
sage: H = GradedHopfAlgebrasWithBasis(QQ).Connected().example()
sage: H.monomial(3).coproduct()
P0 # P3 + 3*P1 # P2 + 3*P2 # P1 + P3 # P0
"""
return self.sum_of_terms(
((i-j, j), binomial(i, j))
for j in range(i+1)
)
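    # Worked example (added for clarity): for i = 2 the terms ((i-j, j), binomial(i, j))
    # with j = 0, 1, 2 carry coefficients 1, 2, 1, giving
    #     Delta(P_2) = P0 # P2 + 2*P1 # P1 + P2 # P0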
Example = GradedConnectedCombinatorialHopfAlgebraWithPrimitiveGenerator
|
927 |
setdt
|
""" BlueSky Datalogger """
# ToDo: Add description in comments
import numbers
import itertools
from datetime import datetime
import numpy as np
from bluesky import settings, stack
from bluesky.core import varexplorer as ve
import bluesky as bs
from bluesky.stack import command
# Register settings defaults
settings.set_variable_defaults(log_path='output')
logprecision = '%.8f'
# Dict to contain the definitions of periodic loggers
periodicloggers = dict()
# Dict to contain all loggers (also the periodic loggers)
allloggers = dict()
@command(name='CRELOG')
def crelogstack(name: 'txt', dt: float = None, header: 'string' = ''):
""" Create a new data logger.
Arguments:
- name: The name of the logger
- dt: The logging time interval. When a value is given for dt
this becomes a periodic logger.
- header: A header text to put at the top of each log file
"""
if name in allloggers:
return False, f'Logger {name} already exists'
crelog(name, dt, header)
return True, f'Created {"periodic" if dt else ""} logger {name}'
def crelog(name, dt=None, header=''):
""" Create a new logger. """
allloggers[name] = allloggers.get(name, CSVLogger(name, dt or 0.0, header))
if dt:
periodicloggers[name] = allloggers[name]
return allloggers[name]
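# Usage sketch (added for illustration, names are hypothetical): create a periodic logger
# that writes once per simulated second; because dt is truthy it is also registered in
# periodicloggers and flushed on every call to update().
#     statelog = crelog('STATELOG', dt=1.0, header='aircraft state log')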
def update():
""" This function writes to files of all periodic logs by calling the appropriate
functions for each type of periodic log, at the approriate update time. """
for log in periodicloggers.values():
log.log()
def reset():
""" This function closes all logs. It is called when simulation is
reset and at quit. """
CSVLogger.simt = 0.0
# Close all logs and remove reference to its file object
for log in allloggers.values():
log.reset()
def makeLogfileName(logname, prefix: str = ''):
timestamp = datetime.now().strftime('%Y%m%d_%H-%M-%S')
if prefix == '' or prefix.lower() == stack.get_scenname().lower():
fname = "%s_%s_%s.log" % (logname, stack.get_scenname(), timestamp)
else:
fname = "%s_%s_%s_%s.log" % (logname, stack.get_scenname(), prefix, timestamp)
return bs.resource(settings.log_path) / fname
def col2txt(col, nrows):
if isinstance(col, (list, np.ndarray)):
if isinstance(col[0], numbers.Integral):
ret = np.char.mod('%d', col)
elif isinstance(col[0], numbers.Number):
ret = np.char.mod(logprecision, col)
else:
ret = np.char.mod('%s', col)
if len(ret.shape) > 1:
for el in ret.T:
yield el
else:
yield ret
elif isinstance(col, numbers.Integral):
yield nrows * ['%d' % col]
elif isinstance(col, numbers.Number):
yield nrows * [logprecision % col]
# The input is not a number
else:
yield nrows * [col]
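# Illustrative behaviour (added comment, not part of the original module): col2txt yields one
# text column per input column, broadcasting scalars to nrows entries and formatting floats
# with logprecision.
#     list(col2txt(7, 3))           # -> [['7', '7', '7']]
#     list(col2txt([1.5, 2.5], 2))  # -> [array(['1.50000000', '2.50000000'], dtype='<U10')]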
class CSVLogger:
def __init__(self, name, dt, header):
self.name = name
self.file = None
self.fname = None
self.dataparents = []
self.header = header.split('\n')
self.tlog = 0.0
self.selvars = []
# In case this is a periodic logger: log timestep
self.dt = dt
self.default_dt = dt
# Register a command for this logger in the stack
stackcmd = {name: [
name + ' ON/OFF,[dt] or ADD [FROM parent] var1,...,varn',
'[txt,float/word,...]', self.stackio, name + " data logging on"]
}
stack.append_commands(stackcmd)
def write(self, line):
self.file.write(bytearray(line, 'ascii'))
def setheader(self, header):
self.header = header.split('\n')
def METHOD_NAME(self, dt):
self.dt = dt
self.default_dt = dt
def addvars(self, selection):
selvars = []
while selection:
parent = ''
if selection[0].upper() == 'FROM':
parent = selection[1] + '.'
del selection[0:2]
variables = list(itertools.takewhile(
lambda i: i.upper() != 'FROM', selection))
selection = selection[len(variables):]
for v in variables:
varobj = ve.findvar(parent + v)
if varobj:
selvars.append(varobj)
else:
return False, f'Variable {v} not found'
self.selvars = selvars
return True
def open(self, fname):
if self.file:
self.file.close()
self.file = open(fname, 'wb')
# Write the header
for line in self.header:
self.file.write(bytearray('# ' + line + '\n', 'ascii'))
# Write the column contents
columns = ['simt']
for v in self.selvars:
columns.append(v.varname)
self.file.write(
bytearray('# ' + str.join(', ', columns) + '\n', 'ascii'))
def isopen(self):
return self.file is not None
def log(self, *additional_vars):
if self.file and bs.sim.simt >= self.tlog:
# Set the next log timestep
self.tlog += self.dt
# Make the variable reference list
varlist = [bs.sim.simt]
varlist += [v.get() for v in self.selvars]
varlist += additional_vars
# Get the number of rows from the first array/list
nrows = 1
for v in varlist:
if isinstance(v, (list, np.ndarray)):
nrows = len(v)
break
if nrows == 0:
return
# Convert (numeric) arrays to text, leave text arrays untouched
txtdata = [
txtcol for col in varlist for txtcol in col2txt(col, nrows)]
# log the data to file
np.savetxt(self.file, np.vstack(txtdata).T,
delimiter=',', newline='\n', fmt='%s')
def start(self, prefix: str = ''):
""" Start this logger. """
self.tlog = bs.sim.simt
self.fname = makeLogfileName(self.name, prefix)
self.open(self.fname)
def reset(self):
self.dt = self.default_dt
self.tlog = 0.0
self.fname = None
if self.file:
self.file.close()
self.file = None
def listallvarnames(self):
return str.join(', ', (v.varname for v in self.selvars))
def stackio(self, *args):
if len(args) == 0:
text = 'This is '
if self.name in periodicloggers:
text += 'a periodic logger, with an update interval of %.2f seconds.\n' % self.dt
else:
text += 'a non-periodic logger.\n'
text += 'with variables: ' + self.listallvarnames() + '\n'
text += self.name + ' is ' + ('ON' if self.isopen() else 'OFF') + \
'\nUsage: ' + self.name + \
' ON/OFF,[dt] or ADD [FROM parent] var1,...,varn'
return True, text
# TODO: add list of logging vars
elif args[0] == 'ON':
if len(args) > 1:
if isinstance(args[1], float):
self.dt = args[1]
else:
return False, 'Turn ' + self.name + ' on with optional dt'
self.start()
elif args[0] == 'OFF':
self.reset()
elif args[0] == 'ADD':
return self.addvars(list(args[1:]))
return True
|
928 |
create optimizer
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import math
import net
class DygraphModel():
# define model
def create_model(self, config):
sparse_feature_number = config.get(
"hyper_parameters.sparse_feature_number")
sparse_feature_dim = config.get("hyper_parameters.sparse_feature_dim")
fc_sizes = config.get("hyper_parameters.fc_sizes")
sparse_fea_num = config.get('hyper_parameters.sparse_fea_num')
dense_feature_dim = config.get('hyper_parameters.dense_input_dim')
sparse_input_slot = config.get('hyper_parameters.sparse_inputs_slots')
use_embedding_gate = config.get('hyper_parameters.use_embedding_gate')
use_hidden_gate = config.get('hyper_parameters.use_hidden_gate')
gate_dnn_model = net.GateDNNLayer(
sparse_feature_number, sparse_feature_dim, dense_feature_dim,
sparse_input_slot - 1, fc_sizes, use_embedding_gate,
use_hidden_gate)
return gate_dnn_model
# define feeds which convert numpy of batch data to paddle.tensor
def create_feeds(self, batch_data, config):
dense_feature_dim = config.get('hyper_parameters.dense_input_dim')
sparse_tensor = []
for b in batch_data[:-1]:
sparse_tensor.append(
paddle.to_tensor(b.numpy().astype('int64').reshape(-1, 1)))
dense_tensor = paddle.to_tensor(batch_data[-1].numpy().astype(
'float32').reshape(-1, dense_feature_dim))
label = sparse_tensor[0]
return label, sparse_tensor[1:], dense_tensor
# define loss function by predicts and label
def create_loss(self, raw_pred, label):
loss = paddle.nn.functional.log_loss(
input=raw_pred, label=paddle.cast(label, "float32"))
loss = paddle.mean(loss)
return loss
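    # For reference (added comment): paddle's log_loss computes the binary cross entropy
    #     loss = -label * log(pred + eps) - (1 - label) * log(1 - pred + eps)
    # (eps is a small stability constant); paddle.mean then averages it over the batch.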
# define optimizer
def METHOD_NAME(self, dy_model, config):
lr = config.get("hyper_parameters.optimizer.learning_rate", 0.001)
optimizer = paddle.optimizer.Adam(
learning_rate=lr, parameters=dy_model.parameters())
return optimizer
# define metrics such as auc/acc
# multi-task need to define multi metric
def create_metrics(self):
metrics_list_name = ["auc"]
auc_metric = paddle.metric.Auc("ROC")
metrics_list = [auc_metric]
return metrics_list, metrics_list_name
# construct train forward phase
def train_forward(self, dy_model, metrics_list, batch_data, config):
label, sparse_tensor, dense_tensor = self.create_feeds(batch_data,
config)
raw_pred = dy_model.forward(sparse_tensor, dense_tensor)
loss = self.create_loss(raw_pred, label)
# update metrics
predict_2d = paddle.concat(x=[1 - raw_pred, raw_pred], axis=1)
metrics_list[0].update(preds=predict_2d.numpy(), labels=label.numpy())
# print_dict format :{'loss': loss}
print_dict = None
return loss, metrics_list, print_dict
def infer_forward(self, dy_model, metrics_list, batch_data, config):
label, sparse_tensor, dense_tensor = self.create_feeds(batch_data,
config)
raw_pred = dy_model.forward(sparse_tensor, dense_tensor)
# update metrics
predict_2d = paddle.concat(x=[1 - raw_pred, raw_pred], axis=1)
metrics_list[0].update(preds=predict_2d.numpy(), labels=label.numpy())
return metrics_list, None
|
929 |
build default receive data dir
|
# -*- coding: utf-8 -*-
"""
OnionShare | https://onionshare.org/
Copyright (C) 2014-2022 Micah Lee, et al. <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import json
import platform
if platform.system() == "Darwin":
import pwd
class ModeSettings:
"""
This stores the settings for a single instance of an OnionShare mode. In CLI there
is only one ModeSettings, and in the GUI there is a separate ModeSettings for each tab
"""
def __init__(self, common, filename=None, id=None):
self.common = common
self.default_settings = {
"onion": {
"private_key": None,
"client_auth_priv_key": None,
"client_auth_pub_key": None,
},
"persistent": {"mode": None, "enabled": False},
"general": {
"title": None,
"public": False,
"autostart_timer": False,
"autostop_timer": False,
"service_id": None,
},
"share": {"autostop_sharing": True, "filenames": []},
"receive": {
"data_dir": self.METHOD_NAME(),
"webhook_url": None,
"disable_text": False,
"disable_files": False,
},
"website": {"disable_csp": False, "custom_csp": None, "filenames": []},
"chat": {},
}
self._settings = {}
self.just_created = False
if id:
self.id = id
else:
self.id = self.common.build_password(3)
self.load(filename)
def fill_in_defaults(self):
"""
If there are any missing settings from self._settings, replace them with
their default values.
"""
for key in self.default_settings:
if key in self._settings:
for inner_key in self.default_settings[key]:
if inner_key not in self._settings[key]:
self._settings[key][inner_key] = self.default_settings[key][
inner_key
]
else:
self._settings[key] = self.default_settings[key]
def get(self, group, key):
return self._settings[group][key]
def set(self, group, key, val):
self._settings[group][key] = val
self.common.log(
"ModeSettings", "set", f"updating {self.id}: {group}.{key} = {val}"
)
self.save()
def METHOD_NAME(self):
"""
Returns the path of the default Downloads directory for receive mode.
"""
if self.common.platform == "Darwin":
# We can't use os.path.expanduser() in macOS because in the sandbox it
# returns the path to the sandboxed homedir
real_homedir = pwd.getpwuid(os.getuid()).pw_dir
return os.path.join(real_homedir, "OnionShare")
elif self.common.platform == "Windows":
# On Windows, os.path.expanduser() needs to use backslash, or else it
# retains the forward slash, which breaks opening the folder in explorer.
return os.path.expanduser("~\\OnionShare")
else:
# All other OSes
return os.path.expanduser("~/OnionShare")
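    # Illustrative results (added for clarity; actual home directories vary per user):
    #   macOS:   /Users/<user>/OnionShare   (real homedir via pwd, bypassing the sandbox path)
    #   Windows: C:\Users\<user>\OnionShare
    #   other:   /home/<user>/OnionShare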
def load(self, filename=None):
# Load persistent settings from disk. If the file doesn't exist, create it
if filename:
self.filename = filename
else:
self.filename = os.path.join(
self.common.build_persistent_dir(), f"{self.id}.json"
)
if os.path.exists(self.filename):
try:
with open(self.filename, "r") as f:
self._settings = json.load(f)
self.fill_in_defaults()
self.common.log("ModeSettings", "load", f"loaded {self.filename}")
return
except Exception:
pass
# If loading settings didn't work, create the settings file
self.common.log("ModeSettings", "load", f"creating {self.filename}")
self.fill_in_defaults()
self.just_created = True
def save(self):
# Save persistent setting to disk
if not self.get("persistent", "enabled"):
return
if self.filename:
with open(self.filename, "w") as file:
file.write(json.dumps(self._settings, indent=2))
def delete(self):
# Delete the file from disk
if os.path.exists(self.filename):
os.remove(self.filename)
|
930 |
test casepriority list api method get
|
import urllib.parse
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import Casepriority
class CasepriorityAPIViewTestCase(TestCase):
"""casepriority API view tests"""
@classmethod
def setUpTestData(cls):
# create object
Casepriority.objects.create(casepriority_name='casepriority_api_1')
# create user
User.objects.create_user(
username='testuser_casepriority_api', password='IkVd4MCMYIlTf5MbCiF8'
)
def test_casepriority_list_api_unauthorized(self):
"""unauthorized access is forbidden"""
# get response
response = self.client.get('/api/casepriority/')
# compare
self.assertEqual(response.status_code, 401)
def METHOD_NAME(self):
"""GET is allowed"""
# login testuser
self.client.login(
username='testuser_casepriority_api', password='IkVd4MCMYIlTf5MbCiF8'
)
# get response
response = self.client.get('/api/casepriority/')
# compare
self.assertEqual(response.status_code, 200)
def test_casepriority_list_api_method_post(self):
"""POST is forbidden"""
# login testuser
self.client.login(
username='testuser_casepriority_api', password='IkVd4MCMYIlTf5MbCiF8'
)
# create POST string
poststring = {"casepriority_name": "casepriority_api_2"}
# get response
response = self.client.post('/api/casepriority/', data=poststring)
# compare
self.assertEqual(response.status_code, 405)
def test_casepriority_list_api_redirect(self):
"""test redirect with appending slash"""
# login testuser
self.client.login(
username='testuser_casepriority_api', password='IkVd4MCMYIlTf5MbCiF8'
)
# create url
destination = urllib.parse.quote('/api/casepriority/', safe='/')
# get response
response = self.client.get('/api/casepriority', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
def test_casepriority_detail_api_unauthorized(self):
"""unauthorized access is forbidden"""
# get object
casepriority_api_1 = Casepriority.objects.get(
casepriority_name='casepriority_api_1'
)
# get response
response = self.client.get(
'/api/casepriority/' + str(casepriority_api_1.casepriority_id) + '/'
)
# compare
self.assertEqual(response.status_code, 401)
def test_casepriority_detail_api_method_get(self):
"""GET is allowed"""
# get object
casepriority_api_1 = Casepriority.objects.get(
casepriority_name='casepriority_api_1'
)
# login testuser
self.client.login(
username='testuser_casepriority_api', password='IkVd4MCMYIlTf5MbCiF8'
)
# get response
response = self.client.get(
'/api/casepriority/' + str(casepriority_api_1.casepriority_id) + '/'
)
# compare
self.assertEqual(response.status_code, 200)
def test_casepriority_detail_api_method_delete(self):
"""DELETE is forbidden"""
# get object
casepriority_api_1 = Casepriority.objects.get(
casepriority_name='casepriority_api_1'
)
# login testuser
self.client.login(
username='testuser_casepriority_api', password='IkVd4MCMYIlTf5MbCiF8'
)
# get response
response = self.client.delete(
'/api/casepriority/' + str(casepriority_api_1.casepriority_id) + '/'
)
# compare
self.assertEqual(response.status_code, 405)
def test_casepriority_detail_api_method_put(self):
"""PUT is forbidden"""
# get object
casepriority_api_1 = Casepriority.objects.get(
casepriority_name='casepriority_api_1'
)
# login testuser
self.client.login(
username='testuser_casepriority_api', password='IkVd4MCMYIlTf5MbCiF8'
)
# create url
destination = urllib.parse.quote(
'/api/casepriority/' + str(casepriority_api_1.casepriority_id) + '/',
safe='/',
)
# create PUT string
putstring = {"casepriority_name": "new_casepriority_api_1"}
# get response
response = self.client.put(
destination, data=putstring, content_type='application/json'
)
# compare
self.assertEqual(response.status_code, 405)
def test_casepriority_detail_api_redirect(self):
"""test redirect with appending slash"""
# get object
casepriority_api_1 = Casepriority.objects.get(
casepriority_name='casepriority_api_1'
)
# login testuser
self.client.login(
username='testuser_casepriority_api', password='IkVd4MCMYIlTf5MbCiF8'
)
# create url
destination = urllib.parse.quote(
'/api/casepriority/' + str(casepriority_api_1.casepriority_id) + '/',
safe='/',
)
# get response
response = self.client.get(
'/api/casepriority/' + str(casepriority_api_1.casepriority_id), follow=True
)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
|
931 |
my malformed namespace
|
# Standard Library
from typing import Dict
from unittest import mock
# Third-party
import pytest
# Sematic
from sematic import func
from sematic.abstract_future import AbstractFuture
from sematic.caching.caching import determine_cache_namespace, get_future_cache_key
from sematic.runners.state_machine_runner import StateMachineRunner
# these values were calculated by hand on paper to validate the algorithm
# (they were all correct on the first try)
MY_CACHE_KEY = "ec8eaec9ea3bd0315d5bd0839380ed2cab6bf526_my_namespace"
MY_OTHER_CACHE_KEY = "ec8eaec9ea3bd0315d5bd0839380ed2cab6bf526_my_other_namespace"
@func
def my_pipeline(a: int, b: Dict[str, int]) -> int:
# a dict arg must be included ^ to check that we correctly cover
# potential "TypeError: unhashable type" errors
return a + b["test_key"]
@func
def my_other_pipeline(a: int) -> int:
return a
def my_namespace(_: AbstractFuture) -> str:
return "my_namespace"
def my_other_namespace(_: AbstractFuture) -> str:
return "my_other_namespace"
@pytest.fixture(scope="function")
def my_future() -> AbstractFuture:
future = my_pipeline(1, {"test_key": 2})
future.resolved_kwargs = StateMachineRunner._get_concrete_kwargs(future)
return future
@pytest.fixture(scope="function")
def my_other_future() -> AbstractFuture:
future = my_other_pipeline(1)
future.resolved_kwargs = StateMachineRunner._get_concrete_kwargs(future)
return future
def test_none_namespace(my_future: AbstractFuture):
with pytest.raises(ValueError, match="cannot be None"):
get_future_cache_key(None, my_future) # type: ignore
def test_unresolved_args_namespace():
with pytest.raises(ValueError, match="Not all input arguments are resolved"):
get_future_cache_key("my_namespace", my_pipeline(1, {"test_key": 2}))
def test_namespace_str_happy_path(my_future: AbstractFuture):
actual = get_future_cache_key("my_namespace", my_future)
assert actual == MY_CACHE_KEY
actual = get_future_cache_key("my_other_namespace", my_future)
assert actual == MY_OTHER_CACHE_KEY
def test_resolve_namespace_str_happy_path(my_future: AbstractFuture):
actual = determine_cache_namespace("my_namespace", my_future)
assert actual == "my_namespace"
def test_resolve_namespace_callable_happy_path(my_future: AbstractFuture):
actual = determine_cache_namespace(my_namespace, my_future)
assert actual == "my_namespace"
actual = determine_cache_namespace(my_other_namespace, my_future)
assert actual == "my_other_namespace"
def test_resolve_namespace_str_truncated(my_future: AbstractFuture):
actual = determine_cache_namespace(
"01234567890123456789012345678901234567890123456789extra",
my_future,
)
assert actual == "01234567890123456789012345678901234567890123456789"
def test_resolve_namespace_callable_truncated(my_future: AbstractFuture):
def my_custom_namespace(_: AbstractFuture) -> str:
return "01234567890123456789012345678901234567890123456789extra"
actual = determine_cache_namespace(my_custom_namespace, my_future)
assert actual == "01234567890123456789012345678901234567890123456789"
def test_custom_resolve_namespace(
my_future: AbstractFuture, my_other_future: AbstractFuture
):
def my_custom_namespace(future: AbstractFuture) -> str:
fqpn = future.function.get_func_fqpn() # type: ignore # noqa: ignore
if fqpn == "sematic.caching.tests.test_caching.my_pipeline":
return "my_namespace"
if fqpn == "sematic.caching.tests.test_caching.my_other_pipeline":
return "my_other_namespace"
return "whatever"
actual = determine_cache_namespace(my_custom_namespace, my_future)
assert actual == "my_namespace"
actual = determine_cache_namespace(my_custom_namespace, my_other_future)
assert actual == "my_other_namespace"
def test_invalid_args_resolve_namespace(my_future: AbstractFuture):
with pytest.raises(ValueError, match="cannot be None"):
determine_cache_namespace(None, my_future)
actual = determine_cache_namespace("my_namespace", None) # type: ignore
assert actual == "my_namespace"
with pytest.raises(ValueError, match="cannot be None"):
determine_cache_namespace(my_namespace, None) # type: ignore
nested_future = mock.MagicMock()
nested_future.is_root_future.return_value = False
with pytest.raises(ValueError, match="must be a pipeline run root Future"):
determine_cache_namespace(my_namespace, nested_future)
def test_malformed_resolve_namespace(my_future: AbstractFuture):
def METHOD_NAME() -> str:
return "my_namespace"
with pytest.raises(TypeError, match="takes 0 positional arguments but 1 was given"):
determine_cache_namespace(METHOD_NAME, my_future) # type: ignore
def test_resolve_namespace_error_raised(my_future: AbstractFuture):
def my_error_namespace(_: AbstractFuture) -> str:
raise ValueError("test error")
with pytest.raises(ValueError, match="test error"):
determine_cache_namespace(my_error_namespace, my_future)
|
932 |
evaluate
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.application.dssp"
__author__ = "Patrick Kunzmann"
__all__ = ["DsspApp"]
from tempfile import NamedTemporaryFile
from ..localapp import LocalApp, cleanup_tempfile
from ..application import AppState, requires_state
from ...structure.io.pdbx.file import PDBxFile
from ...structure.io.pdbx.convert import set_structure
import numpy as np
class DsspApp(LocalApp):
r"""
Annotate the secondary structure of a protein structure using the
*DSSP* software.
Internally this creates a :class:`Popen` instance, which handles
the execution.
DSSP differentiates between 8 different types of secondary
structure elements:
- C: loop, coil or irregular
- H: :math:`{\alpha}`-helix
- B: :math:`{\beta}`-bridge
- E: extended strand, participation in :math:`{\beta}`-ladder
- G: 3 :sub:`10`-helix
- I: :math:`{\pi}`-helix
- T: hydrogen bonded turn
- S: bend
Parameters
----------
atom_array : AtomArray
The atom array to be annotated.
bin_path : str, optional
        Path of the *DSSP* binary.
Examples
--------
>>> app = DsspApp(atom_array)
>>> app.start()
>>> app.join()
>>> print(app.get_sse())
['C' 'H' 'H' 'H' 'H' 'H' 'H' 'H' 'T' 'T' 'G' 'G' 'G' 'G' 'T' 'C' 'C' 'C'
'C' 'C']
"""
def __init__(self, atom_array, bin_path="mkdssp"):
super().__init__(bin_path)
        # mkdssp also requires the
        # 'occupancy', 'b_factor' and 'charge' fields
# -> Add these annotations to a copy of the input structure
self._array = atom_array.copy()
categories = self._array.get_annotation_categories()
if "charge" not in categories:
self._array.set_annotation(
"charge", np.zeros(self._array.array_length(), dtype=int)
)
if "b_factor" not in categories:
self._array.set_annotation(
"b_factor", np.zeros(self._array.array_length(), dtype=float)
)
if "occupancy" not in categories:
self._array.set_annotation(
"occupancy", np.ones(self._array.array_length(), dtype=float)
)
self._in_file = NamedTemporaryFile("w", suffix=".cif", delete=False)
self._out_file = NamedTemporaryFile("r", suffix=".dssp", delete=False)
def run(self):
in_file = PDBxFile()
set_structure(in_file, self._array, data_block="DSSP_INPUT")
in_file.write(self._in_file)
self._in_file.flush()
self.set_arguments(
["-i", self._in_file.name, "-o", self._out_file.name]
)
super().run()
def METHOD_NAME(self):
super().METHOD_NAME()
lines = self._out_file.read().split("\n")
# Index where SSE records start
sse_start = None
for i, line in enumerate(lines):
if line.startswith(" # RESIDUE AA STRUCTURE"):
sse_start = i+1
if sse_start is None:
raise ValueError("DSSP file does not contain SSE records")
# Remove "!" for missing residues
lines = [
line for line in lines[sse_start:]
if len(line) != 0 and line[13] != "!"
]
self._sse = np.zeros(len(lines), dtype="U1")
# Parse file for SSE letters
for i, line in enumerate(lines):
self._sse[i] = line[16]
self._sse[self._sse == " "] = "C"
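    # Parsing note (added for clarity, based on the column indices used above): in each DSSP
    # record line, character 13 holds the one-letter amino acid code ('!' marks a chain break
    # or missing residue and is skipped) and character 16 holds the secondary structure
    # symbol; a blank symbol is mapped to 'C' (coil).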
def clean_up(self):
super().clean_up()
cleanup_tempfile(self._in_file)
cleanup_tempfile(self._out_file)
@requires_state(AppState.JOINED)
def get_sse(self):
"""
Get the resulting secondary structure assignment.
Returns
-------
sse : ndarray, dtype="U1"
An array containing DSSP secondary structure symbols
corresponding to the residues in the input atom array.
"""
return self._sse
@staticmethod
def annotate_sse(atom_array, bin_path="mkdssp"):
"""
Perform a secondary structure assignment to an atom array.
This is a convenience function, that wraps the :class:`DsspApp`
execution.
Parameters
----------
atom_array : AtomArray
The atom array to be annotated.
bin_path : str, optional
            Path of the DSSP binary.
Returns
-------
sse : ndarray, dtype="U1"
An array containing DSSP secondary structure symbols
corresponding to the residues in the input atom array.
"""
app = DsspApp(atom_array, bin_path)
app.start()
app.join()
return app.get_sse()
|
933 |
read sequence
|
"""Adapted from Chris Lowis' drum synthesis code in Javascript
https://github.com/chrislo/drum_synthesis
"""
import random
import json
from browser import bind, console, document, html, timer, window
import drum_score
class Config:
context = None
def setup():
if Config.context is None:
Config.context = window.AudioContext.new()
kick_freq = document['kick_freq']
class Kick:
checked = 'o'
def __init__(self):
setup()
def setup(self):
self.osc = Config.context.createOscillator()
self.gain = Config.context.createGain()
self.osc.connect(self.gain)
self.gain.connect(Config.context.destination)
def trigger(self, time=None):
time = time or Config.context.currentTime
self.setup()
self.osc.frequency.setValueAtTime(int(kick_freq.value), time)
self.gain.gain.setValueAtTime(1, time)
self.osc.frequency.exponentialRampToValueAtTime(0.01, time + 0.5)
self.gain.gain.exponentialRampToValueAtTime(0.01, time + 0.5)
self.osc.start(time)
self.osc.stop(time + 0.5)
class Snare:
checked = 'o'
def __init__(self):
setup()
self.setup()
def setup(self):
self.noise = Config.context.createBufferSource()
self.noise.buffer = self.noiseBuffer()
noiseFilter = Config.context.createBiquadFilter()
noiseFilter.type = 'highpass'
noiseFilter.frequency.value = 1000
self.noise.connect(noiseFilter)
self.noiseEnvelope = Config.context.createGain()
noiseFilter.connect(self.noiseEnvelope)
self.noiseEnvelope.connect(Config.context.destination)
def noiseBuffer(self):
bufferSize = Config.context.sampleRate
buffer = Config.context.createBuffer(1, bufferSize,
Config.context.sampleRate)
output = buffer.getChannelData(0)
for i in range(bufferSize):
output[i] = random.random() * 2 - 1
return buffer
def trigger(self, time=None):
time = time or Config.context.currentTime
self.osc = Config.context.createOscillator()
self.osc.type = 'triangle'
self.oscEnvelope = Config.context.createGain()
self.osc.connect(self.oscEnvelope)
self.oscEnvelope.connect(Config.context.destination)
self.noiseEnvelope.gain.cancelScheduledValues(time)
self.noiseEnvelope.gain.setValueAtTime(1, time)
self.noiseEnvelope.gain.exponentialRampToValueAtTime(0.01, time + 0.2)
self.noise.start(time)
self.osc.frequency.setValueAtTime(100, time)
self.oscEnvelope.gain.setValueAtTime(0.7, time)
self.oscEnvelope.gain.exponentialRampToValueAtTime(0.01, time + 0.1)
self.osc.start(time)
self.osc.stop(time + 0.2)
self.noise.stop(time + 0.2)
class HiHat:
buffer = None
checked = 'x'
def setup(self, time):
self.source = Config.context.createBufferSource()
self.source.buffer = self.buffer
self.source.connect(Config.context.destination)
self.play(time)
def trigger(self, time=None):
if self.buffer is None:
Config.context = window.AudioContext.new()
time = time or Config.context.currentTime
sampleLoader('samples/hihat.wav', HiHat, lambda: self.setup(time))
else:
time = time or Config.context.currentTime
self.setup(time)
def play(self, time):
time = Config.context.currentTime if time is None else time
self.source.start(time)
instruments = [HiHat, Snare, Kick]
score = drum_score.Score(instruments)
document['score'] <= html.DIV('Patterns')
document['score'] <= score
score.new_tab()
def sampleLoader(url, cls, callback):
request = window.XMLHttpRequest.new()
request.open("GET", url, True)
request.responseType = "arraybuffer"
def f(buffer):
cls.buffer = buffer
callback()
@bind(request, 'load')
def load(ev):
Config.context.decodeAudioData(request.response, f)
request.send()
load_button = document['load_score']
@bind(load_button, "input")
def file_read(ev):
def onload(event):
"""Triggered when file is read. The FileReader instance is
event.target.
The file content, as text, is the FileReader instance's "result"
attribute."""
global score
data = json.loads(event.target.result)
score = drum_score.Score(instruments)
document['score'].clear()
document['score'] <= score
score.patterns.value = data['patterns']
for i, notes in enumerate(data['bars']):
score.new_tab(notes=notes)
# set attribute "download" to file name
save_button.attrs["download"] = file.name
# Get the selected file as a DOM File object
file = load_button.files[0]
# Create a new DOM FileReader instance
reader = window.FileReader.new()
# Read the file content as text
reader.readAsText(file)
reader.bind("load", onload)
save_button = document['save_score']
@bind(save_button, "mousedown")
def mousedown(evt):
"""Create a "data URI" to set the downloaded file content
Cf. https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs
"""
patterns = score.patterns.value
bars = []
for bar in score.bars:
sbar = {}
for instrument in bar.notes:
sbar[instrument.__name__] = bar.notes[instrument]
bars.append(sbar)
data = json.dumps({'patterns': score.patterns.value, 'bars': bars})
content = window.encodeURIComponent(data)
# set attribute "href" of save link
save_button.attrs["download"] = 'drum_score.json'
save_button.attrs["href"] = "data:text/json," + content
look_ahead = 0.1
schedule_period = 1000 * 0.05 # milliseconds
bpm_control = document['bpm']
@bind('#bpm', 'input')
def change_bpm(ev):
Sequencer.METHOD_NAME()
def get_bpm():
return int(bpm_control.value)
class Sequencer:
running = False
pattern = None
@classmethod
def METHOD_NAME(cls):
cls.seq, cls.nb_bars = score.get_seq(get_bpm())
@bind('#start_loop', 'click')
def start_loop(ev):
setup()
if Sequencer.running:
return
Sequencer.METHOD_NAME()
if not Sequencer.seq:
return
Sequencer.running = True
Sequencer.pattern = None
loop(Config.context.currentTime, 0)
@bind('#end_loop', 'click')
def end_loop(ev):
Sequencer.running = False
def loop(t0, i):
dt = Config.context.currentTime - t0
if not Sequencer.running:
return
while dt > Sequencer.seq[i][1] - look_ahead:
line_num, t, pattern, cell = Sequencer.seq[i]
instrument = score.instruments[line_num]()
if pattern != Sequencer.pattern:
score.show_pattern(pattern)
Sequencer.pattern = pattern
score.flash(cell)
start = t0 + t
instrument.trigger(start + 0.1)
i += 1
if i >= len(Sequencer.seq):
i = 0
bpm = get_bpm()
t0 = t0 + Sequencer.nb_bars * 240 / bpm # bar duration (4 quarter notes)
Sequencer.METHOD_NAME()
break
timer.set_timeout(loop, schedule_period, t0, i)
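# Scheduling note (added for illustration): notes are queued look_ahead seconds before their
# audio-clock time and the loop re-arms every schedule_period ms. One pass through the score
# lasts nb_bars * 240 / bpm seconds, e.g. a single 4/4 bar at 120 bpm is 240 / 120 = 2.0 s.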
|
934 |
streams
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import copy
from base64 import standard_b64encode
from typing import Any, List, Mapping, Tuple
import pendulum
import requests
from airbyte_cdk.models import FailureType
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.METHOD_NAME import Stream
from airbyte_cdk.sources.METHOD_NAME.http.auth import Oauth2Authenticator
from airbyte_cdk.utils import AirbyteTracedException
from source_pinterest.reports import CampaignAnalyticsReport
from .METHOD_NAME import (
AdAccountAnalytics,
AdAccounts,
AdAnalytics,
AdGroupAnalytics,
AdGroups,
Ads,
BoardPins,
Boards,
BoardSectionPins,
BoardSections,
CampaignAnalytics,
Campaigns,
PinterestStream,
UserAccountAnalytics,
)
class SourcePinterest(AbstractSource):
def _validate_and_transform(self, config: Mapping[str, Any], amount_of_days_allowed_for_lookup: int = 89):
config = copy.deepcopy(config)
today = pendulum.today()
latest_date_allowed_by_api = today.subtract(days=amount_of_days_allowed_for_lookup)
start_date = config["start_date"]
if not start_date:
config["start_date"] = latest_date_allowed_by_api
else:
try:
config["start_date"] = pendulum.from_format(config["start_date"], "YYYY-MM-DD")
except ValueError:
message = "Entered `Start Date` does not match format YYYY-MM-DD"
raise AirbyteTracedException(
message=message,
internal_message=message,
failure_type=FailureType.config_error,
)
if (today - config["start_date"]).days > amount_of_days_allowed_for_lookup:
config["start_date"] = latest_date_allowed_by_api
return config
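    # Behaviour sketch (added for illustration): with the default 89-day window, a missing or
    # too-old start_date is clamped to the most recent date the API accepts, e.g.
    #     self._validate_and_transform({"start_date": "2020-01-01"})["start_date"]
    #     # -> pendulum date for (today - 89 days)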
@staticmethod
def get_authenticator(config):
config = config.get("credentials") or config
credentials_base64_encoded = standard_b64encode(
(config.get("client_id") + ":" + config.get("client_secret")).encode("ascii")
).decode("ascii")
auth = f"Basic {credentials_base64_encoded}"
return Oauth2Authenticator(
token_refresh_endpoint=f"{PinterestStream.url_base}oauth/token",
client_secret=config.get("client_secret"),
client_id=config.get("client_id"),
refresh_access_token_headers={"Authorization": auth},
refresh_token=config.get("refresh_token"),
)
def check_connection(self, logger, config) -> Tuple[bool, any]:
config = self._validate_and_transform(config)
authenticator = self.get_authenticator(config)
url = f"{PinterestStream.url_base}user_account"
auth_headers = {"Accept": "application/json", **authenticator.get_auth_header()}
try:
            response = requests.get(url, headers=auth_headers)
            response.raise_for_status()
return True, None
except requests.exceptions.RequestException as e:
return False, e
def METHOD_NAME(self, config: Mapping[str, Any]) -> List[Stream]:
config["authenticator"] = self.get_authenticator(config)
report_config = self._validate_and_transform(config, amount_of_days_allowed_for_lookup=913)
config = self._validate_and_transform(config)
status = ",".join(config.get("status")) if config.get("status") else None
return [
AdAccountAnalytics(AdAccounts(config), config=config),
AdAccounts(config),
AdAnalytics(Ads(AdAccounts(config), with_data_slices=False, config=config), config=config),
AdGroupAnalytics(AdGroups(AdAccounts(config), with_data_slices=False, config=config), config=config),
AdGroups(AdAccounts(config), status_filter=status, config=config),
Ads(AdAccounts(config), status_filter=status, config=config),
BoardPins(Boards(config), config=config),
BoardSectionPins(BoardSections(Boards(config), config=config), config=config),
BoardSections(Boards(config), config=config),
Boards(config),
CampaignAnalytics(Campaigns(AdAccounts(config), with_data_slices=False, config=config), config=config),
CampaignAnalyticsReport(AdAccounts(report_config), config=report_config),
Campaigns(AdAccounts(config), status_filter=status, config=config),
UserAccountAnalytics(None, config=config),
]
|
935 |
register endpoints
|
"""
OIDC/OAuth2 backend module.
"""
import datetime
import logging
from urllib.parse import urlparse
from idpyoidc.client.oauth2.stand_alone_client import StandAloneClient
from idpyoidc.server.user_authn.authn_context import UNSPECIFIED
from satosa.backends.base import BackendModule
from satosa.internal import AuthenticationInformation
from satosa.internal import InternalData
import satosa.logging_util as lu
from ..exception import SATOSAAuthenticationError
from ..exception import SATOSAError
from ..response import Redirect
UTC = datetime.timezone.utc
logger = logging.getLogger(__name__)
class IdpyOIDCBackend(BackendModule):
"""
Backend module for OIDC and OAuth 2.0, can be directly used.
"""
def __init__(self, auth_callback_func, internal_attributes, config, base_url, name):
"""
OIDC backend module.
:param auth_callback_func: Callback should be called by the module after the authorization
in the backend is done.
        :param internal_attributes: Mapping dictionary between SATOSA internal attribute names and
        the names returned by the underlying IdPs/OPs, as well as which attributes the calling SPs
        and RPs expect, and vice versa.
:param config: Configuration parameters for the module.
:param base_url: base url of the service
:param name: name of the plugin
:type auth_callback_func:
(satosa.context.Context, satosa.internal.InternalData) -> satosa.response.Response
:type internal_attributes: dict[string, dict[str, str | list[str]]]
:type config: dict[str, dict[str, str] | list[str]]
:type base_url: str
:type name: str
"""
super().__init__(auth_callback_func, internal_attributes, base_url, name)
# self.auth_callback_func = auth_callback_func
# self.config = config
self.client = StandAloneClient(config=config["client"], client_type="oidc")
self.client.do_provider_info()
self.client.do_client_registration()
_redirect_uris = self.client.context.claims.get_usage('redirect_uris')
if not _redirect_uris:
raise SATOSAError("Missing path in redirect uri")
self.redirect_path = urlparse(_redirect_uris[0]).path
def start_auth(self, context, internal_request):
"""
See super class method satosa.backends.base#start_auth
:type context: satosa.context.Context
:type internal_request: satosa.internal.InternalData
:rtype satosa.response.Redirect
"""
login_url = self.client.init_authorization()
return Redirect(login_url)
def METHOD_NAME(self):
"""
Creates a list of all the endpoints this backend module needs to listen to. In this case
it's the authentication response from the underlying OP that is redirected from the OP to
the proxy.
        :rtype: Sequence[(str, Callable[[satosa.context.Context], satosa.response.Response])]
:return: A list that can be used to map the request to SATOSA to this endpoint.
"""
url_map = []
url_map.append((f"^{self.redirect_path.lstrip('/')}$", self.response_endpoint))
return url_map
def response_endpoint(self, context, *args):
"""
Handles the authentication response from the OP.
:type context: satosa.context.Context
:type args: Any
:rtype: satosa.response.Response
:param context: SATOSA context
:param args: None
:return:
"""
_info = self.client.finalize(context.request)
self._check_error_response(_info, context)
userinfo = _info.get('userinfo')
id_token = _info.get('id_token')
if not id_token and not userinfo:
msg = "No id_token or userinfo, nothing to do.."
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.error(logline)
raise SATOSAAuthenticationError(context.state, "No user info available.")
        all_user_claims = dict(list((userinfo or {}).items()) + list((id_token or {}).items()))
msg = "UserInfo: {}".format(all_user_claims)
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.debug(logline)
internal_resp = self._translate_response(all_user_claims, _info["issuer"])
return self.auth_callback_func(context, internal_resp)
def _translate_response(self, response, issuer):
"""
Translates oidc response to SATOSA internal response.
:type response: dict[str, str]
:type issuer: str
        :rtype: InternalData
        :param response: Dictionary with attribute name as key.
        :param issuer: The OIDC OP that gave the response.
:return: A SATOSA internal response.
"""
timestamp_epoch = (
response.get("auth_time")
or response.get("iat")
or int(datetime.datetime.now(UTC).timestamp())
)
timestamp_dt = datetime.datetime.fromtimestamp(timestamp_epoch, UTC)
timestamp_iso = timestamp_dt.isoformat().replace("+00:00", "Z")
auth_class_ref = response.get("acr") or response.get("amr") or UNSPECIFIED
auth_info = AuthenticationInformation(auth_class_ref, timestamp_iso, issuer)
internal_resp = InternalData(auth_info=auth_info)
internal_resp.attributes = self.converter.to_internal("openid", response)
internal_resp.subject_id = response["sub"]
return internal_resp
def _check_error_response(self, response, context):
"""
Check if the response is an error response.
:param response: the response from finalize()
:type response: oic.oic.message
:raise SATOSAAuthenticationError: if the response is an OAuth error response
"""
if "error" in response:
msg = "{name} error: {error} {description}".format(
name=type(response).__name__,
error=response["error"],
description=response.get("error_description", ""),
)
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.debug(logline)
raise SATOSAAuthenticationError(context.state, "Access denied")
|
936 |
test hs256
|
"""JWKS tests"""
import base64
import json
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import load_der_x509_certificate
from django.urls.base import reverse
from jwt import PyJWKSet
from authentik.core.models import Application
from authentik.core.tests.utils import create_test_cert, create_test_flow
from authentik.crypto.models import CertificateKeyPair
from authentik.lib.generators import generate_id
from authentik.providers.oauth2.models import OAuth2Provider
from authentik.providers.oauth2.tests.utils import OAuthTestCase
TEST_CORDS_CERT = """
-----BEGIN CERTIFICATE-----
MIIB6jCCAZCgAwIBAgIRAOsdE3N7zETzs+7shTXGj5wwCgYIKoZIzj0EAwIwHjEc
MBoGA1UEAwwTYXV0aGVudGlrIDIwMjIuMTIuMjAeFw0yMzAxMTYyMjU2MjVaFw0y
NDAxMTIyMjU2MjVaMHgxTDBKBgNVBAMMQ0NsbDR2TzFJSGxvdFFhTGwwMHpES2tM
WENYdzRPUFF2eEtZN1NrczAuc2VsZi1zaWduZWQuZ29hdXRoZW50aWsuaW8xEjAQ
BgNVBAoMCWF1dGhlbnRpazEUMBIGA1UECwwLU2VsZi1zaWduZWQwWTATBgcqhkjO
PQIBBggqhkjOPQMBBwNCAAQAwOGam7AKOi5LKmb9lK1rAzA2JTppqrFiIaUdjqmH
ZICJP00Wt0dfqOtEjgMEv1Hhu1DmKZn2ehvpxwPSzBr5o1UwUzBRBgNVHREBAf8E
RzBFgkNCNkw4YlI0UldJRU42NUZLamdUTzV1YmRvNUZWdkpNS2lxdjFZeTRULnNl
bGYtc2lnbmVkLmdvYXV0aGVudGlrLmlvMAoGCCqGSM49BAMCA0gAMEUCIC/JAfnl
uC30ihqepbiMCaTaPMbL8Ka2Lk92IYfMhf46AiEAz9Kmv6HF2D4MK54iwhz2WqvF
8vo+OiGdTQ1Qoj7fgYU=
-----END CERTIFICATE-----
"""
TEST_CORDS_KEY = """
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIKy6mPLJc5v71InMMvYaxyXI3xXpwQTPLyAYWVFnZHVioAoGCCqGSM49
AwEHoUQDQgAEAMDhmpuwCjouSypm/ZStawMwNiU6aaqxYiGlHY6ph2SAiT9NFrdH
X6jrRI4DBL9R4btQ5imZ9nob6ccD0swa+Q==
-----END EC PRIVATE KEY-----
"""
class TestJWKS(OAuthTestCase):
"""Test JWKS view"""
def test_rs256(self):
"""Test JWKS request with RS256"""
provider = OAuth2Provider.objects.create(
name="test",
client_id="test",
authorization_flow=create_test_flow(),
redirect_uris="http://local.invalid",
signing_key=create_test_cert(),
)
app = Application.objects.create(name="test", slug="test", provider=provider)
response = self.client.get(
reverse("authentik_providers_oauth2:jwks", kwargs={"application_slug": app.slug})
)
body = json.loads(response.content.decode())
self.assertEqual(len(body["keys"]), 1)
PyJWKSet.from_dict(body)
key = body["keys"][0]
load_der_x509_certificate(base64.b64decode(key["x5c"][0]), default_backend()).public_key()
def METHOD_NAME(self):
"""Test JWKS request with HS256"""
provider = OAuth2Provider.objects.create(
name="test",
client_id="test",
authorization_flow=create_test_flow(),
redirect_uris="http://local.invalid",
)
app = Application.objects.create(name="test", slug="test", provider=provider)
response = self.client.get(
reverse("authentik_providers_oauth2:jwks", kwargs={"application_slug": app.slug})
)
self.assertJSONEqual(response.content.decode(), {})
def test_es256(self):
"""Test JWKS request with ES256"""
provider = OAuth2Provider.objects.create(
name="test",
client_id="test",
authorization_flow=create_test_flow(),
redirect_uris="http://local.invalid",
signing_key=create_test_cert(use_ec_private_key=True),
)
app = Application.objects.create(name="test", slug="test", provider=provider)
response = self.client.get(
reverse("authentik_providers_oauth2:jwks", kwargs={"application_slug": app.slug})
)
body = json.loads(response.content.decode())
self.assertEqual(len(body["keys"]), 1)
PyJWKSet.from_dict(body)
def test_ecdsa_coords_mismatched(self):
"""Test JWKS request with ES256"""
cert = CertificateKeyPair.objects.create(
name=generate_id(),
key_data=TEST_CORDS_KEY,
certificate_data=TEST_CORDS_CERT,
)
provider = OAuth2Provider.objects.create(
name="test",
client_id="test",
authorization_flow=create_test_flow(),
redirect_uris="http://local.invalid",
signing_key=cert,
)
app = Application.objects.create(name="test", slug="test", provider=provider)
response = self.client.get(
reverse("authentik_providers_oauth2:jwks", kwargs={"application_slug": app.slug})
)
body = json.loads(response.content.decode())
self.assertEqual(len(body["keys"]), 1)
PyJWKSet.from_dict(body)
|
937 |
load poses
|
import argparse
import glob
import json
import os
import re
import shutil
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
import PIL
from PIL import Image
from torchvision import transforms
parser = argparse.ArgumentParser(description="preprocess neural rgbd dataset to sdfstudio dataset")
parser.add_argument("--input_path", dest="input_path", help="path to scannet scene")
parser.add_argument("--output_path", dest="output_path", help="path to output")
parser.add_argument(
"--type",
dest="type",
default="mono_prior",
choices=["mono_prior", "sensor_depth"],
help="mono_prior to use monocular prior, sensor_depth to use depth captured with a depth sensor (gt depth)",
)
args = parser.parse_args()
def alphanum_key(s):
"""Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [int(x) if x.isdigit() else x for x in re.split("([0-9]+)", s)]
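# The pose file is expected to hold one 4x4 camera-to-world matrix per frame, written as
# four consecutive lines of four whitespace-separated floats; a matrix whose first line
# contains "nan" is marked invalid and replaced by an identity matrix in the loader below.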
def METHOD_NAME(posefile):
file = open(posefile, "r")
lines = file.readlines()
file.close()
poses = []
valid = []
lines_per_matrix = 4
for i in range(0, len(lines), lines_per_matrix):
if "nan" in lines[i]:
valid.append(False)
poses.append(np.eye(4, 4, dtype=np.float32).tolist())
else:
valid.append(True)
pose_floats = [[float(x) for x in line.split()] for line in lines[i : i + lines_per_matrix]]
poses.append(pose_floats)
return poses, valid
output_path = Path(args.output_path) # "data/neural_rgbd/breakfast_room/"
input_path = Path(args.input_path) # "data/neural_rgbd_data/breakfast_room/"
output_path.mkdir(parents=True, exist_ok=True)
# load color
color_path = input_path / "images"
color_paths = sorted(glob.glob(os.path.join(color_path, "*.png")), key=alphanum_key)
# load depth
depth_path = input_path / "depth_filtered"
depth_paths = sorted(glob.glob(os.path.join(depth_path, "*.png")), key=alphanum_key)
H, W = cv2.imread(depth_paths[0]).shape[:2]
print(H, W)
# load intrinsic
intrinsic_path = input_path / "focal.txt"
focal_length = np.loadtxt(intrinsic_path)
camera_intrinsic = np.eye(4)
camera_intrinsic[0, 0] = focal_length
camera_intrinsic[1, 1] = focal_length
camera_intrinsic[0, 2] = W * 0.5
camera_intrinsic[1, 2] = H * 0.5
print(camera_intrinsic)
# load pose
pose_path = input_path / "poses.txt"
poses, valid_poses = METHOD_NAME(pose_path)
poses = np.array(poses)
print(poses.shape)
# OpenGL/Blender convention, needs to change to COLMAP/OpenCV convention
# https://docs.nerf.studio/en/latest/quickstart/data_conventions.html
poses[:, 0:3, 1:3] *= -1
# deal with invalid poses
min_vertices = poses[:, :3, 3][valid_poses].min(axis=0)
max_vertices = poses[:, :3, 3][valid_poses].max(axis=0)
center = (min_vertices + max_vertices) / 2.0
scale = 2.0 / (np.max(max_vertices - min_vertices) + 3.0)
print(center, scale)
# we should normalize pose to unit cube
poses[:, :3, 3] -= center
poses[:, :3, 3] *= scale
# inverse normalization
scale_mat = np.eye(4).astype(np.float32)
scale_mat[:3, 3] -= center
scale_mat[:3] *= scale
scale_mat = np.linalg.inv(scale_mat)
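# After the inversion above, scale_mat undoes the unit-cube normalization, i.e. it maps
# normalized coordinates back to the original world frame; it is saved to meta_data.json
# below as "worldtogt".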
if args.type == "mono_prior":
    # center-crop the image when using monocular priors because omnidata expects 384x384 inputs
# get smallest side to generate square crop
target_crop = min(H, W)
target_size = 384
trans_totensor = transforms.Compose(
[
transforms.CenterCrop(target_crop),
transforms.Resize(target_size, interpolation=PIL.Image.BILINEAR),
]
)
# center crop by min_dim
offset_x = (W - target_crop) * 0.5
offset_y = (H - target_crop) * 0.5
camera_intrinsic[0, 2] -= offset_x
camera_intrinsic[1, 2] -= offset_y
# resize from min_dim x min_dim -> to 384 x 384
resize_factor = target_size / target_crop
camera_intrinsic[:2, :] *= resize_factor
# new H, W after center crop
H, W = target_size, target_size
K = camera_intrinsic
frames = []
out_index = 0
for idx, (valid, pose, image_path, depth_path) in enumerate(zip(valid_poses, poses, color_paths, depth_paths)):
if idx % 10 != 0:
continue
if not valid:
continue
target_image = output_path / f"{out_index:06d}_rgb.png"
print(target_image)
if args.type == "mono_prior":
img = Image.open(image_path)
img_tensor = trans_totensor(img)
img_tensor.save(target_image)
else:
shutil.copyfile(image_path, target_image)
rgb_path = str(target_image.relative_to(output_path))
frame = {
"rgb_path": rgb_path,
"camtoworld": pose.tolist(),
"intrinsics": K.tolist(),
}
if args.type == "mono_prior":
frame.update(
{
"mono_depth_path": rgb_path.replace("_rgb.png", "_depth.npy"),
"mono_normal_path": rgb_path.replace("_rgb.png", "_normal.npy"),
}
)
else:
frame["sensor_depth_path"] = rgb_path.replace("_rgb.png", "_depth.npy")
depth_map = cv2.imread(depth_path, -1)
# Convert depth to meters, then to "network units"
depth_shift = 1000.0
depth_maps = (np.array(depth_map) / depth_shift).astype(np.float32)
depth_maps *= scale
np.save(output_path / frame["sensor_depth_path"], depth_maps)
# color map gt depth for visualization
plt.imsave(output_path / frame["sensor_depth_path"].replace(".npy", ".png"), depth_maps, cmap="viridis")
frames.append(frame)
out_index += 1
# scene bbox for the neural rgbd scene
scene_box = {
"aabb": [[-1, -1, -1], [1, 1, 1]],
"near": 0.05,
"far": 2.5,
"radius": 1.0,
"collider_type": "box",
}
# meta data
output_data = {
"camera_model": "OPENCV",
"height": H,
"width": W,
"has_mono_prior": args.type == "mono_prior",
"has_sensor_depth": args.type == "sensor_depth",
"pairs": None,
"worldtogt": scale_mat.tolist(),
"scene_box": scene_box,
}
output_data["frames"] = frames
# save as json
with open(output_path / "meta_data.json", "w", encoding="utf-8") as f:
json.dump(output_data, f, indent=4)
|
938 |
parameter documentation footer
|
#!/usr/bin/python
#
# xml.etree.ElementTree is available in python 2.5 and higher.
# It can be downloaded to use with older python.
#
# This script reads an XML description of the Zoltan2 parameters,
# and writes a doxygen page with this information.
import xml.etree.ElementTree as ET
outfile = open("parameters.dox", "w")
def parameterDocumentationHeader():
outfile.write("/*! \\page z2_parameters Zoltan2 Parameters\n\n")
outfile.write("This page lists each Zoltan2 parameter and how to use it. The validators\n")
outfile.write("are classes of interest to Zoltan2 developers. They are used to evaluate\n")
outfile.write("the validity of parameter values at run time.\n\n")
def METHOD_NAME():
outfile.write("*/\n")
def parameterEnhancedNumber(pname, pinfo, pval):
desc = pinfo.get("docString")
min = pval.get("min")
max = pval.get("max")
step = pval.get("step")
outfile.write("- \\b "+pname+" \\anchor "+pname+"\n")
outfile.write(" - Description: "+desc+"\n")
outfile.write(" - Valid values:\n")
outfile.write(" - minimum is "+min+"\n")
outfile.write(" - maximum is "+max+"\n")
outfile.write(" - step is "+step+"\n")
outfile.write(" - Validator type: Teuchos::EnhancedNumberValidator\n")
outfile.write("\n")
def parameterIntegerRangeList(pname, pinfo, pval):
desc = pinfo.get("docString")
unsorted = pval.get("unsorted")
min="unset"
max="unset"
if "min" in pinfo.keys():
min = pval.get("min")
if "max" in pinfo.keys():
max = pval.get("max")
outfile.write("- \\b "+pname+" \\anchor "+pname+"\n")
outfile.write(" - Description: "+desc+"\n")
outfile.write(" - Valid values: a comma-separated list of\n")
outfile.write(" - numbers\n")
outfile.write(" - number ranges separated by a dash (\"1-5\")\n")
outfile.write(" - the word \"all\" to indicate all possible values\n")
if min != "unset":
outfile.write(" - minimum is: "+min+"\n")
if max != "unset":
outfile.write(" - maximum is: "+max+"\n")
outfile.write(" - Examples: \"1,2,7\", \"all\", \"5,1-15,80-82,99\"\n")
outfile.write(" - Validator type: Zoltan2::IntegerRangeListValidator\n")
if unsorted == "true":
outfile.write( "(list is not changed during processing)\n")
else:
outfile.write( "(list will be sorted, and duplicates removed, during processing)\n")
outfile.write("\n")
def parameterFileName(pname, pinfo, pval):
desc = pinfo.get("docString")
outfile.write("- \\b "+pname+" \\anchor "+pname+"\n")
outfile.write(" - Description: "+desc+"\n")
outfile.write(" - Validator type: Teuchos::FileNameValidator\n")
outfile.write("\n")
def parameterAnyNumber(pname, pinfo, pval):
desc = pinfo.get("docString")
validTypes = []
if pval.get("allowDouble") == "true":
validTypes.append("double")
if pval.get("allowInt") == "true":
validTypes.append("int")
if pval.get("allowString") == "true":
validTypes.append("string")
outfile.write("- \\b "+pname+" \\anchor "+pname+"\n")
outfile.write(" - Description: "+desc+"\n")
outfile.write(" - Valid values are any values of type:\n")
for vtype in validTypes:
outfile.write(" \\li "+vtype+"\n")
outfile.write(" - Validator type: Teuchos::AnyNumberParameterEntryValidator\n")
outfile.write("\n")
def parameterStringToIntegral(pname, pinfo, pval):
desc = pinfo.get("docString")
outfile.write("- \\b "+pname+" \\anchor "+pname+"\n")
outfile.write(" - Description: "+desc+"\n")
outfile.write(" - Valid values:\n")
for node in pval:
if node.tag == "String":
svalue = node.get("stringValue")
sdoc = "unset"
if "stringDoc" in node.keys():
sdoc = node.get("stringDoc")
if sdoc == "unset":
outfile.write(" \\li \\e "+svalue+"\n")
else:
outfile.write(" \\li \\e "+svalue+" "+sdoc+"\n")
outfile.write(" - Validator type: Teuchos::StringToIntegralParameterEntryValidator\n")
outfile.write("\n")
def parameterFileName(pname, pinfo, pval):
desc = pinfo.get("docString")
mustExist = pinfo.get("fileMustExist")
outfile.write("- \\b "+pname+" \\anchor "+pname+"\n")
outfile.write(" - Description: "+desc+"\n")
if mustExist == "true":
outfile.write(" File must exist.\n")
else:
outfile.write(" File need not already exist.\n")
outfile.write(" - Validator type: Teuchos::FileNameValidator\n")
outfile.write("\n")
def parameterString(pname, pinfo, pval):
desc = pinfo.get("docString")
outfile.write("- \\b "+pname+" \\anchor "+pname+"\n")
outfile.write(" - Description: "+desc+"\n")
outfile.write(" - Valid values:\n")
for node in pval:
if node.tag == "String":
outfile.write(" \\li \\e "+node.get("value")+"\n")
outfile.write(" - Validator type: Teuchos::StringValidator\n")
outfile.write("\n")
def writeInfo(param):
pname = param[0]
pinfo = param[1]
pval = param[2]
pvalidatorType = pval.get("type")
if pvalidatorType == "anynumberValidator":
parameterAnyNumber(pname, pinfo, pval)
elif pvalidatorType == "FilenameValidator":
parameterFileName(pname, pinfo, pval)
elif pvalidatorType == "StringValidator":
parameterString(pname, pinfo, pval)
elif "StringIntegralValidator" in pvalidatorType:
parameterStringToIntegral(pname, pinfo, pval)
elif "IntegerRangeListValidator" in pvalidatorType:
parameterIntegerRangeList(pname, pinfo, pval)
elif "EnhancedNumberValidator" in pvalidatorType:
parameterEnhancedNumber(pname, pinfo, pval)
else:
print "Error 4: This is not a valid Zoltan2 parameter list."
exit
##
## Begin
##
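# Expected input structure (illustrative sketch): a top-level <ParameterList> element
# containing a <Validators> element whose children each carry "validatorId" and "type"
# attributes, plus <Parameter> elements that reference their validator through a
# "validatorId" attribute and describe themselves via "name" and "docString" attributes.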
tree = ET.parse("../data/parameters.xml")
root = tree.getroot()
if root.tag != "ParameterList":
print "Error 1: This is not a valid Zoltan2 parameter list."
exit
validators = []
for node in root:
if node.tag == "Validators":
validators = node
break
if len(validators) == 0:
print "Error 1: This is not a valid Zoltan2 parameter list."
exit
# Create a dictionary of Validators
vals={}
for node in validators:
id = node.get("validatorId")
vals[id] = node
##
# Create list of a 3-tuples for each parameter, including
# the parameter name, its data, and its validator.
##
parameterInfo = []
for node in root:
if node.tag != "Parameter":
continue
id = node.get("validatorId")
if id not in vals.keys():
print "Error 3: This is not a valid Zoltan2 parameter list."
exit
paramName = node.get("name")
parameterInfo.append((paramName, node, vals[id]))
##
# Write the doxygen documentation for these parameters
##
parameterDocumentationHeader()
for info in sorted(set(parameterInfo)):
print "Parameter: ",info[0]
writeInfo(info)
METHOD_NAME()
outfile.close()
|
939 |
test gpu4 fp16 zero2
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy webtext data to "Megatron-LM" folder, before running this script.
"""
import unittest
import os
import re
from .BingBertSquad_test_common import BaseTestCase
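# grep_loss_from_file scans a training log for progress lines and returns the loss from
# the last matching line, e.g. "bert_squad_progress: step=8 ... loss=3.25" (illustrative
# format) yields 3.25; a return value of 0.0 means no matching line was found.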
def grep_loss_from_file(file_name):
loss = 0.0
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "bert_squad_progress: step="
match_number = re.compile(r'loss=([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
for line in lines:
if line_filter in line:
loss = re.findall(match_number, line)
loss = float(loss[0])
if loss == 0.0:
print("no loss found in file ", file_name)
return loss
class BingBertSquadFuncTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed function test on BingBertSquad model"):
super(BingBertSquadFuncTestCase, self).__init__(methodName)
def setUp(self):
self.save_dir = os.getcwd()
new_dir = os.path.dirname(__file__)
if new_dir:
os.chdir(new_dir)
def tearDown(self):
os.chdir(self.save_dir)
def test_gpu4_fp16(self):
test_config = {
"gpus": 4,
"deepspeed": False,
"json": "deepspeed_bsz24_fp16_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--fp16 --print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def METHOD_NAME(self):
test_config = {
"gpus": 4,
"deepspeed": False,
"json": "deepspeed_bsz24_fp16_zero2_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--fp16 --print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_gpu1_fp16(self):
test_config = {
"gpus": 1,
"deepspeed": False,
"json": "deepspeed_bsz24_fp16_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--fp16 --print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_gpu4_fp32(self):
test_config = {
"gpus": 4,
"deepspeed": False,
"json": "deepspeed_bsz24_fp32_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_gpu1_fp32(self):
test_config = {
"gpus": 1,
"deepspeed": False,
"json": "deepspeed_bsz24_fp32_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def run_test(self, test_config, r_tol):
print("\n")
print("{0}: starting......".format(self.id()))
prefix = "BingBertSquad_func"
test_config['other_args'] += f" --max_steps {test_config['max_steps']}"
test_config['other_args'] += f" --max_steps_per_epoch {test_config['max_epoch_steps']}"
# baseline run...
test_config["deepspeed"] = False
base_file = self.gen_output_name(test_config, prefix)
# skip baseline run if it exists.
if not self.has_loss_data(base_file):
print("{0}: baseline run.".format(self.id()))
self.run_BingBertSquad_test(test_config, base_file)
else:
print("{0}: baseline exists.".format(self.id()))
# DeepSpeed run...
test_config["deepspeed"] = True
print("{0}: DeepSpeed run.".format(self.id()))
test_file = self.gen_output_name(test_config, prefix)
self.run_BingBertSquad_test(test_config, test_file)
return self.check_parity(base_file, test_file, r_tol)
def has_loss_data(self, file_name):
has_loss = False
if os.path.exists(file_name):
loss = grep_loss_from_file(file_name)
if loss != 0.0:
has_loss = True
return has_loss
def check_parity(self, base_file, test_file, r_tol):
base_loss = grep_loss_from_file(base_file)
test_loss = grep_loss_from_file(test_file)
print("baseline loss: {0}, test loss: {1}".format(base_loss, test_loss))
if base_loss == 0.0 or test_loss == 0.0:
return False
if abs((base_loss - test_loss) / base_loss) > r_tol:
return False
return True
def suite():
suite = unittest.TestSuite()
suite.addTest(BingBertSquadFuncTestCase('test_gpu4_fp16'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu4_fp16_zero2'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu1_fp16'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu4_fp32'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu1_fp32'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(suite())
|
940 |
colorbar ctrl name
|
import logging
import threading
import time
import wx
from cellprofiler_core.setting import ValidationError
import cellprofiler.gui.constants.module_view as mv_constants
from cellprofiler.gui.module_view._validation_request_controller import (
ValidationRequestController,
)
LOGGER = logging.getLogger(__name__)
def text_control_name(v):
"""Return the name of a setting's text control
v - the setting
The text control name is built using the setting's key
"""
return "%s_text" % (str(v.key()))
def button_control_name(v, idx=None):
"""Return the name of a setting's button
v - the setting
idx - if present, the index of one of several buttons for the setting
"""
if idx is None:
return "%s_button" % (str(v.key()))
else:
return "%s_button_%d" % (str(v.key()), idx)
def edit_control_name(v):
"""Return the name of a setting's edit control
v - the setting
The edit control name is built using the setting's key
"""
return str(v.key())
def min_control_name(v):
"""For a range, return the control that sets the minimum value
v - the setting
"""
return "%s_min" % (str(v.key()))
def max_control_name(v):
"""For a range, return the control that sets the maximum value
v - the setting
"""
return "%s_max" % (str(v.key()))
def absrel_control_name(v):
"""For a range, return the control that chooses between absolute and relative
v - the setting
Absolute - far coordinate is an absolute value
From edge - far coordinate is a distance from the far edge
"""
return "%s_absrel" % (str(v.key()))
def x_control_name(v):
"""For coordinates, return the control that sets the x value
v - the setting
"""
return "%s_x" % (str(v.key()))
def y_control_name(v):
"""For coordinates, return the control that sets the y value
v - the setting
"""
return "%s_y" % (str(v.key()))
def category_control_name(v):
"""For measurements, return the control that sets the measurement category
v - the setting
"""
return "%s_category" % (str(v.key()))
def category_text_control_name(v):
return "%s_category_text" % (str(v.key()))
def feature_control_name(v):
"""For measurements, return the control that sets the feature name
v - the setting
"""
return "%s_feature" % (str(v.key()))
def feature_text_control_name(v):
return "%s_feature_text" % (str(v.key()))
def image_control_name(v):
"""For measurements, return the control that sets the image name
v - the setting
"""
return "%s_image" % (str(v.key()))
def image_text_control_name(v):
return "%s_image_text" % (str(v.key()))
def object_control_name(v):
"""For measurements, return the control that sets the object name
v - the setting
"""
return "%s_object" % (str(v.key()))
def object_text_control_name(v):
return "%s_object_text" % (str(v.key()))
def scale_control_name(v):
"""For measurements, return the control that sets the measurement scale
v - the setting
"""
return "%s_scale" % (str(v.key()))
def scale_text_ctrl_name(v):
return "%s_scale_text" % (str(v.key()))
def combobox_ctrl_name(v):
return "%s_combobox" % (str(v.key()))
def METHOD_NAME(v):
return "%s_colorbar" % (str(v.key()))
def help_ctrl_name(v):
return "%s_help" % str(v.key())
def subedit_control_name(v):
return "%s_subedit" % str(v.key())
def grid_control_name(v):
return "%s_grid" % str(v.key())
def custom_label_name(v):
return "%s_customlabel" % str(v.key())
def folder_label_name(v):
return "%s_folderlabel" % str(v.key())
def encode_label(text):
"""Encode text escapes for the static control and button labels
The ampersand (&) needs to be encoded as && for wx.StaticText
and wx.Button in order to keep it from signifying an accelerator.
"""
return text.replace("&", "&&")
def validate_module(pipeline, module_num, callback):
"""Validate a module and execute the callback on error on the main thread
pipeline - a pipeline to be validated
module_num - the module number of the module to be validated
callback - a callback with the signature, "fn(setting, message, pipeline_data)"
where setting is the setting that is in error and message is the message to
display.
"""
modules = [m for m in pipeline.modules() if m.module_num == module_num]
if len(modules) != 1:
return
module = modules[0]
level = logging.INFO
setting_idx = None
message = None
try:
level = logging.ERROR
module.test_valid(pipeline) # this method validates each visible
# setting first, then the module itself.
level = logging.WARNING
module.test_module_warnings(pipeline)
level = logging.INFO
except ValidationError as instance:
message = instance.message
setting_idx = [m.key() for m in module.visible_settings()].index(
instance.get_setting().key()
)
except Exception as e:
LOGGER.error("Error in validation thread", e)
wx.CallAfter(callback, setting_idx, message, level)
def validation_queue_handler():
try:
while mv_constants.validation_queue_keep_running:
request = mv_constants.validation_queue.get()
if (
not isinstance(request, ValidationRequestController)
or request.cancelled
):
continue
start = time.perf_counter()
try:
validate_module(
request.pipeline, request.module_num, request.callback,
)
except:
pass
            # Sleep at least as long as the validation took (min 0.25 s) so this
            # thread uses at most about half of the available GIL time
wait_for = max(0.25, time.perf_counter() - start)
time.sleep(wait_for)
except:
LOGGER.warning("Error in validation thread.", exc_info=True)
LOGGER.info("Exiting the pipeline validation thread")
def request_module_validation(validation_request):
"""Request that a module be validated
"""
if mv_constants.pipeline_queue_thread is None:
mv_constants.pipeline_queue_thread = threading.Thread(
target=validation_queue_handler
)
mv_constants.pipeline_queue_thread.setName("Pipeline validation thread")
mv_constants.pipeline_queue_thread.setDaemon(True)
mv_constants.pipeline_queue_thread.start()
mv_constants.validation_queue.put(validation_request)
def stop_validation_queue_thread():
"""Stop the thread that handles module validation"""
if mv_constants.pipeline_queue_thread is not None:
mv_constants.validation_queue_keep_running = False
mv_constants.validation_queue.put(None)
mv_constants.pipeline_queue_thread.join()
|
941 |
debug interactive
|
from __future__ import annotations
from collections.abc import AsyncIterator, Iterator, Callable
from typing import AsyncContextManager, Dict, cast, Any
from asyncio import AbstractEventLoop, Future, sleep
from contextlib import asynccontextmanager
from dataclasses import dataclass
import webbrowser as _webbrowser
import time
import os
from aiohttp.test_utils import TestClient
from aiohttp.web import Application
import pytest
from lona.shell.shell import embed_shell
from lona.worker_pool import WorkerPool
from lona.channels import Channel
from lona.server import Server
from lona import App
@dataclass
class LonaContext:
client: TestClient
app: None | App
server: Server
event_loop: AbstractEventLoop
pytestconfig: Any
def make_url(self, path: str = '') -> str:
"""
Takes a path and returns a full URL to the running test server.
This method is necessary because the Lona pytest plugin spins up a test
server that runs on an unprivileged random port, so URLs are not stable
between test runs.
        :path: (str) path to a view
"""
return str(self.client.make_url(path))
def METHOD_NAME(
self,
webbrowser: bool = True,
sync: bool = False,
globals: Dict[str, Any] | None = None,
) -> Future | None: # pragma: no cover
"""
This method pauses the current test by starting an rlpython shell and
starts a webbrowser that points to the currently running test server.
The test continues when the rlpython shell is exited.
Async Example:
async def test_my_feature(lona_app_context):
context = await lona_app_context(setup_app)
                await context.debug_interactive(globals=locals())
Sync Example:
async def test_my_feature(lona_app_context):
context = await lona_app_context(setup_app)
def run_test():
context.debug_interactive(
sync=True,
globals=locals(),
)
context.event_loop.run_in_executor(None, run_test)
:webbrowser: (bool) start a webbrowser that points to the test server
:sync: (bool) run blocking in the current thread
:globals: (dict|None) variable overrides for the rlpython shell
"""
capmanager = self.pytestconfig.pluginmanager.getplugin(
'capturemanager',
)
def _debug_interactive():
try:
# disable stdin and stdout capturing temporarily
capmanager.suspend_global_capture(in_=True)
# start browser
if webbrowser:
os.environ['DISPLAY'] = ':0'
_webbrowser.open(self.make_url())
# embed shell
_globals = globals or {}
_globals = {
'server': self.server,
'lona_context': self,
**_globals,
}
embed_shell(
server=self.server,
globals=_globals,
)
finally:
capmanager.resume_global_capture()
if sync:
_debug_interactive()
return None
return self.event_loop.run_in_executor(None, _debug_interactive)
@pytest.fixture()
def lona_app_context(request, aiohttp_client, event_loop, pytestconfig):
async def setup_lona_app_context(
setup_app: Callable[[App], None],
project_root: str = '',
) -> LonaContext:
# setup lona app
lona_app = App(project_root or str(request.fspath))
setup_app(lona_app)
# setup client
async def setup_aiohttp_app() -> Application:
lona_app.setup_server(loop=event_loop)
return cast(Application, lona_app.aiohttp_app)
client = await aiohttp_client(await setup_aiohttp_app())
return LonaContext(
client=client,
app=lona_app,
server=cast(Server, lona_app.server),
event_loop=event_loop,
pytestconfig=pytestconfig,
)
yield setup_lona_app_context
Channel._clear_state()
@pytest.fixture()
def lona_project_context(request, aiohttp_client, event_loop, pytestconfig):
async def setup_lona_project_context(
project_root: str = '',
settings: None | list[str] = None,
settings_pre_overrides: None | dict = None,
settings_post_overrides: None | dict = None,
) -> LonaContext:
# setup aiohttp app
server = None
async def setup_aiohttp_app() -> Application:
nonlocal server
# setup lona server
server = Server(
project_root=project_root or request.fspath,
settings_paths=settings or [],
settings_pre_overrides=settings_pre_overrides or {},
settings_post_overrides=settings_post_overrides or {},
)
server._loop = event_loop
server._worker_pool = WorkerPool(settings=server.settings)
return server._app
client = await aiohttp_client(await setup_aiohttp_app())
return LonaContext(
client=client,
app=None,
server=cast(Server, server),
event_loop=event_loop,
pytestconfig=pytestconfig,
)
yield setup_lona_project_context
Channel._clear_state()
def eventually(
timeout: float = 5,
interval: float = 1,
) -> Iterator[AsyncContextManager]:
"""
Wait for expected state in async test.
The function is meant to be iterated. Each time it returns new attempt.
Attempt is an async context manager to wrap assertions.
It suppresses all exceptions until time is out.
If no exception is raised iteration stops.
Example::
counter = 0
for attempt in eventually():
async with attempt:
counter += 1
assert counter > 3
:param timeout: time in seconds during which it produces new attempts
:param interval: seconds to sleep between attempts
"""
end_time = time.time() + timeout
success = False
while not success:
context_manager_was_used = False
@asynccontextmanager
async def attempt() -> AsyncIterator[None]:
nonlocal context_manager_was_used, success
context_manager_was_used = True
try:
yield # execute assertions
except Exception:
if time.time() > end_time:
raise
else:
await sleep(interval)
else:
success = True
yield attempt()
if not context_manager_was_used:
raise SyntaxError('context manager must be used')
|
942 |
delete
|
from sentry.tsdb.base import BaseTSDB
class DummyTSDB(BaseTSDB):
"""
A no-op time-series storage.
"""
def incr(self, model, key, timestamp=None, count=1, environment_id=None):
self.validate_arguments([model], [environment_id])
def merge(self, model, destination, sources, timestamp=None, environment_ids=None):
environment_ids = (set(environment_ids) if environment_ids is not None else set()).union(
[None]
)
self.validate_arguments([model], environment_ids)
def METHOD_NAME(self, models, keys, start=None, end=None, timestamp=None, environment_ids=None):
environment_ids = (set(environment_ids) if environment_ids is not None else set()).union(
[None]
)
self.validate_arguments(models, environment_ids)
def get_range(
self,
model,
keys,
start,
end,
rollup=None,
environment_ids=None,
use_cache=False,
jitter_value=None,
tenant_ids=None,
referrer_suffix=None,
):
self.validate_arguments([model], environment_ids if environment_ids is not None else [None])
_, series = self.get_optimal_rollup_series(start, end, rollup)
return {k: [(ts, 0) for ts in series] for k in keys}
def record(self, model, key, values, timestamp=None, environment_id=None):
self.validate_arguments([model], [environment_id])
def get_distinct_counts_series(
self, model, keys, start, end=None, rollup=None, environment_id=None
):
self.validate_arguments([model], [environment_id])
_, series = self.get_optimal_rollup_series(start, end, rollup)
return {k: [(ts, 0) for ts in series] for k in keys}
def get_distinct_counts_totals(
self,
model,
keys,
start,
end=None,
rollup=None,
environment_id=None,
use_cache=False,
jitter_value=None,
tenant_ids=None,
referrer_suffix=None,
):
self.validate_arguments([model], [environment_id])
return {k: 0 for k in keys}
def get_distinct_counts_union(
self, model, keys, start, end=None, rollup=None, environment_id=None
):
self.validate_arguments([model], [environment_id])
return 0
def merge_distinct_counts(
self, model, destination, sources, timestamp=None, environment_ids=None
):
environment_ids = (set(environment_ids) if environment_ids is not None else set()).union(
[None]
)
self.validate_arguments([model], environment_ids)
def delete_distinct_counts(
self, models, keys, start=None, end=None, timestamp=None, environment_ids=None
):
environment_ids = (set(environment_ids) if environment_ids is not None else set()).union(
[None]
)
self.validate_arguments(models, environment_ids)
def record_frequency_multi(self, requests, timestamp=None, environment_id=None):
self.validate_arguments([model for model, request in requests], [environment_id])
def get_most_frequent(
self, model, keys, start, end=None, rollup=None, limit=None, environment_id=None
):
self.validate_arguments([model], [environment_id])
return {key: [] for key in keys}
def get_most_frequent_series(
self, model, keys, start, end=None, rollup=None, limit=None, environment_id=None
):
self.validate_arguments([model], [environment_id])
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
return {key: [(timestamp, {}) for timestamp in series] for key in keys}
def get_frequency_series(self, model, items, start, end=None, rollup=None, environment_id=None):
self.validate_arguments([model], [environment_id])
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
results = {}
for key, members in items.items():
result = results[key] = []
for timestamp in series:
result.append((timestamp, {k: 0.0 for k in members}))
return results
def get_frequency_totals(self, model, items, start, end=None, rollup=None, environment_id=None):
self.validate_arguments([model], [environment_id])
results = {}
for key, members in items.items():
results[key] = {member: 0.0 for member in members}
return results
def merge_frequencies(self, model, destination, sources, timestamp=None, environment_ids=None):
environment_ids = list(
(set(environment_ids) if environment_ids is not None else set()).union([None])
)
self.validate_arguments([model], environment_ids)
def delete_frequencies(
self, models, keys, start=None, end=None, timestamp=None, environment_ids=None
):
environment_ids = (set(environment_ids) if environment_ids is not None else set()).union(
[None]
)
self.validate_arguments(models, environment_ids)
def flush(self):
pass
|
943 |
is valid enum type
|
from xsdata.codegen.mixins import RelativeHandlerInterface
from xsdata.codegen.models import Attr
from xsdata.codegen.models import AttrType
from xsdata.codegen.models import Class
from xsdata.formats.converter import converter
from xsdata.logger import logger
from xsdata.models.enums import DataType
class SanitizeAttributesDefaultValue(RelativeHandlerInterface):
"""
    Sanitize attributes' default values.
    Cases:
    1. Ignore enumerations.
    2. List fields cannot have a default value.
    3. Optional choice/sequence fields cannot have a default value.
    4. xsi:type fields are ignored; mark them as optional.
    5. Convert string literal default values for enum fields.
"""
__slots__ = ()
def process(self, target: Class):
for attr in target.attrs:
self.process_attribute(target, attr)
for choice in attr.choices:
self.process_attribute(target, choice)
def process_attribute(self, target: Class, attr: Attr):
if self.should_reset_required(attr):
attr.restrictions.min_occurs = 0
if self.should_reset_default(attr):
attr.fixed = False
attr.default = None
if attr.default is not None:
self.process_types(target, attr)
elif attr.xml_type is None and str in attr.native_types:
# String text nodes get an empty string as default!
attr.default = ""
def process_types(self, target: Class, attr: Attr):
if self.is_valid_external_value(target, attr):
return
if self.is_valid_native_value(target, attr):
return
logger.warning(
"Failed to match %s.%s default value `%s` to one of %s",
target.name,
attr.local_name,
attr.default,
[tp.qname for tp in attr.types],
)
self.reset_attribute_types(attr)
def is_valid_external_value(self, target: Class, attr: Attr) -> bool:
"""Return whether the default value of the given attr can be mapped to
user defined type like an enumeration or an inner complex content
class."""
for tp in attr.user_types:
source = self.find_type(target, tp)
if self.is_valid_inner_type(source, attr, tp):
return True
if self.METHOD_NAME(source, attr):
return True
return False
def find_type(self, target: Class, attr_type: AttrType) -> Class:
if attr_type.forward:
return self.container.find_inner(target, attr_type.qname)
return self.container.first(attr_type.qname)
@classmethod
def is_valid_inner_type(
cls, source: Class, attr: Attr, attr_type: AttrType
) -> bool:
"""Return whether the inner class can inherit the attr default value
and swap them as well."""
if attr_type.forward:
for src_attr in source.attrs:
if src_attr.xml_type is None:
src_attr.default = attr.default
src_attr.fixed = attr.fixed
attr.default = None
attr.fixed = False
return True
return False
@classmethod
def METHOD_NAME(cls, source: Class, attr: Attr) -> bool:
"""
Convert string literal default values to enumeration members
placeholders and return result.
The placeholders will be converted to proper references from the
generator filters.
Placeholder examples: Single -> @enum@qname::member_name
Multiple -> @enum@qname::first_member@second_member
"""
assert attr.default is not None
value_members = {x.default: x.name for x in source.attrs}
name = value_members.get(attr.default)
if name:
attr.default = f"@enum@{source.qname}::{name}"
return True
names = [
value_members[token]
for token in attr.default.split()
if token in value_members
]
if names:
attr.default = f"@enum@{source.qname}::{'@'.join(names)}"
return True
return False
@classmethod
def is_valid_native_value(cls, target: Class, attr: Attr) -> bool:
"""
Return whether the default value of the given attribute can be
converted successfully to and from xml.
        The test process for enumerations and fixed value fields is
strict, meaning the textual representation also needs to match
the original.
"""
assert attr.default is not None
types = converter.sort_types(attr.native_types)
if not types:
return False
if attr.restrictions.tokens:
tokens = attr.default.split()
else:
tokens = [attr.default]
if len(tokens) == 1 and attr.is_enumeration and attr.restrictions.tokens:
attr.restrictions.tokens = False
# Enumerations are also fixed!!!
strict = attr.fixed
return all(
converter.test(
token,
types,
strict=strict,
ns_map=target.ns_map,
format=attr.restrictions.format,
)
for token in tokens
)
@classmethod
def should_reset_required(cls, attr: Attr) -> bool:
"""
Return whether the min occurrences for the attr needs to be reset.
@Todo figure out if wildcards are supposed to be optional!
"""
return (
not attr.is_attribute
and attr.default is None
and object in attr.native_types
and not attr.is_list
)
@classmethod
def should_reset_default(cls, attr: Attr) -> bool:
"""
Return whether we should unset the default value of the attribute.
- Default value is not set
- Attribute is xsi:type (ignorable)
- Attribute is part of a choice
"""
return attr.default is not None and (
attr.is_xsi_type
or attr.is_list
or (not attr.is_attribute and attr.is_optional)
)
@classmethod
def reset_attribute_types(cls, attr: Attr):
attr.types.clear()
attr.types.append(AttrType(qname=str(DataType.STRING), native=True))
attr.restrictions.format = None
|
944 |
test syntax error for function definition
|
"""Unit tests for the keyword only argument specified in PEP 3102."""
__author__ = "Jiwon Seo"
__email__ = "seojiwon at gmail dot com"
import unittest
def posonly_sum(pos_arg1, *arg, **kwarg):
return pos_arg1 + sum(arg) + sum(kwarg.values())
def keywordonly_sum(*, k1=0, k2):
return k1 + k2
def keywordonly_nodefaults_sum(*, k1, k2):
return k1 + k2
def keywordonly_and_kwarg_sum(*, k1, k2, **kwarg):
return k1 + k2 + sum(kwarg.values())
def mixedargs_sum(a, b=0, *arg, k1, k2=0):
return a + b + k1 + k2 + sum(arg)
def mixedargs_sum2(a, b=0, *arg, k1, k2=0, **kwargs):
return a + b + k1 + k2 + sum(arg) + sum(kwargs.values())
def sortnum(*nums, reverse=False):
return sorted(list(nums), reverse=reverse)
def sortwords(*words, reverse=False, **kwargs):
return sorted(list(words), reverse=reverse)
class Foo:
def __init__(self, *, k1, k2=0):
self.k1 = k1
self.k2 = k2
def set(self, p1, *, k1, k2):
self.k1 = k1
self.k2 = k2
def sum(self):
return self.k1 + self.k2
class KeywordOnlyArgTestCase(unittest.TestCase):
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, "<test>", "single")
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def METHOD_NAME(self):
self.assertRaisesSyntaxError("def f(p, *):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, p1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *k1, k1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, k1, k1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, k1, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, None, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p, *, (k1, k2), **kw):\n pass\n")
def testSyntaxForManyArguments(self):
# more than 255 positional arguments, should compile ok
fundef = "def f(%s):\n pass\n" % ', '.join('i%d' % i for i in range(300))
compile(fundef, "<test>", "single")
# more than 255 keyword-only arguments, should compile ok
fundef = "def f(*, %s):\n pass\n" % ', '.join('i%d' % i for i in range(300))
compile(fundef, "<test>", "single")
def testTooManyPositionalErrorMessage(self):
def f(a, b=None, *, c=None):
pass
with self.assertRaises(TypeError) as exc:
f(1, 2, 3)
expected = "f() takes from 1 to 2 positional arguments but 3 were given"
self.assertEqual(str(exc.exception), expected)
def testSyntaxErrorForFunctionCall(self):
self.assertRaisesSyntaxError("f(p, k=1, p2)")
self.assertRaisesSyntaxError("f(p, k1=50, *(1,2), k1=100)")
def testRaiseErrorFuncallWithUnexpectedKeywordArgument(self):
self.assertRaises(TypeError, keywordonly_sum, ())
self.assertRaises(TypeError, keywordonly_nodefaults_sum, ())
self.assertRaises(TypeError, Foo, ())
try:
keywordonly_sum(k2=100, non_existing_arg=200)
self.fail("should raise TypeError")
except TypeError:
pass
try:
keywordonly_nodefaults_sum(k2=2)
self.fail("should raise TypeError")
except TypeError:
pass
def testFunctionCall(self):
self.assertEqual(1, posonly_sum(1))
self.assertEqual(1+2, posonly_sum(1,**{"2":2}))
self.assertEqual(1+2+3, posonly_sum(1,*(2,3)))
self.assertEqual(1+2+3+4, posonly_sum(1,*(2,3),**{"4":4}))
self.assertEqual(1, keywordonly_sum(k2=1))
self.assertEqual(1+2, keywordonly_sum(k1=1, k2=2))
self.assertEqual(1+2, keywordonly_and_kwarg_sum(k1=1, k2=2))
self.assertEqual(1+2+3, keywordonly_and_kwarg_sum(k1=1, k2=2, k3=3))
self.assertEqual(1+2+3+4,
keywordonly_and_kwarg_sum(k1=1, k2=2,
**{"a":3,"b":4}))
self.assertEqual(1+2, mixedargs_sum(1, k1=2))
self.assertEqual(1+2+3, mixedargs_sum(1, 2, k1=3))
self.assertEqual(1+2+3+4, mixedargs_sum(1, 2, k1=3, k2=4))
self.assertEqual(1+2+3+4+5, mixedargs_sum(1, 2, 3, k1=4, k2=5))
self.assertEqual(1+2, mixedargs_sum2(1, k1=2))
self.assertEqual(1+2+3, mixedargs_sum2(1, 2, k1=3))
self.assertEqual(1+2+3+4, mixedargs_sum2(1, 2, k1=3, k2=4))
self.assertEqual(1+2+3+4+5, mixedargs_sum2(1, 2, 3, k1=4, k2=5))
self.assertEqual(1+2+3+4+5+6,
mixedargs_sum2(1, 2, 3, k1=4, k2=5, k3=6))
self.assertEqual(1+2+3+4+5+6,
mixedargs_sum2(1, 2, 3, k1=4, **{'k2':5, 'k3':6}))
self.assertEqual(1, Foo(k1=1).sum())
self.assertEqual(1+2, Foo(k1=1,k2=2).sum())
self.assertEqual([1,2,3], sortnum(3,2,1))
self.assertEqual([3,2,1], sortnum(1,2,3, reverse=True))
self.assertEqual(['a','b','c'], sortwords('a','c','b'))
self.assertEqual(['c','b','a'], sortwords('a','c','b', reverse=True))
self.assertEqual(['c','b','a'],
sortwords('a','c','b', reverse=True, ignore='ignore'))
def testKwDefaults(self):
def foo(p1,p2=0, *, k1, k2=0):
return p1 + p2 + k1 + k2
self.assertEqual(2, foo.__code__.co_kwonlyargcount)
self.assertEqual({"k2":0}, foo.__kwdefaults__)
foo.__kwdefaults__ = {"k1":0}
try:
foo(1,k1=10)
self.fail("__kwdefaults__ is not properly changed")
except TypeError:
pass
def test_kwonly_methods(self):
class Example:
def f(self, *, k1=1, k2=2):
return k1, k2
self.assertEqual(Example().f(k1=1, k2=2), (1, 2))
self.assertEqual(Example.f(Example(), k1=1, k2=2), (1, 2))
self.assertRaises(TypeError, Example.f, k1=1, k2=2)
def test_issue13343(self):
# The Python compiler must scan all symbols of a function to
# determine their scope: global, local, cell...
# This was not done for the default values of keyword
# arguments in a lambda definition, and the following line
# used to fail with a SystemError.
lambda *, k1=unittest: None
def test_mangling(self):
class X:
def f(self, *, __a=42):
return __a
self.assertEqual(X().f(), 42)
def test_default_evaluation_order(self):
# See issue 16967
a = 42
with self.assertRaises(NameError) as err:
def f(v=a, x=b, *, y=c, z=d):
pass
self.assertEqual(str(err.exception), "name 'b' is not defined")
with self.assertRaises(NameError) as err:
f = lambda v=a, x=b, *, y=c, z=d: None
self.assertEqual(str(err.exception), "name 'b' is not defined")
if __name__ == "__main__":
unittest.main()
|
945 |
safe repr
|
"""Various utility functions."""
from collections import namedtuple, OrderedDict
from os.path import commonprefix
__unittest = True
_MAX_LENGTH = 80
_PLACEHOLDER_LEN = 12
_MIN_BEGIN_LEN = 5
_MIN_END_LEN = 5
_MIN_COMMON_LEN = 5
_MIN_DIFF_LEN = _MAX_LENGTH - \
(_MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN +
_PLACEHOLDER_LEN + _MIN_END_LEN)
assert _MIN_DIFF_LEN >= 0
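# _shorten keeps the first `prefixlen` and last `suffixlen` characters of s and replaces
# the middle with a "[N chars]" placeholder once more than _PLACEHOLDER_LEN characters
# would be elided, e.g. _shorten('A' * 100, 5, 5) -> 'AAAAA[90 chars]AAAAA'.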
def _shorten(s, prefixlen, suffixlen):
skip = len(s) - prefixlen - suffixlen
if skip > _PLACEHOLDER_LEN:
s = '%s[%d chars]%s' % (s[:prefixlen], skip, s[len(s) - suffixlen:])
return s
def _common_shorten_repr(*args):
args = tuple(map(METHOD_NAME, args))
maxlen = max(map(len, args))
if maxlen <= _MAX_LENGTH:
return args
prefix = commonprefix(args)
prefixlen = len(prefix)
common_len = _MAX_LENGTH - \
(maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN)
if common_len > _MIN_COMMON_LEN:
assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \
(maxlen - prefixlen) < _MAX_LENGTH
prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len)
return tuple(prefix + s[prefixlen:] for s in args)
prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN)
return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN)
for s in args)
def METHOD_NAME(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
def strclass(cls):
return "%s.%s" % (cls.__module__, cls.__name__)
def sorted_list_difference(expected, actual):
"""Finds elements in only one or the other of two, sorted input lists.
Returns a two-element tuple of lists. The first list contains those
elements in the "expected" list but not in the "actual" list, and the
second contains those elements in the "actual" list but not in the
"expected" list. Duplicate elements in either input list are ignored.
"""
i = j = 0
missing = []
unexpected = []
while True:
try:
e = expected[i]
a = actual[j]
if e < a:
missing.append(e)
i += 1
while expected[i] == e:
i += 1
elif e > a:
unexpected.append(a)
j += 1
while actual[j] == a:
j += 1
else:
i += 1
try:
while expected[i] == e:
i += 1
finally:
j += 1
while actual[j] == a:
j += 1
except IndexError:
missing.extend(expected[i:])
unexpected.extend(actual[j:])
break
return missing, unexpected
def unorderable_list_difference(expected, actual):
"""Same behavior as sorted_list_difference but
for lists of unorderable items (like dicts).
As it does a linear search per item (remove) it
has O(n*n) performance."""
missing = []
while expected:
item = expected.pop()
try:
actual.remove(item)
except ValueError:
missing.append(item)
# anything left in actual is unexpected
return missing, actual
def three_way_cmp(x, y):
"""Return -1 if x < y, 0 if x == y and 1 if x > y"""
return (x > y) - (x < y)
_Mismatch = namedtuple('Mismatch', 'actual expected value')
def _count_diff_all_purpose(actual, expected):
'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
# elements need not be hashable
s, t = list(actual), list(expected)
m, n = len(s), len(t)
NULL = object()
result = []
for i, elem in enumerate(s):
if elem is NULL:
continue
cnt_s = cnt_t = 0
for j in range(i, m):
if s[j] == elem:
cnt_s += 1
s[j] = NULL
for j, other_elem in enumerate(t):
if other_elem == elem:
cnt_t += 1
t[j] = NULL
if cnt_s != cnt_t:
diff = _Mismatch(cnt_s, cnt_t, elem)
result.append(diff)
for i, elem in enumerate(t):
if elem is NULL:
continue
cnt_t = 0
for j in range(i, n):
if t[j] == elem:
cnt_t += 1
t[j] = NULL
diff = _Mismatch(0, cnt_t, elem)
result.append(diff)
return result
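# Illustrative result (assumed example): per-element counts are compared, so
#   _count_diff_all_purpose([1, 1, 2], [1, 3])
# returns
#   [Mismatch(actual=2, expected=1, value=1),
#    Mismatch(actual=1, expected=0, value=2),
#    Mismatch(actual=0, expected=1, value=3)]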
def _ordered_count(iterable):
'Return dict of element counts, in the order they were first seen'
c = OrderedDict()
for elem in iterable:
c[elem] = c.get(elem, 0) + 1
return c
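# Illustrative example (assumed): _ordered_count('abca')
#   -> OrderedDict([('a', 2), ('b', 1), ('c', 1)])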
def _count_diff_hashable(actual, expected):
'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
# elements must be hashable
s, t = _ordered_count(actual), _ordered_count(expected)
result = []
for elem, cnt_s in s.items():
cnt_t = t.get(elem, 0)
if cnt_s != cnt_t:
diff = _Mismatch(cnt_s, cnt_t, elem)
result.append(diff)
for elem, cnt_t in t.items():
if elem not in s:
diff = _Mismatch(0, cnt_t, elem)
result.append(diff)
return result
|
946 |
get variable
|
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
import tensorflow as tf
from tensorflow.python.ops.rnn_cell_impl import RNNCell
def METHOD_NAME(variable_dict, name, shape, initializer=None, dtype=tf.float32):
if name not in variable_dict:
variable_dict[name] = tf.get_variable(
name=name, shape=shape, initializer=initializer, dtype=dtype)
return variable_dict[name]
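# Minimal usage sketch (assumed, TF1-style graph mode): the helper caches variables
# in the supplied dict, so repeated calls with the same name reuse one variable.
#   variables = {}
#   w = METHOD_NAME(variables, 'W', [128, 64])        # creates the tf.get_variable
#   w_again = METHOD_NAME(variables, 'W', [128, 64])  # returns the cached one (w is w_again)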
class DotAttention:
'''
DotAttention
'''
def __init__(self, name,
hidden_dim,
is_vanilla=True,
is_identity_transform=False,
need_padding=False):
self._name = '/'.join([name, 'dot_att'])
self._hidden_dim = hidden_dim
self._is_identity_transform = is_identity_transform
self._need_padding = need_padding
self._is_vanilla = is_vanilla
self._var = {}
@property
def is_identity_transform(self):
return self._is_identity_transform
@property
def is_vanilla(self):
return self._is_vanilla
@property
def need_padding(self):
return self._need_padding
@property
def hidden_dim(self):
return self._hidden_dim
@property
def name(self):
return self._name
@property
def var(self):
return self._var
def _get_var(self, name, shape, initializer=None):
with tf.variable_scope(self.name):
return METHOD_NAME(self.var, name, shape, initializer)
def _define_params(self, src_dim, tgt_dim):
hidden_dim = self.hidden_dim
self._get_var('W', [src_dim, hidden_dim])
if not self.is_vanilla:
self._get_var('V', [src_dim, hidden_dim])
if self.need_padding:
self._get_var('V_s', [src_dim, src_dim])
self._get_var('V_t', [tgt_dim, tgt_dim])
if not self.is_identity_transform:
self._get_var('T', [tgt_dim, src_dim])
self._get_var('U', [tgt_dim, hidden_dim])
self._get_var('b', [1, hidden_dim])
self._get_var('v', [hidden_dim, 1])
def get_pre_compute(self, s):
'''
:param s: [src_sequence, batch_size, src_dim]
        :return: [src_sequence, batch_size, hidden_dim]
'''
hidden_dim = self.hidden_dim
src_dim = s.get_shape().as_list()[-1]
assert src_dim is not None, 'src dim must be defined'
W = self._get_var('W', shape=[src_dim, hidden_dim])
b = self._get_var('b', shape=[1, hidden_dim])
return tf.tensordot(s, W, [[2], [0]]) + b
def get_prob(self, src, tgt, mask, pre_compute, return_logits=False):
'''
:param s: [src_sequence_length, batch_size, src_dim]
:param h: [batch_size, tgt_dim] or [tgt_sequence_length, batch_size, tgt_dim]
:param mask: [src_sequence_length, batch_size]\
            or [tgt_sequence_length, src_sequence_length, batch_size]
:param pre_compute: [src_sequence_length, batch_size, hidden_dim]
:return: [src_sequence_length, batch_size]\
or [tgt_sequence_length, src_sequence_length, batch_size]
'''
s_shape = src.get_shape().as_list()
h_shape = tgt.get_shape().as_list()
src_dim = s_shape[-1]
tgt_dim = h_shape[-1]
assert src_dim is not None, 'src dimension must be defined'
assert tgt_dim is not None, 'tgt dimension must be defined'
self._define_params(src_dim, tgt_dim)
if len(h_shape) == 2:
tgt = tf.expand_dims(tgt, 0)
if pre_compute is None:
pre_compute = self.get_pre_compute(src)
buf0 = pre_compute
buf1 = tf.tensordot(tgt, self.var['U'], axes=[[2], [0]])
buf2 = tf.tanh(tf.expand_dims(buf0, 0) + tf.expand_dims(buf1, 1))
if not self.is_vanilla:
xh1 = tgt
xh2 = tgt
s1 = src
if self.need_padding:
xh1 = tf.tensordot(xh1, self.var['V_t'], 1)
                xh2 = tf.tensordot(xh2, self.var['V_t'], 1)  # assumed fix: 'S_t' is never defined in _define_params, 'V_t' appears intended
s1 = tf.tensordot(s1, self.var['V_s'], 1)
if not self.is_identity_transform:
xh1 = tf.tensordot(xh1, self.var['T'], 1)
xh2 = tf.tensordot(xh2, self.var['T'], 1)
buf3 = tf.expand_dims(s1, 0) * tf.expand_dims(xh1, 1)
buf3 = tf.tanh(tf.tensordot(buf3, self.var['V'], axes=[[3], [0]]))
buf = tf.reshape(tf.tanh(buf2 + buf3), shape=tf.shape(buf3))
else:
buf = buf2
v = self.var['v']
e = tf.tensordot(buf, v, [[3], [0]])
e = tf.squeeze(e, axis=[3])
tmp = tf.reshape(e + (mask - 1) * 10000.0, shape=tf.shape(e))
prob = tf.nn.softmax(tmp, 1)
if len(h_shape) == 2:
prob = tf.squeeze(prob, axis=[0])
tmp = tf.squeeze(tmp, axis=[0])
if return_logits:
return prob, tmp
return prob
def get_att(self, s, prob):
'''
:param s: [src_sequence_length, batch_size, src_dim]
:param prob: [src_sequence_length, batch_size]\
or [tgt_sequence_length, src_sequence_length, batch_size]
:return: [batch_size, src_dim] or [tgt_sequence_length, batch_size, src_dim]
'''
buf = s * tf.expand_dims(prob, axis=-1)
att = tf.reduce_sum(buf, axis=-3)
return att
|
947 |
highest
|
import hashlib
import os
import stat
import traceback
from enum import Enum
from pathlib import Path
from typing import List, Optional, Set, Union, final
import attr
@attr.define(kw_only=True, frozen=True)
class Report:
"""A common base class for different reports."""
def asdict(self) -> dict:
return attr.asdict(self)
class Severity(Enum):
"""Represents possible problems encountered during execution."""
ERROR = "ERROR"
WARNING = "WARNING"
@attr.define(kw_only=True, frozen=True)
class ErrorReport(Report):
severity: Severity
STR = Union[str, Exception]
"""STR is a workaround for UnknownError.exception & pyright, do not use elsewhere!
Note that when you inline this type, the documentation for UnknownError.exception
as generated by pyright will be wrong.
It will also break the formatting for `black`, which is a big issue, as we have
a pyright comment there to work around pyright.
See:
- https://www.attrs.org/en/stable/types.html#pyright
- https://github.com/microsoft/pyright/blob/main/docs/comments.md#line-level-diagnostic-suppression
"""
def _convert_exception_to_str(obj: Union[str, Exception]) -> str:
if isinstance(obj, str):
return obj
if isinstance(obj, Exception):
e: Exception = obj
return "".join(traceback.format_exception(type(e), e, e.__traceback__))
raise ValueError("Invalid exception object", obj)
@attr.define(kw_only=True, frozen=True)
class UnknownError(ErrorReport):
"""Describes an exception raised during file processing."""
severity: Severity = attr.field(default=Severity.ERROR)
exception: STR = attr.field( # pyright: ignore[reportGeneralTypeIssues]
converter=_convert_exception_to_str
)
"""Exceptions are also formatted at construct time.
`attrs` is not integrated enough with type checker/LSP provider `pyright` to include converters.
See: https://www.attrs.org/en/stable/types.html#pyright
"""
@attr.define(kw_only=True, frozen=True)
class CalculateChunkExceptionReport(UnknownError):
"""Describes an exception raised during calculate_chunk execution."""
start_offset: int
    # Stored in `str` rather than `Handler`, because the pickle picks up structs from `C_DEFINITIONS`
handler: str
@attr.define(kw_only=True, frozen=True)
class CalculateMultiFileExceptionReport(UnknownError):
"""Describes an exception raised during calculate_chunk execution."""
path: Path
    # Stored in `str` rather than `Handler`, because the pickle picks up structs from `C_DEFINITIONS`
handler: str
@attr.define(kw_only=True, frozen=True)
class ExtractCommandFailedReport(ErrorReport):
"""Describes an error when failed to run the extraction command."""
severity: Severity = Severity.WARNING
command: str
stdout: bytes
stderr: bytes
exit_code: int
@attr.define(kw_only=True, frozen=True)
class ExtractDirectoryExistsReport(ErrorReport):
severity: Severity = Severity.ERROR
path: Path
@attr.define(kw_only=True, frozen=True)
class ExtractorDependencyNotFoundReport(ErrorReport):
"""Describes an error when the dependency of an extractor doesn't exist."""
severity: Severity = Severity.ERROR
dependencies: List[str]
@attr.define(kw_only=True, frozen=True)
class ExtractorTimedOut(ErrorReport):
"""Describes an error when the extractor execution timed out."""
severity: Severity = Severity.ERROR
cmd: str
timeout: float
@attr.define(kw_only=True, frozen=True)
class MaliciousSymlinkRemoved(ErrorReport):
"""Describes an error when malicious symlinks have been removed from disk."""
severity: Severity = Severity.WARNING
link: str
target: str
@attr.define(kw_only=True, frozen=True)
class MultiFileCollisionReport(ErrorReport):
"""Describes an error when MultiFiles collide on the same file."""
severity: Severity = Severity.ERROR
paths: Set[Path]
handler: str
@attr.define(kw_only=True, frozen=True)
class StatReport(Report):
path: Path
size: int
is_dir: bool
is_file: bool
is_link: bool
link_target: Optional[Path]
@classmethod
def from_path(cls, path: Path):
st = path.lstat()
mode = st.st_mode
try:
link_target = Path(os.readlink(path))
except OSError:
link_target = None
return cls(
path=path,
size=st.st_size,
is_dir=stat.S_ISDIR(mode),
is_file=stat.S_ISREG(mode),
is_link=stat.S_ISLNK(mode),
link_target=link_target,
)
@attr.define(kw_only=True, frozen=True)
class HashReport(Report):
md5: str
sha1: str
sha256: str
@classmethod
def from_path(cls, path: Path):
chunk_size = 1024 * 64
md5 = hashlib.md5() # noqa: S324
sha1 = hashlib.sha1() # noqa: S324
sha256 = hashlib.sha256()
with path.open("rb") as f:
while chunk := f.read(chunk_size):
md5.update(chunk)
sha1.update(chunk)
sha256.update(chunk)
return cls(
md5=md5.hexdigest(),
sha1=sha1.hexdigest(),
sha256=sha256.hexdigest(),
)
@attr.define(kw_only=True, frozen=True)
class FileMagicReport(Report):
magic: str
mime_type: str
@attr.define(kw_only=True, frozen=True)
class EntropyReport(Report):
percentages: List[float]
block_size: int
mean: float
@property
def METHOD_NAME(self):
return max(self.percentages)
@property
def lowest(self):
return min(self.percentages)
@final
@attr.define(kw_only=True, frozen=True)
class ChunkReport(Report):
id: str # noqa: A003
handler_name: str
start_offset: int
end_offset: int
size: int
is_encrypted: bool
extraction_reports: List[Report]
@final
@attr.define(kw_only=True, frozen=True)
class UnknownChunkReport(Report):
id: str # noqa: A003
start_offset: int
end_offset: int
size: int
entropy: Optional[EntropyReport]
@final
@attr.define(kw_only=True, frozen=True)
class MultiFileReport(Report):
id: str # noqa: A003
handler_name: str
name: str
paths: List[Path]
extraction_reports: List[Report]
@attr.define(kw_only=True, frozen=True)
class ExtractionProblem(Report):
"""A non-fatal problem discovered during extraction.
    A report like this still means that the extraction was successful,
but there were problems that got resolved.
The output is expected to be complete, with the exception of
the reported path.
Examples
--------
- duplicate entries for certain archive formats (tar, zip)
- unsafe symlinks pointing outside of extraction directory
"""
problem: str
resolution: str
path: Optional[str] = None
@property
def log_msg(self):
return f"{self.problem} {self.resolution}"
def log_with(self, logger):
logger.warning(self.log_msg, path=self.path)
@attr.define(kw_only=True, frozen=True)
class PathTraversalProblem(ExtractionProblem):
extraction_path: str
def log_with(self, logger):
logger.warning(
self.log_msg,
path=self.path,
extraction_path=self.extraction_path,
)
@attr.define(kw_only=True, frozen=True)
class LinkExtractionProblem(ExtractionProblem):
link_path: str
def log_with(self, logger):
logger.warning(self.log_msg, path=self.path, link_path=self.link_path)
@attr.define(kw_only=True, frozen=True)
class SpecialFileExtractionProblem(ExtractionProblem):
mode: int
device: int
def log_with(self, logger):
logger.warning(self.log_msg, path=self.path, mode=self.mode, device=self.device)
|
948 |
module rhel client by ip
|
"""Test class for Job Invocation procedure
:Requirement: JobInvocation
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: RemoteExecution
:Team: Endeavour
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import pytest
from inflection import camelize
from robottelo.utils.datafactory import gen_string
@pytest.fixture
def METHOD_NAME(module_org, smart_proxy_location, rhel7_contenthost, target_sat):
"""Setup a broker rhel client to be used in remote execution by ip"""
rhel7_contenthost.configure_rex(satellite=target_sat, org=module_org)
target_sat.api_factory.update_vm_host_location(
rhel7_contenthost, location_id=smart_proxy_location.id
)
yield rhel7_contenthost
@pytest.mark.tier4
def test_positive_run_default_job_template_by_ip(
session, module_org, smart_proxy_location, METHOD_NAME
):
"""Run a job template on a host connected by ip
:id: 9a90aa9a-00b4-460e-b7e6-250360ee8e4d
:Setup: Use pre-defined job template.
:Steps:
1. Set remote_execution_connect_by_ip on host to true
2. Navigate to an individual host and click Run Job
3. Select the job and appropriate template
4. Run the job
    :expectedresults: Verify the job was successfully run against the host
:parametrized: yes
:CaseLevel: System
"""
hostname = METHOD_NAME.hostname
with session:
session.organization.select(module_org.name)
session.location.select(smart_proxy_location.name)
assert session.host.search(hostname)[0]['Name'] == hostname
session.jobinvocation.run(
{
'job_category': 'Commands',
'job_template': 'Run Command - Script Default',
'search_query': f'name ^ {hostname}',
'template_content.command': 'ls',
}
)
session.jobinvocation.wait_job_invocation_state(entity_name='Run ls', host_name=hostname)
status = session.jobinvocation.read(entity_name='Run ls', host_name=hostname)
assert status['overview']['hosts_table'][0]['Status'] == 'success'
@pytest.mark.tier4
def test_positive_run_custom_job_template_by_ip(
session, module_org, smart_proxy_location, METHOD_NAME
):
"""Run a job template on a host connected by ip
:id: e283ae09-8b14-4ce1-9a76-c1bbd511d58c
:Setup: Create a working job template.
:Steps:
1. Set remote_execution_connect_by_ip on host to true
2. Navigate to an individual host and click Run Job
3. Select the job and appropriate template
4. Run the job
    :expectedresults: Verify the job was successfully run against the host
:parametrized: yes
:CaseLevel: System
"""
hostname = METHOD_NAME.hostname
job_template_name = gen_string('alpha')
with session:
session.organization.select(module_org.name)
session.location.select(smart_proxy_location.name)
assert session.host.search(hostname)[0]['Name'] == hostname
session.jobtemplate.create(
{
'template.name': job_template_name,
'template.template_editor.rendering_options': 'Editor',
'template.template_editor.editor': '<%= input("command") %>',
'job.provider_type': 'Script',
'inputs': [{'name': 'command', 'required': True, 'input_type': 'User input'}],
}
)
assert session.jobtemplate.search(job_template_name)[0]['Name'] == job_template_name
session.jobinvocation.run(
{
'job_category': 'Miscellaneous',
'job_template': job_template_name,
'search_query': f'name ^ {hostname}',
'template_content.command': 'ls',
}
)
job_description = f'{camelize(job_template_name.lower())} with inputs command="ls"'
session.jobinvocation.wait_job_invocation_state(
entity_name=job_description, host_name=hostname
)
status = session.jobinvocation.read(entity_name=job_description, host_name=hostname)
assert status['overview']['hosts_table'][0]['Status'] == 'success'
@pytest.mark.stubbed
@pytest.mark.tier2
def test_positive_schedule_recurring_host_job(self):
"""Using the new Host UI, schedule a recurring job on a Host
:id: 5052be04-28ab-4349-8bee-851ef76e4ffa
    :CaseComponent: Ansible
:Team: Rocket
:Steps:
1. Register a RHEL host to Satellite.
2. Import all roles available by default.
3. Assign a role to host.
4. Navigate to the new UI for the given Host.
5. Select the Jobs subtab.
6. Click the Schedule Recurring Job button, and using the popup, schedule a
recurring Job.
7. Navigate to Job Invocations.
:expectedresults: The scheduled Job appears in the Job Invocation list at the appointed
time
"""
@pytest.mark.stubbed
@pytest.mark.tier2
def test_positive_schedule_recurring_hostgroup_job(self):
"""Using the new recurring job scheduler, schedule a recurring job on a Hostgroup
:id: c65db99b-11fe-4a32-89d0-0a4692b07efe
    :CaseComponent: Ansible
:Team: Rocket
:Steps:
1. Register a RHEL host to Satellite.
2. Import all roles available by default.
3. Assign a role to host.
4. Navigate to the Host Group page.
5. Select the "Configure Ansible Job" action.
6. Click the Schedule Recurring Job button, and using the popup, schedule a
recurring Job.
7. Navigate to Job Invocations.
:expectedresults: The scheduled Job appears in the Job Invocation list at the appointed
time
"""
|
949 |
get request
|
from unittest import TestCase
from unittest import skip
import ConfigParser
import os
import sys
import tempfile
import shutil
sys.path.append(os.path.dirname(__file__))
import listener
import passive
import passive.nrds
class NRDSHandler(TestCase):
def setUp(self):
listener.server.listener.config['iconfig'] = {}
self.testing_plugin_dir = os.path.join(tempfile.gettempdir(), 'testing-plugins')
shutil.rmtree(self.testing_plugin_dir, ignore_errors=True)
self.config = ConfigParser.ConfigParser()
self.config.optionxform = str
self.config.file_path = os.path.join(self.testing_plugin_dir, "test.cfg")
self.config.add_section('plugin directives')
self.config.set('plugin directives', 'plugin_path', self.testing_plugin_dir)
self.config.add_section('passive checks')
self.n = passive.nrds.Handler(self.config)
try:
os.mkdir(self.testing_plugin_dir)
except OSError:
pass
def test_get_plugin(self):
def METHOD_NAME(*args, **kwargs):
return 'SECRET PAYLOAD'
passive.utils.send_request = METHOD_NAME
plugin_path = self.n.config.get('plugin directives', 'plugin_path')
testing_dict = {
'nrds_url': 'localhost',
'nrds_os': 'NCPA',
'nrds_token': 'token',
'plugin_path': plugin_path,
'plugin': 'pluginname'
}
self.n.get_plugin(**testing_dict)
expected_abs_plugin_path = os.path.join(plugin_path, 'pluginname')
self.assertTrue(os.path.isfile(expected_abs_plugin_path),
"Plugin was not created at testing site: %s" % expected_abs_plugin_path)
with open(expected_abs_plugin_path, 'r') as plugin_test:
l = plugin_test.readlines()[0].strip()
self.assertEquals(l, 'SECRET PAYLOAD')
def test_config_update_is_required(self):
def mock_request(*args, **kwargs):
return "<result><status>0</status><message>OK</message></result>"
passive.utils.send_request = mock_request
update = self.n.config_update_is_required('mocked', 'mocked', 'TESTING', '.1')
self.assertFalse(update)
def mock_request(*args, **kwargs):
return "<result><status>1</status><message>Config version is available</message></result>"
passive.utils.send_request = mock_request
update = self.n.config_update_is_required('mocked', 'mocked', 'TESTING', '.2')
self.assertTrue(update)
def mock_request(*args, **kwargs):
return "<result><status>2</status><message>Config version is available</message></result>"
passive.utils.send_request = mock_request
update = self.n.config_update_is_required('mocked', 'mocked', 'TESTING', '.3')
self.assertFalse(update)
def test_update_config(self):
def mock_request(*args, **kwargs):
return ""
passive.utils.send_request = mock_request
success = self.n.update_config('', '', '')
self.assertFalse(success)
def mock_request(*args, **kwargs):
return "[test]\nvalue = foobar"
passive.utils.send_request = mock_request
success = self.n.update_config('', '', '')
self.assertTrue(success)
os.unlink(self.config.file_path)
def test_get_os(self):
platform = self.n.get_os()
self.assertIsInstance(platform, str)
def test_list_missing_plugins(self):
required_plugins = self.n.get_required_plugins()
self.assertEquals(required_plugins, set())
self.n.config.set('passive checks', 'bingo|bongo', '/api/plugin/foobar.py/moola')
required_plugins = self.n.get_required_plugins()
self.assertEquals(required_plugins, {'foobar.py'})
self.n.config.set('passive checks', 'bogus_entry', '/api/plugin/bogus.bingo/foobar')
required_plugins = self.n.get_required_plugins()
self.assertEquals(required_plugins, {'foobar.py'})
def test_get_installed_plugins(self):
installed_plugins = self.n.get_installed_plugins()
self.assertEquals(installed_plugins, set())
foobar_plugin = os.path.join(self.testing_plugin_dir, 'foobar')
with open(foobar_plugin, 'w') as _:
installed_plugins = self.n.get_installed_plugins()
self.assertEquals(installed_plugins, {'foobar'})
os.unlink(foobar_plugin)
def tearDown(self):
shutil.rmtree(self.testing_plugin_dir, ignore_errors=True)
|
950 |
subp
|
pkgname = "boost"
pkgver = "1.83.0"
pkgrel = 0
hostmakedepends = ["pkgconf"]
makedepends = [
"icu-devel",
"bzip2-devel",
"xz-devel",
"zstd-devel",
"linux-headers",
"python-devel",
"zlib-devel",
]
provides = [f"boost{pkgver[:-2]}={pkgver}-r{pkgrel}"]
pkgdesc = "Free peer-reviewed portable C++ source libraries"
maintainer = "q66 <[email protected]>"
license = "BSL-1.0"
url = "https://boost.org"
source = f"https://boostorg.jfrog.io/artifactory/main/release/{pkgver}/source/boost_{pkgver.replace('.', '_')}.tar.gz"
sha256 = "c0685b68dd44cc46574cce86c4e17c0f611b15e195be9848dfd0769a0a207628"
tool_flags = {"CXXFLAGS": ["-std=c++14"]}
# FIXME: odd failures, but seems test-related
options = ["!check", "!cross"] # i don't dare touch this yet
# libs have semi-auto-generated subpkgs using this array
# needs to be updated with new libs regularly
_libs = [
"atomic",
"chrono",
"container",
"context",
"contract",
"coroutine",
"date_time",
"fiber",
"filesystem",
"graph",
"iostreams",
"json",
"locale",
"log_setup",
"log",
"math",
"nowide",
"prg_exec_monitor",
"program_options",
"python",
"random",
"regex",
"serialization",
"stacktrace_addr2line",
"stacktrace_basic",
"stacktrace_noop",
"system",
"thread",
"timer",
"type_erasure",
"unit_test_framework",
"url",
"wave",
"wserialization",
]
match self.profile().arch:
case "ppc64le" | "ppc64" | "ppc":
_arch, _abi = "power", "sysv"
case "aarch64":
_arch, _abi = "arm", "aapcs"
case "x86_64":
_arch, _abi = "x86", "sysv"
case "riscv64":
_arch, _abi = "riscv", "sysv"
case _:
broken = f"Unknown CPU architecture: {self.profile().arch}"
def init_configure(self):
self._pyver = (
self.do("pkgconf", "--modversion", "python3", capture_output=True)
.stdout.decode()
.strip()
)
def _call_b2(self, *args):
self.do(
self.chroot_cwd / "b2",
f"-j{self.make_jobs}",
f"--user-config={self.chroot_cwd}/user-config.jam",
f"--prefix={self.chroot_destdir}/usr",
"release",
f"python={self._pyver}",
"toolset=clang",
"cxxflags=" + self.get_cxxflags(shell=True),
"linkflags=" + self.get_ldflags(shell=True),
"threading=multi",
"debug-symbols=off",
"runtime-link=shared",
"link=shared,static",
"--layout=system",
*args,
)
def do_build(self):
self.do(
self.chroot_cwd / "bootstrap.sh",
f"--prefix={self.chroot_destdir}/usr",
"--with-python=/usr/bin/python",
"--with-python-root=/usr",
)
with open(self.cwd / "user-config.jam", "w") as cf:
cf.write(
f"""
using clang : : {self.get_tool("CXX")} : <cxxflags>"{self.get_cxxflags(shell = True)}" <linkflags>"{self.get_ldflags(shell = True)}" <warnings-as-errors>"off" ;
using python : {self._pyver} : /usr/bin/python3 : /usr/include/python{self._pyver} : /usr/lib/python{self._pyver} ;
"""
)
_call_b2(self)
def do_install(self):
# install b2 globally
self.install_bin("tools/build/src/engine/b2")
# install boost itself
_call_b2(self, "install")
# install Boost.Build files
self.install_dir("usr/share/b2")
for f in (self.cwd / "tools/build").glob("*"):
self.cp(f, self.destdir / "usr/share/b2", recursive=True)
for f in (self.destdir / "usr/share/b2").rglob("*.orig"):
f.unlink()
self.rm(self.destdir / "usr/share/b2/src/engine/b2")
self.install_dir("etc")
with open(self.destdir / "etc/site-config.jam", "w") as sc:
sc.write(
"""# System-wide configuration file for Boost.Build.
using clang ;
"""
)
self.install_license("LICENSE_1_0.txt")
def do_check(self):
self.do(
"python",
"test_all.py",
"--default-bjam",
wrksrc="tools/build/test",
env={"PATH": f"{self.chroot_cwd}/tools/build/src/engine:/usr/bin"},
)
@subpackage("boost-build")
def _jam(self):
self.pkgdesc = f"{pkgdesc} (Boost.Build framework)"
self.depends = [f"boost={pkgver}-r{pkgrel}"]
self.provides = [f"boost{pkgver[:-2]}-build={pkgver}-r{pkgrel}"]
return ["usr/bin/b2", "etc/site-config.jam", "usr/share/b2"]
@subpackage("boost-devel")
def _devel(self):
self.depends = [f"boost={pkgver}-r{pkgrel}"] + makedepends
self.provides = [f"boost{pkgver[:-2]}-devel={pkgver}-r{pkgrel}"]
return self.default_devel()
def _gen_libp(libname):
@subpackage(f"libboost_{libname}")
def METHOD_NAME(self):
self.pkgdesc = f"{pkgdesc} ({libname})"
self.depends = [f"boost={pkgver}-r{pkgrel}"]
return [f"usr/lib/libboost_{libname}*.so.*"]
for _blib in _libs:
_gen_libp(_blib)
|
951 |
test get jid
|
import logging
import pytest
import salt.returners.etcd_return as etcd_return
import salt.utils.json
from salt.utils.etcd_util import get_conn
from tests.support.pytest.etcd import * # pylint: disable=wildcard-import,unused-wildcard-import
docker = pytest.importorskip("docker")
log = logging.getLogger(__name__)
pytestmark = [
pytest.mark.skip_if_binaries_missing("docker", "dockerd", check_all=False),
]
@pytest.fixture
def configure_loader_modules(minion_opts):
return {
etcd_return: {
"__opts__": minion_opts,
},
}
@pytest.fixture(scope="module")
def update_etcd_profile(profile_name, prefix, etcd_profile):
etcd_profile.update(
{
"etcd.returner": profile_name,
"etcd.returner_root": prefix,
}
)
return etcd_profile
@pytest.fixture(scope="module")
def minion_config_overrides(update_etcd_profile):
return update_etcd_profile
@pytest.fixture(scope="module")
def etcd_client(minion_opts, profile_name):
return get_conn(minion_opts, profile=profile_name)
@pytest.fixture(scope="module")
def prefix():
return "/salt/pillar/test"
@pytest.fixture(autouse=True)
def cleanup_prefixed_entries(etcd_client, prefix):
"""
Cleanup after each test to ensure a consistent starting state.
"""
try:
assert etcd_client.get(prefix, recurse=True) is None
yield
finally:
etcd_client.delete(prefix, recurse=True)
def test_returner(prefix, etcd_client):
"""
Test returning values to etcd
"""
ret = {
"id": "test-id",
"jid": "123456789",
"single-key": "single-value",
"dict-key": {
"dict-subkey-1": "subvalue-1",
"dict-subkey-2": "subvalue-2",
},
}
etcd_return.returner(ret)
assert etcd_client.get("/".join((prefix, "minions", ret["id"]))) == ret["jid"]
expected = {key: salt.utils.json.dumps(ret[key]) for key in ret}
assert (
etcd_client.get("/".join((prefix, "jobs", ret["jid"], ret["id"])), recurse=True)
== expected
)
def test_save_and_get_load():
"""
Test saving a data load to etcd
"""
jid = "123456789"
load = {
"single-key": "single-value",
"dict-key": {
"dict-subkey-1": "subvalue-1",
"dict-subkey-2": "subvalue-2",
},
}
etcd_return.save_load(jid, load)
assert etcd_return.get_load(jid) == load
def METHOD_NAME():
"""
Test getting the return for a given jid
"""
jid = "123456789"
ret = {
"id": "test-id-1",
"jid": jid,
"single-key": "single-value",
"dict-key": {
"dict-subkey-1": "subvalue-1",
"dict-subkey-2": "subvalue-2",
},
"return": "test-return-1",
}
etcd_return.returner(ret)
ret = {"id": "test-id-2", "jid": jid, "return": "test-return-2"}
etcd_return.returner(ret)
expected = {
"test-id-1": {"return": "test-return-1"},
"test-id-2": {"return": "test-return-2"},
}
assert etcd_return.get_jid(jid) == expected
def test_get_fun():
"""
Test getting the latest fn run for each minion and matching to a target fn
"""
ret = {
"id": "test-id-1",
"jid": "1",
"single-key": "single-value",
"dict-key": {
"dict-subkey-1": "subvalue-1",
"dict-subkey-2": "subvalue-2",
},
"return": "test-return-1",
"fun": "test.ping",
}
etcd_return.returner(ret)
ret = {
"id": "test-id-2",
"jid": "2",
"return": "test-return-2",
"fun": "test.collatz",
}
etcd_return.returner(ret)
expected = {
"test-id-2": "test.collatz",
}
assert etcd_return.get_fun("test.collatz") == expected
def test_get_jids():
"""
Test getting all jids
"""
ret = {
"id": "test-id-1",
"jid": "1",
}
etcd_return.returner(ret)
ret = {
"id": "test-id-2",
"jid": "2",
}
etcd_return.returner(ret)
retval = etcd_return.get_jids()
assert len(retval) == 2
assert "1" in retval
assert "2" in retval
def test_get_minions():
"""
Test getting a list of minions
"""
ret = {
"id": "test-id-1",
"jid": "1",
}
etcd_return.returner(ret)
ret = {
"id": "test-id-2",
"jid": "2",
}
etcd_return.returner(ret)
retval = etcd_return.get_minions()
assert len(retval) == 2
assert "test-id-1" in retval
assert "test-id-2" in retval
|
952 |
test api region validate ok
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from rest_framework import status
from ralph.accounts.tests.factories import RegionFactory
from ralph.api.tests._base import RalphAPITestCase
from ralph.assets.tests.factories import ServiceEnvironmentFactory
from ralph.back_office.tests.factories import BackOfficeAssetFactory
from ralph.licences.models import BaseObjectLicence, Licence, LicenceUser
from ralph.licences.tests.factories import LicenceFactory
class LicenceAPITests(RalphAPITestCase):
def setUp(self):
super().setUp()
self.licence1, self.licence2 = LicenceFactory.create_batch(2)
region_pl = RegionFactory(name='pl')
self.licence3 = LicenceFactory(region=region_pl)
self.base_object = BackOfficeAssetFactory()
self.base_object2 = BackOfficeAssetFactory(region=region_pl)
LicenceUser.objects.create(licence=self.licence1, user=self.user1)
BaseObjectLicence.objects.create(
licence=self.licence2, base_object=self.base_object
)
self.service_env = ServiceEnvironmentFactory()
self.licence4 = LicenceFactory(service_env=self.service_env)
def test_get_licence_list(self):
url = reverse('licence-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], Licence.objects.count())
def test_get_licence_with_user_details(self):
url = reverse('licence-detail', args=(self.licence1.id,))
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data['number_bought'], self.licence1.number_bought
)
self.assertEqual(
response.data['region']['id'], self.licence1.region.id
)
self.assertEqual(
response.data['manufacturer']['id'], self.licence1.manufacturer.id
)
self.assertEqual(
response.data['licence_type']['id'], self.licence1.licence_type.id
)
self.assertEqual(
response.data['software']['id'], self.licence1.software.id
)
self.assertEqual(
response.data['users'][0]['user']['id'], self.user1.id,
)
self.assertEqual(
response.data['depreciation_rate'], self.licence1.depreciation_rate
)
def test_get_licence_with_service_env(self):
url = reverse('licence-detail', args=(self.licence4.id,))
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data['service_env']['id'], self.service_env.id
)
self.assertEqual(
response.data['service_env']['service'],
self.service_env.service.name
)
self.assertEqual(
response.data['service_env']['environment'],
self.service_env.environment.name
)
def test_get_licence_with_base_object_details(self):
url = reverse('licence-detail', args=(self.licence2.id,))
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(
response.data['base_objects'][0]['base_object'].endswith(
reverse('baseobject-detail', args=(self.base_object.id,))
)
)
def test_api_region_validate_error(self):
url = reverse('baseobjectlicence-list')
data = {
"base_object": self.base_object.id,
"licence": self.licence1.id
}
response = self.client.post(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data['non_field_errors'],
['Asset region is in a different region than licence.']
)
def METHOD_NAME(self):
url = reverse('baseobjectlicence-list')
data = {
"base_object": self.base_object2.id,
"licence": self.licence3.id
}
response = self.client.post(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(BaseObjectLicence.objects.filter(
base_object=self.base_object2.id,
licence=self.licence3.id
).exists())
|
953 |
to pil
|
# Copyright 1996-2023 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# References:
# - http://paulbourke.net/dataformats/pic/
# - https://github.com/plepers/hdr2png/blob/master/hdrloader.cpp
# - https://github.com/enkimute/hdrpng.js/blob/master/hdrpng.js
import math
import re
import sys
from utils.range import clamp_int
assert sys.version_info >= (3, 0), 'Python 3 is required to run this script.'
GAMMA = 2.0
class HDR:
@classmethod
def load_from_file(cls, filename):
"""Parse the HDR file."""
# HDR Format Specifications: http://paulbourke.net/dataformats/pic/
#
# Typical header:
# #?RADIANCE
# SOFTWARE=gegl 0.4.12
# FORMAT=32-bit_rle_rgbe
#
# -Y 1024 +X 2048
# Data
hdr = HDR()
data = []
header = False
with open(filename, "rb") as f:
while True:
line = ''
c = f.read(1).decode('ascii')
while c != '\n':
line += c
c = f.read(1).decode('ascii')
# Case: Empty lines
if line == '' or (len(line) == 1 and ord(line[0]) == 10):
continue
# Case: header
m = re.match(r'^#\?RADIANCE$', line)
if m:
header = True
continue
# Case: Size
m = re.match(r'^(.)(.)\s(\d+)\s(.)(.)\s(\d+)$', line)
if m:
hdr.rotated = m.group(2) == 'X'
hdr.xFlipped = m.group(1 if hdr.rotated else 4) == '-'
hdr.yFlipped = m.group(4 if hdr.rotated else 1) == '+'
hdr.width = int(m.group(6))
hdr.height = int(m.group(3))
break
# Case: ignored header entries
if line.startswith('FORMAT=') or \
line.startswith('EXPOSURE=') or \
line.startswith('COLORCORR=') or \
line.startswith('SOFTWARE=') or \
line.startswith('PIXASPECT=') or \
line.startswith('VIEW=') or \
line.startswith('PRIMARIES=') or \
line.startswith('GAMMA=') or \
line.startswith('# '):
continue
break
# Case: Data
data = f.read()
assert header, 'Invalid header.'
assert 4 * hdr.width * hdr.height == len(data) and len(data) > 0, \
'Invalid dimensions (expected dimension: 4x%dx%d, get %d floats)' % (hdr.width, hdr.height, len(data))
assert not (hdr.rotated or hdr.xFlipped or hdr.yFlipped), 'Flip or rotation flags are not supported.'
# Convert data to floats
hdr.data = [0.0] * (3 * hdr.width * hdr.height)
for i in range(hdr.width * hdr.height):
r = float(data[4 * i])
g = float(data[4 * i + 1])
b = float(data[4 * i + 2])
e = pow(2.0, float(data[4 * i + 3]) - 128.0 + 8.0)
hdr.data[3 * i] = pow(r * e, 1.0 / GAMMA) / 255.0
hdr.data[3 * i + 1] = pow(g * e, 1.0 / GAMMA) / 255.0
hdr.data[3 * i + 2] = pow(b * e, 1.0 / GAMMA) / 255.0
return hdr
@classmethod
def create_black_image(cls, width, height):
"""Create an HDR black image."""
hdr = HDR()
hdr.width = width
hdr.height = height
hdr.data = [0.0] * (3 * hdr.width * hdr.height)
return hdr
def __init__(self):
"""Constructor: simply reset the fields. Prefer the static methods."""
self.data = [] # Contains the 1D array of floats (size: 3*w*h, black: 0.0, white: 1.0, hdr: >1.0)
self.width = -1
self.height = -1
self.xFlipped = False
self.yFlipped = False
self.rotated = False
def is_valid(self):
"""Return True if the image has been loaded correctly."""
return 3 * self.width * self.height == len(self.data)
def get_pixel(self, x, y):
"""Get pixel at the speficied position."""
assert x >= 0 and x < self.width
assert y >= 0 and y < self.height
i = 3 * (y * self.width + x)
return (
self.data[i],
self.data[i + 1],
self.data[i + 2]
)
def set_pixel(self, x, y, pixel):
"""Set pixel at the speficied position."""
assert x >= 0 and x < self.width
assert y >= 0 and y < self.height
i = 3 * (y * self.width + x)
self.data[i] = pixel[0]
self.data[i + 1] = pixel[1]
self.data[i + 2] = pixel[2]
def clamp(self, threshold):
"""Clamp all the floats to some value."""
assert self.is_valid()
t = pow(threshold, 1.0 / GAMMA)
for i in range(3 * self.width * self.height):
self.data[i] = t if self.data[i] > t else self.data[i]
def save(self, filename):
"""Save the image to a file."""
assert self.is_valid()
assert filename.endswith('.hdr')
assert not (self.rotated or self.xFlipped or self.yFlipped), 'Flip or rotation flags are not supported.'
with open(filename, "wb") as f:
f.write('#?RADIANCE\n'.encode('ascii'))
f.write('FORMAT=32-bit_rle_rgbe\n'.encode('ascii'))
f.write('\n'.encode('ascii'))
f.write(('-Y %d +X %d\n' % (self.height, self.width)).encode('ascii'))
for i in range(self.width * self.height):
r = pow(self.data[3 * i], GAMMA)
g = pow(self.data[3 * i + 1], GAMMA)
b = pow(self.data[3 * i + 2], GAMMA)
v = max(r, g, b)
e = math.ceil(math.log(v, 2)) if v != 0.0 else 0.0
s = pow(2, e - 8)
arr = [
clamp_int(r / s, 0, 255),
clamp_int(g / s, 0, 255),
clamp_int(b / s, 0, 255),
clamp_int(e + 128, 0, 255)
]
f.write(bytes(arr))
def METHOD_NAME(self):
"""Create a PIL image to test the script."""
assert self.is_valid()
from PIL import Image
im = Image.new('RGB', (self.width, self.height))
pixels = im.load()
for y in range(self.height):
for x in range(self.width):
i = 3 * (y * self.width + x)
r = clamp_int(255.0 * self.data[i], 0, 255)
g = clamp_int(255.0 * self.data[i + 1], 0, 255)
b = clamp_int(255.0 * self.data[i + 2], 0, 255)
pixels[x, y] = (r, g, b)
return im
|
954 |
test points
|
from nutils import element, points, transform, numeric
from nutils.testing import TestCase, parametrize
import numpy
class gauss(TestCase):
def test_line(self):
line = element.getsimplex(1)
for degree in range(1, 8):
points = line.getpoints('gauss', degree)
self.assertEqual(points.npoints, degree//2+1)
self.assertLess(abs(points.weights.sum()-1), 2e-15)
def test_quad(self):
quad = element.getsimplex(1)**2
for degree in range(1, 8):
points = quad.getpoints('gauss', degree)
self.assertEqual(points.npoints, (degree//2+1)**2)
self.assertLess(abs(points.weights.sum()-1), 2e-15)
def test_hexahedron(self):
hex = element.getsimplex(1)**3
for degree in range(1, 8):
points = hex.getpoints('gauss', degree)
self.assertEqual(points.npoints, (degree//2+1)**3)
self.assertLess(abs(points.weights.sum()-1), 2e-15)
def test_triangle(self):
tri = element.getsimplex(2)
for degree in range(1, 8):
points = tri.getpoints('gauss', degree)
self.assertLess(abs(points.weights.sum()-.5), 2e-15)
def test_pyramid(self):
pyramid12 = element.getsimplex(1)*element.getsimplex(2)
pyramid21 = element.getsimplex(2)*element.getsimplex(1)
for degree in range(1, 8):
points12 = pyramid12.getpoints('gauss', degree)
points21 = pyramid21.getpoints('gauss', degree)
self.assertEqual(points12.npoints, points21.npoints)
self.assertLess(abs(points12.weights.sum()-.5), 2e-15)
self.assertLess(abs(points21.weights.sum()-.5), 2e-15)
def test_tetrahedron(self):
tet = element.getsimplex(3)
for degree in range(1, 9):
points = tet.getpoints('gauss', degree)
self.assertLess(abs(points.weights.sum()-1/6), 2e-15)
class bezier(TestCase):
def test_line(self):
line = element.getsimplex(1)
for n in range(2, 8):
bezier = line.getpoints('bezier', n)
self.assertEqual(bezier.npoints, n)
self.assertEqual(len(bezier.tri), n-1)
self.assertEqual(len(bezier.hull), 2)
def test_quad(self):
quad = element.getsimplex(1)**2
for n in range(2, 8):
bezier = quad.getpoints('bezier', n)
self.assertEqual(bezier.npoints, n**2)
self.assertEqual(len(bezier.tri), 2*(n-1)**2)
self.assertEqual(len(bezier.hull), 4*(n-1))
def test_hexahedron(self):
hex = element.getsimplex(1)**3
for n in range(2, 8):
bezier = hex.getpoints('bezier', n)
self.assertEqual(bezier.npoints, n**3)
self.assertEqual(len(bezier.tri), 6*(n-1)**3)
self.assertEqual(len(bezier.hull), 12*(n-1)**2)
def test_triangle(self):
tri = element.getsimplex(2)
for n in range(2, 8):
bezier = tri.getpoints('bezier', n)
self.assertEqual(bezier.npoints, (n*(n+1))//2)
self.assertEqual(len(bezier.tri), (n-1)**2)
self.assertEqual(len(bezier.hull), 3*(n-1))
def test_tetrahedron(self):
tet = element.getsimplex(3)
for n in range(2, 8):
bezier = tet.getpoints('bezier', n)
self.assertEqual(bezier.npoints, (n*(n+1)*(n+2))//6)
self.assertEqual(len(bezier.tri), (n-1)**3)
self.assertEqual(len(bezier.hull), 4*(n-1)**2)
def test_pyramid(self):
pyramid = element.getsimplex(1)*element.getsimplex(2)
for n in range(2, 8):
bezier = pyramid.getpoints('bezier', n)
self.assertEqual(bezier.npoints, n*(n*(n+1))//2)
self.assertEqual(len(bezier.tri), 3*(n-1)**3)
self.assertEqual(len(bezier.hull), 8*(n-1)**2)
fullhull = points.Points.hull.func(bezier).tolist() # contains additional internal faces for n >= 3
            for h in bezier.hull: # assert that hull is a subset of fullhull
self.assertIn(sorted(h), fullhull)
class trimmed(TestCase):
def setUp(self):
super().setUp()
quad = element.getsimplex(1)**2
levels = numeric.overlapping(numpy.arange(-1, 16, 2), n=5) # linear ramp cutting at x + y == .125
trimmed = quad.trim(levels.ravel(), maxrefine=2, ndivisions=16)
self.bezier = trimmed.getpoints('bezier', 5)
self.gauss = trimmed.getpoints('gauss', 3)
self.uniform = trimmed.getpoints('uniform', 3)
def test_type(self):
for pnt in self.bezier, self.gauss, self.uniform:
self.assertIsInstance(pnt, points.ConcatPoints)
for i, subpoints in enumerate(pnt.allpoints):
self.assertIsInstance(subpoints, points.TransformPoints)
self.assertIsInstance(subpoints.points, points.TensorPoints if i else points.ConcatPoints)
def test_weights(self):
exact = 1-.5*.125**2
for pnt in self.gauss, self.uniform:
self.assertLess(abs(pnt.weights.sum()-exact), 1e-15)
def METHOD_NAME(self):
self.assertEqual(self.bezier.npoints, 26)
for x in [0., .25, .5, .75, 1.]:
for y in [0., .25, .5, .75, 1.]:
if x or y:
self.assertIn([x, y], self.bezier.coords.tolist())
self.assertIn([0., .125], self.bezier.coords.tolist())
self.assertIn([.125, 0.], self.bezier.coords.tolist())
def test_tri(self):
self.assertEqual(len(self.bezier.tri), 33)
def test_hull(self):
self.assertEqual(len(self.bezier.hull), 17)
|
955 |
test printing the server manifest with image
|
import yaml
from prefect.infrastructure.kubernetes import KubernetesJob
from prefect.settings import (
PREFECT_API_KEY,
PREFECT_API_URL,
PREFECT_LOGGING_SERVER_LEVEL,
)
from prefect.testing.cli import invoke_and_assert
from prefect.utilities.dockerutils import get_prefect_image_name
def test_printing_the_server_manifest_with_no_args():
"""`prefect kubernetes manifest server` should print a valid YAML file
representing a basic Prefect server deployment to a cluster"""
result = invoke_and_assert(
["kubernetes", "manifest", "server"],
expected_output_contains="kind: Deployment",
)
manifests = yaml.load_all(result.stdout, yaml.SafeLoader)
# Spot-check a few things. This test is mostly just confirming that the output
# looks roughly like a set of Kubernetes manifests in YAML, not that this is a
# valid and working API deployment.
assert manifests
for manifest in manifests:
assert manifest["metadata"]["namespace"] == "default"
if manifest["kind"] == "Deployment":
assert manifest["metadata"]["name"] == "prefect-server"
assert len(manifest["spec"]["template"]["spec"]["containers"]) == 1
server_container = manifest["spec"]["template"]["spec"]["containers"][0]
assert server_container["image"] == get_prefect_image_name()
assert server_container["command"][0:3] == ["prefect", "server", "start"]
assert server_container["command"][0:3] == ["prefect", "server", "start"]
assert server_container["command"][5:] == [
"--log-level",
str(PREFECT_LOGGING_SERVER_LEVEL.value()),
]
def METHOD_NAME():
result = invoke_and_assert(
[
"kubernetes",
"manifest",
"server",
"-i",
"test_image_tag",
"--log-level",
"test_log_level",
],
expected_output_contains="kind: Deployment",
)
manifests = yaml.load_all(result.stdout, yaml.SafeLoader)
assert manifests
manifests = yaml.load_all(result.stdout, yaml.SafeLoader)
assert manifests
deployment = next(m for m in manifests if m["kind"] == "Deployment")
assert deployment["metadata"]["name"] == "prefect-server"
assert len(deployment["spec"]["template"]["spec"]["containers"]) == 1
server_container = deployment["spec"]["template"]["spec"]["containers"][0]
assert server_container["image"] == "test_image_tag"
assert server_container["command"][5:] == ["--log-level", "test_log_level"]
def test_printing_the_server_manifest_with_namespace():
result = invoke_and_assert(
["kubernetes", "manifest", "server", "-n", "test_namespace"],
expected_output_contains="kind: Deployment",
)
manifests = yaml.load_all(result.stdout, yaml.SafeLoader)
assert manifests
for manifest in manifests:
assert manifest["metadata"]["namespace"] == "test_namespace"
def test_printing_the_agent_manifest_with_no_args():
"""`prefect kubernetes manifest agent` should print a valid YAML file
representing a basic agent deployment to a cluster"""
result = invoke_and_assert(
["kubernetes", "manifest", "agent"],
expected_output_contains="kind: Deployment",
)
manifests = yaml.load_all(result.stdout, yaml.SafeLoader)
# Spot-check a few things. This test is mostly just confirming that the output
# looks roughly like a set of Kubernetes manifests in YAML.
assert manifests
for manifest in manifests:
if manifest["kind"] not in ["ClusterRole", "ClusterRoleBinding"]:
assert manifest["metadata"]["namespace"] == "default"
if manifest["kind"] == "Deployment":
assert manifest["metadata"]["name"] == "prefect-agent"
assert len(manifest["spec"]["template"]["spec"]["containers"]) == 1
agent_container = manifest["spec"]["template"]["spec"]["containers"][0]
assert agent_container["image"] == get_prefect_image_name()
assert agent_container["command"] == [
"prefect",
"agent",
"start",
"-q",
"kubernetes",
]
assert len(agent_container["env"]) == 2
assert agent_container["env"][0]["name"] == "PREFECT_API_URL"
assert agent_container["env"][1]["name"] == "PREFECT_API_KEY"
assert agent_container["env"][0]["value"] == str(PREFECT_API_URL.value())
assert agent_container["env"][1]["value"] == str(PREFECT_API_KEY.value())
def test_printing_the_agent_manifest_with_api_url_image_tag_and_work_queue():
result = invoke_and_assert(
[
"kubernetes",
"manifest",
"agent",
"--api-url",
"test_api_url",
"--api-key",
"test_api_key",
"-i",
"test_image_tag",
"-q",
"test_work_queue",
],
expected_output_contains="kind: Deployment",
)
manifests = yaml.load_all(result.stdout, yaml.SafeLoader)
assert manifests
deployment = next(m for m in manifests if m["kind"] == "Deployment")
assert deployment["metadata"]["name"] == "prefect-agent"
assert len(deployment["spec"]["template"]["spec"]["containers"]) == 1
agent_container = deployment["spec"]["template"]["spec"]["containers"][0]
assert agent_container["image"] == "test_image_tag"
assert agent_container["command"][3:5] == ["-q", "test_work_queue"]
assert len(agent_container["env"]) == 2
assert agent_container["env"][0]["name"] == "PREFECT_API_URL"
assert agent_container["env"][1]["name"] == "PREFECT_API_KEY"
assert agent_container["env"][0]["value"] == "test_api_url"
assert agent_container["env"][1]["value"] == "test_api_key"
def test_printing_the_agent_manifest_with_namespace():
result = invoke_and_assert(
["kubernetes", "manifest", "agent", "-n", "test_namespace"],
expected_output_contains="kind: Deployment",
)
manifests = yaml.load_all(result.stdout, yaml.SafeLoader)
assert manifests
for manifest in manifests:
if manifest["kind"] not in ["ClusterRole", "ClusterRoleBinding"]:
assert manifest["metadata"]["namespace"] == "test_namespace"
def test_printing_the_job_base_manifest():
"""`prefect kubernetes manifest flow-run-job` should print a valid YAML file
representing the minimum starting point for a Kubernetes Job"""
result = invoke_and_assert(
["kubernetes", "manifest", "flow-run-job"],
expected_output_contains="kind: Job",
)
# check for the presence of helpful comments
assert "# the first container is required" in result.stdout
parsed = yaml.load(result.stdout, yaml.SafeLoader)
assert parsed == KubernetesJob.base_job_manifest()
|
956 |
get filtered gene ids
|
import re
from collections import defaultdict
from django.db.models import Q, prefetch_related_objects, Prefetch
from django.db.models.functions import Length
from reference_data.models import GeneInfo, GeneConstraint, dbNSFPGene, Omim, MGI, PrimateAI, GeneCopyNumberSensitivity, \
GenCC, ClinGen
from seqr.utils.xpos_utils import get_xpos
from seqr.views.utils.orm_to_json_utils import _get_json_for_model, _get_json_for_models, _get_empty_json_for_model, \
get_json_for_gene_notes_by_gene_id
def get_gene(gene_id, user):
gene = GeneInfo.objects.get(gene_id=gene_id)
gene_json = _get_json_for_model(gene, get_json_for_models=_get_json_for_genes, user=user, gene_fields=ALL_GENE_FIELDS)
return gene_json
def get_genes(gene_ids):
return _get_genes(gene_ids)
def get_genes_for_variant_display(gene_ids):
return _get_genes(gene_ids, gene_fields=VARIANT_GENE_DISPLAY_FIELDS)
def get_genes_for_variants(gene_ids):
return _get_genes(gene_ids, gene_fields=VARIANT_GENE_FIELDS)
def get_genes_with_detail(gene_ids, user):
return _get_genes(gene_ids, user=user, gene_fields=ALL_GENE_FIELDS)
def _get_genes(gene_ids, user=None, gene_fields=None):
gene_filter = {}
if gene_ids is not None:
gene_filter['gene_id__in'] = gene_ids
genes = GeneInfo.objects.filter(**gene_filter)
return {gene['geneId']: gene for gene in _get_json_for_genes(genes, user=user, gene_fields=gene_fields)}
def get_gene_ids_for_gene_symbols(gene_symbols):
genes = GeneInfo.objects.filter(gene_symbol__in=gene_symbols).only('gene_symbol', 'gene_id').order_by('-gencode_release')
symbols_to_ids = defaultdict(list)
for gene in genes:
symbols_to_ids[gene.gene_symbol].append(gene.gene_id)
return symbols_to_ids
def METHOD_NAME(gene_filter):
return [gene.gene_id for gene in GeneInfo.objects.filter(gene_filter).only('gene_id')]
def get_queried_genes(query, max_results):
matching_genes = GeneInfo.objects.filter(
Q(gene_id__icontains=query) | Q(gene_symbol__icontains=query)
).only('gene_id', 'gene_symbol').order_by(Length('gene_symbol').asc(), 'gene_symbol').distinct()
return [{'gene_id': gene.gene_id, 'gene_symbol': gene.gene_symbol} for gene in matching_genes[:max_results]]
def _get_gene_model(gene, field):
# prefetching only works with all()
return next((model for model in getattr(gene, '{}_set'.format(field)).all()), None)
def _add_gene_model(field, return_key, default):
def _add_gene_model_func(gene):
model = _get_gene_model(gene, field)
return {return_key: _get_json_for_model(model) if model else default()}
return _add_gene_model_func
def _add_dbnsfp(gene):
model = _get_gene_model(gene, 'dbnsfpgene')
if model:
return _get_json_for_model(model)
else:
return _get_empty_json_for_model(dbNSFPGene)
def _add_omim(gene):
omim_phenotypes = _get_json_for_models(gene.omim_set.all())
return {
'omimPhenotypes': [phenotype for phenotype in omim_phenotypes if phenotype['phenotypeMimNumber']],
'mimNumber': omim_phenotypes[0]['mimNumber'] if omim_phenotypes else None,
}
def _add_mgi(gene):
model = _get_gene_model(gene, 'mgi')
return {'mgiMarkerId': model.marker_id if model else None}
OMIM = 'omim'
CONSTRAINT = 'constraint'
CN_SENSITIVITY = 'cn_sensitivity'
DBNSFP = 'dbnsfp'
GENCC = 'gencc'
PRIMATE_AI = 'primate_ai'
MGI_FIELD = 'mgi'
CLINGEN = 'clingen'
NOTES = 'notes'
VARIANT_GENE_DISPLAY_FIELDS = {
OMIM: (Omim, _add_omim),
CONSTRAINT: (GeneConstraint, None),
CN_SENSITIVITY: (GeneCopyNumberSensitivity, _add_gene_model('genecopynumbersensitivity', 'cnSensitivity', dict)),
GENCC: (GenCC, _add_gene_model('gencc', 'genCc', dict)),
CLINGEN: (ClinGen, _add_gene_model('clingen', 'clinGen', lambda: None)),
}
VARIANT_GENE_FIELDS = {
DBNSFP: (dbNSFPGene, _add_dbnsfp),
PRIMATE_AI: (PrimateAI, _add_gene_model('primateai', 'primateAi', lambda: None)),
}
VARIANT_GENE_FIELDS.update(VARIANT_GENE_DISPLAY_FIELDS)
ALL_GENE_FIELDS = {
MGI_FIELD: (MGI, _add_mgi),
NOTES: (None, None),
}
ALL_GENE_FIELDS.update(VARIANT_GENE_FIELDS)
def _get_json_for_genes(genes, user=None, gene_fields=None):
if not gene_fields:
gene_fields = {}
total_gene_constraints = None
if CONSTRAINT in gene_fields:
total_gene_constraints = GeneConstraint.objects.count()
if NOTES in gene_fields:
gene_notes_json = get_json_for_gene_notes_by_gene_id([gene.gene_id for gene in genes], user)
def _add_total_constraint_count(result, *args):
result['totalGenes'] = total_gene_constraints
def _process_result(result, gene):
for field, (_, result_func) in gene_fields.items():
if field == NOTES:
updates = {'notes': gene_notes_json.get(result['geneId'], [])}
elif field == CONSTRAINT:
constraint = _get_gene_model(gene, 'geneconstraint')
updates = {'constraints': _get_json_for_model(constraint, process_result=_add_total_constraint_count) if constraint else {}}
else:
updates = result_func(gene)
result.update(updates)
for model, _ in gene_fields.values():
if model:
prefetch_related_objects(genes, Prefetch(
'{}_set'.format(model.__name__.lower()),
queryset=model.objects.only('gene__gene_id', *model._meta.json_fields)))
return _get_json_for_models(genes, process_result=_process_result)
def parse_locus_list_items(request_json):
raw_items = request_json.get('rawItems')
if not raw_items:
return None, None, None
invalid_items = []
intervals = []
gene_ids = set()
gene_symbols = set()
for item in raw_items.replace(',', ' ').replace('\t', '<TAB>').split():
interval_match = re.match('(?P<chrom>\w+):(?P<start>\d+)-(?P<end>\d+)(%(?P<offset>(\d+)))?', item)
if not interval_match:
interval_match = re.match('(?P<chrom>\w+)<TAB>(?P<start>\d+)<TAB>(?P<end>\d+)', item)
if interval_match:
interval = interval_match.groupdict()
try:
interval['chrom'] = interval['chrom'].lstrip('chr')
interval['start'] = int(interval['start'])
interval['end'] = int(interval['end'])
if interval.get('offset'):
interval['offset'] = int(interval['offset']) / 100
if interval['start'] > interval['end']:
raise ValueError
get_xpos(interval['chrom'], interval['start'])
get_xpos(interval['chrom'], interval['end'])
intervals.append(interval)
except (KeyError, ValueError):
invalid_items.append('chr{chrom}:{start}-{end}'.format(
chrom=interval.get('chrom'), start=interval.get('start'), end=interval.get('end')
))
elif item.upper().startswith('ENSG'):
gene_ids.add(item.replace('<TAB>', ''))
else:
gene_symbols.add(item.replace('<TAB>', ''))
gene_symbols_to_ids = get_gene_ids_for_gene_symbols(gene_symbols)
invalid_items += [symbol for symbol in gene_symbols if not gene_symbols_to_ids.get(symbol)]
gene_ids.update({gene_ids[0] for gene_ids in gene_symbols_to_ids.values() if len(gene_ids)})
genes_by_id = get_genes(list(gene_ids)) if gene_ids else {}
invalid_items += [gene_id for gene_id in gene_ids if not genes_by_id.get(gene_id)]
    return genes_by_id, intervals, invalid_items
|
957 |
qcisd
|
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Coupled Cluster
===============
Simple usage::
>>> from pyscf import gto, scf, cc
>>> mol = gto.M(atom='H 0 0 0; H 0 0 1')
>>> mf = scf.RHF(mol).run()
>>> cc.CCSD(mf).run()
:func:`cc.CCSD` returns an instance of the CCSD class. The following parameters
control the CCSD calculation.
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default value equals to :class:`Mole.max_memory`
conv_tol : float
        Convergence threshold. Default is 1e-7.
    conv_tol_normt : float
        Convergence threshold for norm(t1,t2). Default is 1e-5.
    max_cycle : int
        Maximum number of iterations. Default is 50.
diis_space : int
DIIS space size. Default is 6.
diis_start_cycle : int
The step to start DIIS. Default is 0.
direct : bool
AO-direct CCSD. Default is False.
async_io : bool
Allow for asynchronous function execution. Default is True.
incore_complete : bool
Avoid all I/O. Default is False.
frozen : int or list
If integer is given, the inner-most orbitals are frozen from CC
amplitudes. Given the orbital indices (0-based) in a list, both
occupied and virtual orbitals can be frozen in CC calculation.
Saved results
converged : bool
CCSD converged or not
e_tot : float
Total CCSD energy (HF + correlation)
t1, t2 :
t1[i,a], t2[i,j,a,b] (i,j in occ, a,b in virt)
l1, l2 :
Lambda amplitudes l1[i,a], l2[i,j,a,b] (i,j in occ, a,b in virt)
'''
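# Illustrative sketch of tuning the options documented above; the frozen count and
# thresholds below are assumptions for demonstration, not recommended settings:
#   mycc = cc.CCSD(mf, frozen=1)
#   mycc.conv_tol = 1e-9
#   mycc.max_cycle = 100
#   mycc.run()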
from pyscf.cc import ccsd
from pyscf.cc import ccsd_lambda
from pyscf.cc import ccsd_rdm
from pyscf.cc import addons
from pyscf.cc import rccsd
from pyscf.cc import uccsd
from pyscf.cc import gccsd
from pyscf.cc import eom_rccsd
from pyscf.cc import eom_uccsd
from pyscf.cc import eom_gccsd
from pyscf.cc import qcisd
from pyscf.cc import momgfccsd
from pyscf import scf
def CCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):
if isinstance(mf, scf.uhf.UHF):
return UCCSD(mf, frozen, mo_coeff, mo_occ)
elif isinstance(mf, scf.ghf.GHF):
return GCCSD(mf, frozen, mo_coeff, mo_occ)
else:
return RCCSD(mf, frozen, mo_coeff, mo_occ)
CCSD.__doc__ = ccsd.CCSD.__doc__
scf.hf.SCF.CCSD = CCSD
def RCCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):
import numpy
from pyscf import lib
from pyscf.soscf import newton_ah
from pyscf.cc import dfccsd
if isinstance(mf, scf.uhf.UHF):
raise RuntimeError('RCCSD cannot be used with UHF method.')
elif isinstance(mf, scf.rohf.ROHF):
lib.logger.warn(mf, 'RCCSD method does not support ROHF method. ROHF object '
'is converted to UHF object and UCCSD method is called.')
mf = scf.addons.convert_to_uhf(mf)
return UCCSD(mf, frozen, mo_coeff, mo_occ)
if isinstance(mf, newton_ah._CIAH_SOSCF) or not isinstance(mf, scf.hf.RHF):
mf = scf.addons.convert_to_rhf(mf)
if getattr(mf, 'with_df', None):
return dfccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)
elif numpy.iscomplexobj(mo_coeff) or numpy.iscomplexobj(mf.mo_coeff):
return rccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)
else:
return ccsd.CCSD(mf, frozen, mo_coeff, mo_occ)
RCCSD.__doc__ = ccsd.CCSD.__doc__
def UCCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):
from pyscf.soscf import newton_ah
if isinstance(mf, newton_ah._CIAH_SOSCF) or not isinstance(mf, scf.uhf.UHF):
mf = scf.addons.convert_to_uhf(mf)
if getattr(mf, 'with_df', None):
# TODO: DF-UCCSD with memory-efficient particle-particle ladder,
# similar to dfccsd.RCCSD
return uccsd.UCCSD(mf, frozen, mo_coeff, mo_occ)
else:
return uccsd.UCCSD(mf, frozen, mo_coeff, mo_occ)
UCCSD.__doc__ = uccsd.UCCSD.__doc__
def GCCSD(mf, frozen=None, mo_coeff=None, mo_occ=None):
from pyscf.soscf import newton_ah
if isinstance(mf, newton_ah._CIAH_SOSCF) or not isinstance(mf, scf.ghf.GHF):
mf = scf.addons.convert_to_ghf(mf)
if getattr(mf, 'with_df', None):
raise NotImplementedError('DF-GCCSD')
else:
return gccsd.GCCSD(mf, frozen, mo_coeff, mo_occ)
GCCSD.__doc__ = gccsd.GCCSD.__doc__
def METHOD_NAME(mf, frozen=None, mo_coeff=None, mo_occ=None):
if isinstance(mf, scf.uhf.UHF):
raise NotImplementedError
elif isinstance(mf, scf.ghf.GHF):
raise NotImplementedError
else:
return RQCISD(mf, frozen, mo_coeff, mo_occ)
METHOD_NAME.__doc__ = qcisd.METHOD_NAME.__doc__
scf.hf.SCF.METHOD_NAME = METHOD_NAME
def RQCISD(mf, frozen=None, mo_coeff=None, mo_occ=None):
import numpy
from pyscf import lib
from pyscf.soscf import newton_ah
if isinstance(mf, scf.uhf.UHF):
raise RuntimeError('RQCISD cannot be used with UHF method.')
elif isinstance(mf, scf.rohf.ROHF):
lib.logger.warn(mf, 'RQCISD method does not support ROHF method. ROHF object '
'is converted to UHF object and UQCISD method is called.')
mf = scf.addons.convert_to_uhf(mf)
raise NotImplementedError
if isinstance(mf, newton_ah._CIAH_SOSCF) or not isinstance(mf, scf.hf.RHF):
mf = scf.addons.convert_to_rhf(mf)
    if numpy.iscomplexobj(mo_coeff) or numpy.iscomplexobj(mf.mo_coeff):
raise NotImplementedError
else:
return qcisd.METHOD_NAME(mf, frozen, mo_coeff, mo_occ)
RQCISD.__doc__ = qcisd.METHOD_NAME.__doc__
def FNOCCSD(mf, thresh=1e-6, pct_occ=None, nvir_act=None):
"""Frozen natural orbital CCSD
Attributes:
thresh : float
Threshold on NO occupation numbers. Default is 1e-6 (very conservative).
pct_occ : float
Percentage of total occupation number. Default is None. If present, overrides `thresh`.
nvir_act : int
Number of virtual NOs to keep. Default is None. If present, overrides `thresh` and `pct_occ`.
"""
from pyscf import mp
pt = mp.MP2(mf).set(verbose=0).run()
frozen, no_coeff = pt.make_fno(thresh=thresh, pct_occ=pct_occ, nvir_act=nvir_act)
pt_no = mp.MP2(mf, frozen=frozen, mo_coeff=no_coeff).set(verbose=0).run()
mycc = ccsd.CCSD(mf, frozen=frozen, mo_coeff=no_coeff)
mycc.delta_emp2 = pt.e_corr - pt_no.e_corr
from pyscf.lib import logger
def _finalize(self):
'''Hook for dumping results and clearing up the object.'''
if self.converged:
logger.info(self, 'FNO-%s converged', self.__class__.__name__)
else:
logger.note(self, 'FNO-%s not converged', self.__class__.__name__)
logger.note(self, 'E(FNO-%s) = %.16g E_corr = %.16g',
self.__class__.__name__, self.e_tot, self.e_corr)
logger.note(self, 'E(FNO-%s+delta-MP2) = %.16g E_corr = %.16g',
self.__class__.__name__, self.e_tot+self.delta_emp2,
self.e_corr+self.delta_emp2)
return self
mycc._finalize = _finalize.__get__(mycc, mycc.__class__)
return mycc
MomGFCCSD = momgfccsd.MomGFCCSD
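# Minimal FNO-CCSD sketch; the molecule, basis, and threshold are illustrative
# assumptions rather than recommended settings:
#   from pyscf import gto, scf, cc
#   mol = gto.M(atom='H 0 0 0; F 0 0 0.92', basis='cc-pvdz')
#   mf = scf.RHF(mol).run()
#   mycc = cc.FNOCCSD(mf, thresh=1e-5).run()
#   print(mycc.e_tot + mycc.delta_emp2)  # FNO-CCSD energy plus the delta-MP2 correction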
|
958 |
train mean
|
"""WMT workload parent class."""
import abc
import math
import os
from typing import Any, Dict, Optional, Tuple
import jax
import numpy as np
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.wmt import input_pipeline
from algorithmic_efficiency.workloads.wmt.wmt_jax import decode
VOCAB_PATH = './wmt_256/sentencepiece_model'
WORKDIR = './wmt_256'
USE_PYTORCH_DDP = 'LOCAL_RANK' in os.environ
class BaseWmtWorkload(spec.Workload):
"""A WMT workload."""
_vocab_size: int = 32000
def __init__(self) -> None:
super().__init__()
self._tokenizer = None
@property
def target_metric_name(self) -> str:
"""The name of the target metric (useful for scoring/processing code)."""
return 'bleu'
def has_reached_validation_target(self, eval_result: float) -> bool:
return eval_result['validation/bleu'] > self.validation_target_value
@property
def validation_target_value(self) -> float:
return 30.8491
def has_reached_test_target(self, eval_result: float) -> bool:
return eval_result['test/bleu'] > self.test_target_value
@property
def test_target_value(self) -> float:
return 30.7219
@property
def loss_type(self) -> spec.LossType:
return spec.LossType.SOFTMAX_CROSS_ENTROPY
@property
def num_train_examples(self) -> int:
# wmt17_translate/de-en 'train' split size
return 5906184
@property
def num_eval_train_examples(self) -> int:
# Round up from num_validation_examples (which is the default for
# num_eval_train_examples) to the next multiple of eval_batch_size, so that
# we don't have to extract the correctly sized subset of the training data.
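    # With the defaults here: ceil(3000 / 128) = 24 eval batches, i.e. 3072 examples.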
rounded_up_multiple = math.ceil(self.num_validation_examples /
self.eval_batch_size)
return rounded_up_multiple * self.eval_batch_size
@property
def num_validation_examples(self) -> int:
# wmt14_translate/de-en 'validation' split size.
return 3000
@property
def num_test_examples(self) -> int:
# wmt14_translate/de-en 'test' split size.
return 3003
@property
def eval_batch_size(self) -> int:
return 128
@property
def METHOD_NAME(self):
raise NotImplementedError
@property
def train_stddev(self):
raise NotImplementedError
@property
def max_allowed_runtime_sec(self) -> int:
return 48_151 # ~13.5 hours
@property
def eval_period_time_sec(self) -> int:
return 14 * 60
@property
def step_hint(self) -> int:
"""Max num steps the baseline algo was given to reach the target."""
return 133_333
def _build_input_queue(self,
data_rng: jax.random.PRNGKey,
split: str,
data_dir: str,
global_batch_size: int,
num_batches: Optional[int] = None,
repeat_final_dataset: bool = False):
is_training = split == 'train'
ds, self._tokenizer = input_pipeline.get_wmt_dataset(
data_rng,
split,
data_dir,
is_training=is_training,
vocab_size=self._vocab_size,
global_batch_size=global_batch_size,
num_batches=num_batches,
repeat_final_dataset=repeat_final_dataset)
# Separate function is necessary because the code above has to be executed
# when _build_input_queue is called (not when next() is first called on it).
def _input_queue_generator():
for batch in iter(ds):
weights = batch.get('weights')
updated_weights = np.where(batch['targets'] > 0, 1, 0)
if weights is not None:
updated_weights = np.logical_and(weights, updated_weights)
batch['weights'] = updated_weights
yield batch
return _input_queue_generator()
@abc.abstractmethod
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
"""Normalize eval metrics."""
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Run a full evaluation of the model."""
del model_state
del global_step
num_batches = int(math.ceil(num_examples / global_batch_size))
if split not in self._eval_iters:
# These iterators will repeat indefinitely.
self._eval_iters[split] = self._build_input_queue(
rng,
split,
data_dir,
global_batch_size,
num_batches,
repeat_final_dataset=True)
eval_metrics = {}
for _ in range(num_batches):
eval_batch = next(self._eval_iters[split])
metrics = self.eval_step(params, eval_batch)
for metric_name, metric_value in metrics.items():
if metric_name not in eval_metrics:
eval_metrics[metric_name] = 0.0
eval_metrics[metric_name] += metric_value
eval_results = self._normalize_eval_metrics(num_examples, eval_metrics)
eval_results['bleu'] = self.translate_and_calculate_bleu(
params=params,
ds_iter=self._eval_iters[split],
num_batches=num_batches,
max_predict_length=256)
return eval_results
def compute_weighted_accuracy(
self, logits: spec.Tensor, targets: spec.Tensor,
weights: spec.Tensor) -> Tuple[spec.Tensor, spec.Tensor]:
"""Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: array of shape [batch, length]
Returns:
Tuple of scalar summed accuracy and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError(f'Incorrect shapes. Got shape {logits.shape} logits and '
f'{targets.shape} targets.')
accuracy = (logits.argmax(-1) == targets) * weights
normalizing_factor = weights.sum()
return accuracy.sum(), normalizing_factor
def _decode_tokens(self, toks: spec.Tensor) -> spec.Tensor:
if isinstance(toks, torch.Tensor):
toks = toks.cpu().numpy()
valid_toks = toks[:np.argmax(toks == decode.EOS_ID) + 1].astype(np.int32)
return self._tokenizer.detokenize(valid_toks).numpy().decode('utf-8')
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
return self.compute_weighted_cross_entropy(
logits_batch,
label_batch,
weights=mask_batch,
label_smoothing=label_smoothing)
|
959 |
do transformation
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fuse Pad into Conv Graph Rewriter."""
import tensorflow as tf
from tensorflow.python.framework import tensor_util
from neural_compressor.adaptor.tf_utils.graph_util import GraphAnalyzer
from neural_compressor.adaptor.tf_utils.graph_util import GraphRewriterHelper as Helper
from neural_compressor.adaptor.tf_utils.util import version1_gt_version2
from ..graph_base import GraphRewriterBase
class FusePadWithConv2DOptimizer(GraphRewriterBase):
"""Fuse Pad op into Conv2D/DepthwiseConv2dNative/Conv3D."""
def __init__(self, model, excluded_op_names, inputs, cfg, new_api, itex_qdq_mode=False):
"""Intilization."""
super().__init__(model)
self.excluded_conv = excluded_op_names
self.inputs = inputs
self.cfg = cfg
self.new_api = new_api
self.itex_qdq_mode = itex_qdq_mode
def METHOD_NAME(self):
"""Fuse Pad + Conv2D/DepthwiseConv2dNative/Conv3D --> Conv2D/DepthwiseConv2dNative/Conv3D."""
cur_graph = GraphAnalyzer()
cur_graph.graph = self.model
graph_info = cur_graph.parse_graph()
target_nodes = cur_graph.query_fusion_pattern_nodes(
[["Pad"], ["Conv2D", "Conv3D", "DepthwiseConv2dNative"], ("BiasAdd", "Add", "AddV2")]
)
padding_tensor_dict = {}
for node_combination in target_nodes:
conv_name = node_combination[1]
pattern = node_combination[-1]
if conv_name not in self.cfg:
continue
is_perchannel = self.cfg[conv_name][0]
            # Lines 55 to 65 should be removed once TFDO enables support for a single
            # quantized Conv2D.
if len(pattern) == 2:
                # TODO: enable single QuantizedConv2D with s8 input.
if not is_perchannel and not cur_graph.has_positive_input(conv_name):
continue
                # TFDO has the limitation that the single QuantizedConv2DPerChannel doesn't
                # support the padding_list field.
if is_perchannel:
continue
if conv_name in self.excluded_conv:
continue
padding_tensor = None
pad_node = None
if node_combination[0] not in padding_tensor_dict:
pad_node = graph_info[node_combination[0]].node
if graph_info[pad_node.input[1]].node.op != "Const":
input_node = graph_info[pad_node.input[1]].node
if input_node.op == "DataFormatVecPermute":
parent_input_node = graph_info[input_node.input[0]].node
if parent_input_node.op == "Const":
padding_tensor = tensor_util.MakeNdarray(parent_input_node.attr["value"].tensor).flatten()
else:
continue
else:
continue
else:
padding_tensor = tensor_util.MakeNdarray(
graph_info[pad_node.input[1]].node.attr["value"].tensor
).flatten()
padding_tensor_dict[node_combination[0]] = padding_tensor
else:
padding_tensor = padding_tensor_dict[node_combination[0]]
if self.itex_qdq_mode:
enabled_pad_conv2d = bool(
tf.version.VERSION == "1.15.0-up3" or version1_gt_version2(tf.version.VERSION, "2.7")
)
else:
enabled_pad_conv2d = bool(tf.version.VERSION == "1.15.0-up3" or self.new_api)
if any(padding_tensor) and not enabled_pad_conv2d: # pragma: no cover
continue
if pad_node:
if graph_info[pad_node.input[1]].node.op != "Const":
cur_graph.node_name_details[pad_node.name].node.input.remove(pad_node.input[1])
cur_graph.remove_node_with_single_input_output(pad_node.name)
else:
cur_graph.remove_node_with_single_input_output(pad_node.name)
cur_graph.remove_node(pad_node.input[1])
conv_node = graph_info[node_combination[1]].node
if self.itex_qdq_mode:
if any(padding_tensor) and enabled_pad_conv2d: # pragma: no cover
Helper.set_attr_string(conv_node, "padding", b"EXPLICIT")
Helper.set_attr_int_list(conv_node, "explicit_paddings", padding_tensor)
else:
Helper.set_attr_int_list(conv_node, "padding_list", padding_tensor)
if any(padding_tensor) and enabled_pad_conv2d: # pragma: no cover
Helper.set_attr_string(conv_node, "padding", b"EXPLICIT")
return cur_graph.dump_graph()
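# Rough illustration of the rewrite (node names and padding values are assumptions):
#   Pad(paddings=[[0,0],[1,1],[1,1],[0,0]]) -> Conv2D(padding='VALID') -> BiasAdd
# becomes a graph where the Pad node is removed and the flattened paddings are attached
# to the Conv2D node, either as its 'padding_list' attribute or, in ITEX QDQ mode on
# supported TF versions, as 'explicit_paddings' with padding set to EXPLICIT.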
|
960 |
do split
|
#!/usr/bin/env python3
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r"\s*#.*")
# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
return matchobj.group(1) + matchobj.group(2) + "#" * len(matchobj.group(3))
def mask_comments(input):
"""Mask the quoted strings so we skip braces inside quoted strings."""
search_re = re.compile(r"(.*?)(#)(.*)")
return [search_re.sub(comment_replace, line) for line in input]
def quote_replace(matchobj):
return "{}{}{}{}".format(
matchobj.group(1),
matchobj.group(2),
"x" * len(matchobj.group(3)),
matchobj.group(2),
)
def mask_quotes(input):
"""Mask the quoted strings so we skip braces inside quoted strings."""
search_re = re.compile(r"(.*?)" + QUOTE_RE_STR)
return [search_re.sub(quote_replace, line) for line in input]
def METHOD_NAME(input, masked_input, search_re):
output = []
mask_output = []
for (line, masked_line) in zip(input, masked_input):
m = search_re.match(masked_line)
while m:
split = len(m.group(1))
line = line[:split] + r"\n" + line[split:]
masked_line = masked_line[:split] + r"\n" + masked_line[split:]
m = search_re.match(masked_line)
output.extend(line.split(r"\n"))
mask_output.extend(masked_line.split(r"\n"))
return (output, mask_output)
def split_double_braces(input):
"""Masks out the quotes and comments, and then splits appropriate
    lines (lines that match the double_*_brace re's above) before
indenting them below.
These are used to split lines which have multiple braces on them, so
that the indentation looks prettier when all laid out (e.g. closing
braces make a nice diagonal line).
"""
double_open_brace_re = re.compile(r"(.*?[\[\{\(,])(\s*)([\[\{\(])")
double_close_brace_re = re.compile(r"(.*?[\]\}\)],?)(\s*)([\]\}\)])")
masked_input = mask_quotes(input)
masked_input = mask_comments(masked_input)
(output, mask_output) = METHOD_NAME(input, masked_input, double_open_brace_re)
(output, mask_output) = METHOD_NAME(output, mask_output, double_close_brace_re)
return output
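# Illustrative sketch (made-up input): split_double_braces(["'deps': [{", "}],"])
# roughly yields ["'deps': [", "{", "}", "],"], so each brace ends up on its own line
# before indentation is applied.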
def count_braces(line):
"""keeps track of the number of braces on a given line and returns the result.
It starts at zero and subtracts for closed braces, and adds for open braces.
"""
open_braces = ["[", "(", "{"]
close_braces = ["]", ")", "}"]
closing_prefix_re = re.compile(r"[^\s\]\}\)]\s*[\]\}\)]+,?\s*$")
cnt = 0
stripline = COMMENT_RE.sub(r"", line)
stripline = QUOTE_RE.sub(r"''", stripline)
for char in stripline:
for brace in open_braces:
if char == brace:
cnt += 1
for brace in close_braces:
if char == brace:
cnt -= 1
after = False
if cnt > 0:
after = True
# This catches the special case of a closing brace having something
# other than just whitespace ahead of it -- we don't want to
# unindent that until after this line is printed so it stays with
# the previous indentation level.
if cnt < 0 and closing_prefix_re.match(stripline):
after = True
return (cnt, after)
def prettyprint_input(lines):
"""Does the main work of indenting the input based on the brace counts."""
indent = 0
basic_offset = 2
for line in lines:
if COMMENT_RE.match(line):
print(line)
else:
line = line.strip("\r\n\t ") # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print(" " * (basic_offset * indent) + line)
indent += brace_diff
else:
indent += brace_diff
print(" " * (basic_offset * indent) + line)
else:
print(" " * (basic_offset * indent) + line)
else:
print("")
def main():
if len(sys.argv) > 1:
data = open(sys.argv[1]).read().splitlines()
else:
data = sys.stdin.read().splitlines()
# Split up the double braces.
lines = split_double_braces(data)
# Indent and print the output.
prettyprint_input(lines)
return 0
if __name__ == "__main__":
sys.exit(main())
|
961 |
test circ policy create
|
# -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019 RERO
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Circulation policies tests."""
from __future__ import absolute_import, print_function
from copy import deepcopy
import pytest
from jsonschema.exceptions import ValidationError
from rero_ils.modules.circ_policies.api import CircPolicy, \
circ_policy_id_fetcher
def test_no_default_policy(app):
"""Test when no default circulation policy configured."""
cipo = CircPolicy.get_default_circ_policy('org1')
assert not cipo
def METHOD_NAME(circ_policy_martigny_data_tmp,
circ_policy_short_martigny_data,
org_martigny,
lib_martigny, lib_saxon,
patron_type_children_martigny,
item_type_standard_martigny,
patron_type_adults_martigny,
item_type_specific_martigny,
item_type_regular_sion,
patron_type_youngsters_sion):
"""Test circulation policy creation."""
cipo = CircPolicy.create(circ_policy_martigny_data_tmp, delete_pid=True)
assert cipo == circ_policy_martigny_data_tmp
assert cipo.get('pid') == '1'
cipo = CircPolicy.get_record_by_pid('1')
assert cipo == circ_policy_martigny_data_tmp
fetched_pid = circ_policy_id_fetcher(cipo.id, cipo)
assert fetched_pid.pid_value == '1'
assert fetched_pid.pid_type == 'cipo'
circ_policy_data = deepcopy(circ_policy_short_martigny_data)
del circ_policy_data['$schema']
cipo = CircPolicy.create(circ_policy_data, delete_pid=True)
assert cipo.get('$schema')
assert cipo.get('pid') == '2'
cipo_data = {
'$schema': 'https://bib.rero.ch/schemas/'
'circ_policies/circ_policy-v0.0.1.json',
'pid': 'cipo_test',
'name': 'test',
'organisation': {
'$ref': 'https://bib.rero.ch/api/organisations/org1'
},
'is_default': False,
'allow_requests': True,
'policy_library_level': False,
'settings': [{
'patron_type': {
'$ref': 'https://bib.rero.ch/api/patron_types/ptty3'
},
'item_type': {
'$ref': 'https://bib.rero.ch/api/item_types/itty1'
}
}, {
'patron_type': {
'$ref': 'https://bib.rero.ch/api/patron_types/ptty2'
},
'item_type': {
'$ref': 'https://bib.rero.ch/api/item_types/itty4'
}
}]
}
with pytest.raises(ValidationError):
cipo = CircPolicy.create(cipo_data, delete_pid=False)
    # TEST #2: create a second default policy
# The first created policy (pid=1) is the default policy.
# Creation of a second default policy should raise a ValidationError
default_cipo = CircPolicy.get_record_by_pid('1')
assert default_cipo.get('is_default')
with pytest.raises(ValidationError) as excinfo:
CircPolicy.create(circ_policy_martigny_data_tmp, delete_pid=True)
assert 'CircPolicy: already a default policy for this org' \
in str(excinfo.value)
def test_circ_policy_exist_name_and_organisation_pid(
circ_policy_short_martigny):
"""Test policy name existence."""
cipo = circ_policy_short_martigny.replace_refs()
assert CircPolicy.exist_name_and_organisation_pid(
cipo.get('name'), cipo.get('organisation', {}).get('pid'))
assert not CircPolicy.exist_name_and_organisation_pid(
'not exists yet', cipo.get('organisation', {}).get('pid'))
def test_circ_policy_can_not_delete(circ_policy_short_martigny):
"""Test can not delete a policy."""
org_pid = circ_policy_short_martigny.organisation_pid
    default_cipo = CircPolicy.get_default_circ_policy(org_pid)
    can, reasons = default_cipo.can_delete
assert not can
assert reasons['others']['is_default']
can, reasons = circ_policy_short_martigny.can_delete
assert can
assert reasons == {}
def test_circ_policy_can_delete(app, circ_policy_martigny_data_tmp):
"""Test can delete a policy."""
circ_policy_martigny_data_tmp['is_default'] = False
cipo = CircPolicy.create(circ_policy_martigny_data_tmp, delete_pid=True)
can, reasons = cipo.can_delete
assert can
assert reasons == {}
def test_circ_policy_extended_validation(
app,
circ_policy_short_martigny,
circ_policy_short_martigny_data
):
"""Test extended validation for circ policy"""
cipo_data = deepcopy(circ_policy_short_martigny_data)
cipo_data['allow_requests'] = False
cipo_data['pickup_hold_duration'] = 10
del cipo_data['pid']
cipo = CircPolicy.create(cipo_data)
assert cipo
assert 'pickup_hold_duration' not in cipo
cipo.delete()
|
962 |
list
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.policyinsights.aio.PolicyInsightsClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = METHOD_NAME(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def METHOD_NAME(self, **kwargs: Any) -> _models.OperationsListResults:
"""Lists available operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OperationsListResults or the result of cls(response)
:rtype: ~azure.mgmt.policyinsights.models.OperationsListResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-04-01"))
cls: ClsType[_models.OperationsListResults] = kwargs.pop("cls", None)
request = build_list_request(
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.QueryFailure, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("OperationsListResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
METHOD_NAME.metadata = {"url": "/providers/Microsoft.PolicyInsights/operations"}
|
963 |
name
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPrivateLinkServicesForM365ComplianceCenterResult',
'AwaitableGetPrivateLinkServicesForM365ComplianceCenterResult',
'get_private_link_services_for_m365_compliance_center',
'get_private_link_services_for_m365_compliance_center_output',
]
@pulumi.output_type
class GetPrivateLinkServicesForM365ComplianceCenterResult:
"""
The description of the service.
"""
def __init__(__self__, etag=None, id=None, identity=None, kind=None, location=None, METHOD_NAME=None, properties=None, system_data=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", METHOD_NAME)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
An etag associated with the resource, used for optimistic concurrency when editing it.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ServicesResourceResponseIdentity']:
"""
Setting indicating whether the service has a managed identity associated with it.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def kind(self) -> str:
"""
The kind of the service.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> str:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ServicesPropertiesResponse':
"""
The common properties of a service.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(METHOD_NAME="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Required property for system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateLinkServicesForM365ComplianceCenterResult(GetPrivateLinkServicesForM365ComplianceCenterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateLinkServicesForM365ComplianceCenterResult(
etag=self.etag,
id=self.id,
identity=self.identity,
kind=self.kind,
location=self.location,
METHOD_NAME=self.METHOD_NAME,
properties=self.properties,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_private_link_services_for_m365_compliance_center(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateLinkServicesForM365ComplianceCenterResult:
"""
Get the metadata of a privateLinkServicesForM365ComplianceCenter resource.
Azure REST API version: 2021-03-25-preview.
:param str resource_group_name: The name of the resource group that contains the service instance.
:param str resource_name: The name of the service instance.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:m365securityandcompliance:getPrivateLinkServicesForM365ComplianceCenter', __args__, opts=opts, typ=GetPrivateLinkServicesForM365ComplianceCenterResult).value
return AwaitableGetPrivateLinkServicesForM365ComplianceCenterResult(
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
kind=pulumi.get(__ret__, 'kind'),
location=pulumi.get(__ret__, 'location'),
METHOD_NAME=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_link_services_for_m365_compliance_center)
def get_private_link_services_for_m365_compliance_center_output(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateLinkServicesForM365ComplianceCenterResult]:
"""
Get the metadata of a privateLinkServicesForM365ComplianceCenter resource.
Azure REST API version: 2021-03-25-preview.
:param str resource_group_name: The name of the resource group that contains the service instance.
:param str resource_name: The name of the service instance.
"""
...
|
964 |
unix server
|
import asyncio
import asyncio.events
import contextlib
import os
import pprint
import select
import socket
import tempfile
import threading
class FunctionalTestCaseMixin:
def new_loop(self):
return asyncio.new_event_loop()
def run_loop_briefly(self, *, delay=0.01):
self.loop.run_until_complete(asyncio.sleep(delay))
def loop_exception_handler(self, loop, context):
self.__unhandled_exceptions.append(context)
self.loop.default_exception_handler(context)
def setUp(self):
self.loop = self.new_loop()
asyncio.set_event_loop(None)
self.loop.set_exception_handler(self.loop_exception_handler)
self.__unhandled_exceptions = []
# Disable `_get_running_loop`.
self._old_get_running_loop = asyncio.events._get_running_loop
asyncio.events._get_running_loop = lambda: None
def tearDown(self):
try:
self.loop.close()
if self.__unhandled_exceptions:
print('Unexpected calls to loop.call_exception_handler():')
pprint.pprint(self.__unhandled_exceptions)
self.fail('unexpected calls to loop.call_exception_handler()')
finally:
asyncio.events._get_running_loop = self._old_get_running_loop
asyncio.set_event_loop(None)
self.loop = None
def tcp_server(self, server_prog, *,
family=socket.AF_INET,
addr=None,
timeout=5,
backlog=1,
max_clients=10):
if addr is None:
if hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
with tempfile.NamedTemporaryFile() as tmp:
addr = tmp.name
else:
addr = ('127.0.0.1', 0)
sock = socket.create_server(addr, family=family, backlog=backlog)
if timeout is None:
raise RuntimeError('timeout is required')
if timeout <= 0:
raise RuntimeError('only blocking sockets are supported')
sock.settimeout(timeout)
return TestThreadedServer(
self, sock, server_prog, timeout, max_clients)
def tcp_client(self, client_prog,
family=socket.AF_INET,
timeout=10):
sock = socket.socket(family, socket.SOCK_STREAM)
if timeout is None:
raise RuntimeError('timeout is required')
if timeout <= 0:
raise RuntimeError('only blocking sockets are supported')
sock.settimeout(timeout)
return TestThreadedClient(
self, sock, client_prog, timeout)
def METHOD_NAME(self, *args, **kwargs):
if not hasattr(socket, 'AF_UNIX'):
raise NotImplementedError
return self.tcp_server(*args, family=socket.AF_UNIX, **kwargs)
def unix_client(self, *args, **kwargs):
if not hasattr(socket, 'AF_UNIX'):
raise NotImplementedError
return self.tcp_client(*args, family=socket.AF_UNIX, **kwargs)
@contextlib.contextmanager
def unix_sock_name(self):
with tempfile.TemporaryDirectory() as td:
fn = os.path.join(td, 'sock')
try:
yield fn
finally:
try:
os.unlink(fn)
except OSError:
pass
def _abort_socket_test(self, ex):
try:
self.loop.stop()
finally:
self.fail(ex)
##############################################################################
# Socket Testing Utilities
##############################################################################
class TestSocketWrapper:
def __init__(self, sock):
self.__sock = sock
def recv_all(self, n):
buf = b''
while len(buf) < n:
data = self.recv(n - len(buf))
if data == b'':
raise ConnectionAbortedError
buf += data
return buf
def start_tls(self, ssl_context, *,
server_side=False,
server_hostname=None):
ssl_sock = ssl_context.wrap_socket(
self.__sock, server_side=server_side,
server_hostname=server_hostname,
do_handshake_on_connect=False)
try:
ssl_sock.do_handshake()
except:
ssl_sock.close()
raise
finally:
self.__sock.close()
self.__sock = ssl_sock
def __getattr__(self, name):
return getattr(self.__sock, name)
def __repr__(self):
return '<{} {!r}>'.format(type(self).__name__, self.__sock)
class SocketThread(threading.Thread):
def stop(self):
self._active = False
self.join()
def __enter__(self):
self.start()
return self
def __exit__(self, *exc):
self.stop()
class TestThreadedClient(SocketThread):
def __init__(self, test, sock, prog, timeout):
threading.Thread.__init__(self, None, None, 'test-client')
self.daemon = True
self._timeout = timeout
self._sock = sock
self._active = True
self._prog = prog
self._test = test
def run(self):
try:
self._prog(TestSocketWrapper(self._sock))
except Exception as ex:
self._test._abort_socket_test(ex)
class TestThreadedServer(SocketThread):
def __init__(self, test, sock, prog, timeout, max_clients):
threading.Thread.__init__(self, None, None, 'test-server')
self.daemon = True
self._clients = 0
self._finished_clients = 0
self._max_clients = max_clients
self._timeout = timeout
self._sock = sock
self._active = True
self._prog = prog
self._s1, self._s2 = socket.socketpair()
self._s1.setblocking(False)
self._test = test
def stop(self):
try:
if self._s2 and self._s2.fileno() != -1:
try:
self._s2.send(b'stop')
except OSError:
pass
finally:
super().stop()
def run(self):
try:
with self._sock:
self._sock.setblocking(0)
self._run()
finally:
self._s1.close()
self._s2.close()
def _run(self):
while self._active:
if self._clients >= self._max_clients:
return
r, w, x = select.select(
[self._sock, self._s1], [], [], self._timeout)
if self._s1 in r:
return
if self._sock in r:
try:
conn, addr = self._sock.accept()
except BlockingIOError:
continue
except socket.timeout:
if not self._active:
return
else:
raise
else:
self._clients += 1
conn.settimeout(self._timeout)
try:
with conn:
self._handle_client(conn)
except Exception as ex:
self._active = False
try:
raise
finally:
self._test._abort_socket_test(ex)
def _handle_client(self, sock):
self._prog(TestSocketWrapper(sock))
@property
def addr(self):
return self._sock.getsockname()
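# Minimal usage sketch (assumed to live inside a FunctionalTestCaseMixin-based test):
#   def echo_server(sock):
#       data = sock.recv_all(4)
#       sock.sendall(data.upper())
#   with self.tcp_server(echo_server) as srv:
#       # connect a plain socket to srv.addr, send b'ping', and assert b'PING' comes back
#       ...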
|
965 |
bot messages
|
# %%
from typing import Dict, Optional
import os
import copy
import pprint
import logging
import sentry_sdk
sentry_sdk.init(os.getenv("SENTRY_DSN"))
logger = logging.getLogger(__name__)
MAX_HISTORY_LEN = 4
game_keys = ["id", "name_original"]
def get_game_hash(data):
if all([key in data for key in game_keys]):
return ":".join([f"{key}:{data[key]}" for key in game_keys])
def get_game_by_hash(hash, games):
try:
hash_segments = hash.split(":")
if len(hash_segments) > 2 and hash_segments[0] == "id":
return games[hash]
except Exception as exc:
sentry_sdk.capture_exception(exc)
logger.exception(exc)
games = list(games.values())
alt_game = games[0] if games else {}
return alt_game
def serialize_games(data):
if isinstance(data, list):
return [serialize_games(i) for i in data]
elif isinstance(data, dict):
game_hash = get_game_hash(data)
if game_hash is not None:
return game_hash
else:
return {key: serialize_games(val) for key, val in data.items()}
else:
return data
def deserialize_games(data, games):
if isinstance(data, list):
return [deserialize_games(i, games) for i in data]
elif isinstance(data, dict):
return {key: deserialize_games(val, games) for key, val in data.items()}
elif isinstance(data, str):
game = get_game_by_hash(data, games)
if game:
return game
else:
return data
else:
return data
class State:
def __init__(self, games: Dict, state: Optional[Dict] = None):
if not state:
self.state = {
"content_state": {}, # {content_type_str: [content_1_dict, content_2_dict, ...], ...}
"skill_scores": {}, # {skill_name_str: [scores_1_dict, scores_2_dict, ...], ...}
"skill_states": {}, # {skill_name_1_str: skill_state_1_dict, ...}
"messages": [],
"hypotheses": [],
"intents": {},
"policy_state": {"current_scenario_skill": "", "interrupted_scenario_stack": [], "st2": {}},
}
else:
self.state = deserialize_games(state, games)
self.state["hypotheses"] = []
self.state["intents"] = {}
def get_skill_state(self, skill_name: str):
return self.state["skill_states"].get(skill_name, {})
def update_st2_policy(self, policy: Dict):
self.state["policy_state"]["st2"].update(policy)
def reset_st2_policy(self):
self.state["policy_state"]["st2"] = {}
def update_skill_state(self, skill_name: str, skill_state: Dict):
if skill_name in self.state["skill_states"]:
self.state["skill_states"][skill_name].update(skill_state)
else:
self.state["skill_states"][skill_name] = skill_state
def get_content(self, content_name: str, **kwargs):
return self.state["content_state"].get(content_name, [])
def add_content(self, content_name: str, content: Dict, **kwargs):
self.state["content_state"][content_name] = self.state["content_state"].get(content_name, []) + [content]
def add_skill_scores(self, skill_name: str, scores: Dict, **kwargs):
scores = copy.deepcopy(scores)
scores.update(kwargs)
self.state["skill_scores"][skill_name] = self.state["skill_scores"].get(skill_name, []) + [scores]
def add_message(self, msg):
self.state["messages"].append(msg)
if len(self.state["messages"]) > MAX_HISTORY_LEN:
self.state["messages"].pop(0)
def add_human_message(self, text: str, **kwargs):
msg = {"user_type": "human", "text": text}
msg.update(kwargs)
self.add_message(msg)
def add_bot_message(self, skill_name: str, text: str, confidence: float, scenario: bool = False, **kwargs):
msg = {"user_type": "bot", "skill_name": skill_name, "text": text, "confidence": confidence}
self.state["policy_state"]["current_scenario_skill"] = skill_name if scenario else ""
msg.update(kwargs)
self.add_message(msg)
def add_hypothesis(self, skill_name: str, text: str, confidence: float, scenario: bool = False, **kwargs):
hypothesis = {
"skill_name": skill_name,
"text": text,
"confidence": confidence,
"scenario": scenario,
}
hypothesis.update(kwargs)
self.state["hypotheses"].append(hypothesis)
def add_intent(self, intent_model_name: str, intent: Dict, **kwargs):
intent = copy.deepcopy(intent)
intent.update(kwargs)
self.state["intents"][intent_model_name] = intent
def interrupt_scenario(self):
if self.current_scenario_skill:
self.state["policy_state"]["interrupted_scenario_stack"].append(self.current_scenario_skill)
def to_dict(self):
return dict(serialize_games(self.state))
def __repr__(self):
return pprint.pformat(self.to_dict())
@property
def content_state(self):
return self.state["content_state"]
@property
def skill_stats(self):
return self.state["skill_stats"]
@property
def skill_states(self):
return self.state["skill_states"]
@property
def utterances(self):
return [msg["text"] for msg in self.state["messages"]]
@property
def human_utterances(self):
return [msg["text"] for msg in self.state["messages"] if msg["user_type"] == "human"]
@property
def bot_utterances(self):
return [msg["text"] for msg in self.state["messages"] if msg["user_type"] == "bot"]
@property
def skill_history(self):
return [msg.get("skill_name", "") for msg in self.state["messages"] if msg["user_type"] == "bot"]
@property
def messages(self):
return self.state["messages"]
@property
def human_messages(self):
return [msg for msg in self.state["messages"] if msg["user_type"] == "human"]
@property
def METHOD_NAME(self):
return [msg for msg in self.state["messages"] if msg["user_type"] == "bot"]
@property
def hypotheses(self):
return self.state["hypotheses"]
@property
def intents(self):
return self.state["intents"]
@property
def current_scenario_skill(self):
return self.state["policy_state"]["current_scenario_skill"]
@property
def interrupted_scenario_stack(self):
return self.state["policy_state"]["interrupted_scenario_stack"]
@property
def st2_policy(self):
return self.state["policy_state"]["st2"]
|
966 |
load network dict
|
import datetime
import re
from dateutil import tz
from sickchill.helper.common import try_int
from .. import logger
from . import db, helpers
# regex to parse time (12/24 hour format)
time_regex = re.compile(r"(?P<hour>\d{1,2})(?:[:.](?P<minute>\d{2})?)? ?(?P<meridiem>[PA]\.? ?M?)?\b", re.I)
network_dict = {}
sb_timezone = tz.tzlocal()
def update_network_dict():
"""Update timezone information from SC repositories"""
url = "https://sickchill.github.io/sb_network_timezones/network_timezones.txt"
data = helpers.getURL(url, session=helpers.make_session(), returns="text")
if not data:
logger.warning(f"Updating network timezones failed, this can happen from time to time. URL: {url}")
METHOD_NAME()
return
d = {}
try:
for line in data.splitlines():
(key, val) = line.strip().rsplit(":", 1)
            if not val:  # no timezone given, so set a default
val = "US/Eastern"
if key and val:
d[key.lower()] = val
except (IOError, OSError):
raise
if not d:
logger.warning("Parsing network timezones failed, not going to touch the db")
METHOD_NAME()
return
cache_db_con = db.DBConnection("cache.db")
network_list = dict(cache_db_con.select("SELECT * FROM network_timezones;"))
queries = []
for network, timezone in d.items():
existing = network in network_list
if not existing:
queries.append(["INSERT OR IGNORE INTO network_timezones VALUES (?,?);", [network, timezone]])
elif network_list[network] != timezone:
queries.append(["UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;", [timezone, network]])
if existing:
del network_list[network]
for network in network_list:
queries.append(["DELETE FROM network_timezones WHERE network_name = ?;", [network]])
if queries:
cache_db_con.mass_action(queries)
METHOD_NAME()
def METHOD_NAME():
"""
Load network timezones from db into dict network_dict (global dict)
"""
try:
cache_db_con = db.DBConnection("cache.db")
cur_network_list = cache_db_con.select("SELECT * FROM network_timezones;")
if not cur_network_list:
update_network_dict()
cur_network_list = cache_db_con.select("SELECT * FROM network_timezones;")
network_dict.clear()
network_dict.update(dict(cur_network_list))
except Exception:
pass
def get_network_timezone(network):
"""
Get the timezone of a network, or return sb_timezone
:param network: network to look up
:return: network timezone if found, or sb_timezone
"""
if network:
network = network.lower()
network_tz_name = network_dict.get(network)
try:
network_tz = (tz.gettz(network_tz_name) or sb_timezone) if network_tz_name else sb_timezone
except Exception:
return sb_timezone
return network_tz
def parse_date_time(d, t, network):
"""
Parse date and time string into local time
:param d: date string
:param t: time string
:param network: network to use as base
:return: datetime object containing local time
"""
if not network_dict:
METHOD_NAME()
parsed_time = time_regex.search(t)
network_tz = get_network_timezone(network)
hr = 0
m = 0
if parsed_time:
hr = try_int(parsed_time.group("hour"))
m = try_int(parsed_time.group("minute"))
ap = parsed_time.group("meridiem")
ap = ap[0].lower() if ap else ""
if ap == "a" and hr == 12:
hr -= 12
elif ap == "p" and hr != 12:
hr += 12
hr = hr if 0 <= hr <= 23 else 0
m = m if 0 <= m <= 59 else 0
result = datetime.datetime.fromordinal(max(try_int(d), 1))
return result.replace(hour=hr, minute=m, tzinfo=network_tz)
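# Illustrative sketch, assuming the cached mapping resolves "nbc" to US/Eastern:
#   parse_date_time(datetime.date(2020, 1, 1).toordinal(), "8:30 PM", "NBC")
# would give a datetime for 2020-01-01 20:30 in that network's timezone.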
def test_timeformat(time_string):
return time_regex.search(time_string) is not None
|
967 |
test export attendees csv
|
import unittest
from datetime import datetime
from app.api.helpers.csv_jobs_util import *
from app.models import db
from tests.all.integration.auth_helper import create_user
from tests.all.integration.utils import OpenEventTestCase
from tests.factories import common
from tests.factories.attendee import AttendeeFactory
from tests.factories.custom_form import CustomFormFactory
from tests.factories.order import OrderFactory
from tests.factories.session import SessionSubFactory
from tests.factories.speaker import SpeakerFactory
from app.models.custom_form import ATTENDEE_CUSTOM_FORM
class TestExportCSV(OpenEventTestCase):
def test_export_orders_csv(self):
"""Method to check the orders data export"""
with self.app.test_request_context():
test_order = OrderFactory(created_at=datetime.now())
test_order.amount = 2
field_data = export_orders_csv([test_order])
assert field_data[1][2] == 'initializing'
assert field_data[1][5] == '2'
def METHOD_NAME(self):
"""Method to check the attendees data export"""
with self.app.test_request_context():
test_attendee = AttendeeFactory()
test_order = OrderFactory(created_at=datetime.now())
test_attendee.order = test_order
custom_forms = CustomFormFactory()
field_data = export_attendees_csv(
[test_attendee], [custom_forms], ATTENDEE_CUSTOM_FORM)
            # export_attendees_csv returns a list of dictionaries for the csv writer
assert field_data[0].get("Tax ID") == "tax id"
def _test_export_session_csv(self, test_session=None):
with self.app.test_request_context():
if not test_session:
test_session = SessionSubFactory()
field_data = export_sessions_csv([test_session])
session_row = field_data[1]
assert session_row[0] == 'example (accepted)'
assert session_row[12] == 'accepted'
def test_export_sessions_csv(self):
"""Method to check sessions data export"""
with self.app.test_request_context():
self._test_export_session_csv()
def test_export_sessions_none_csv(self):
"""Method to check sessions data export with no abstract"""
with self.app.test_request_context():
test_session = SessionSubFactory()
test_session.long_abstract = None
test_session.level = None
self._test_export_session_csv(test_session)
def test_export_sessions_with_details_csv(self):
"""Method to check that sessions details are correct"""
with self.app.test_request_context():
test_session = SessionSubFactory(
short_abstract='short_abstract',
long_abstract='long_abstract',
comments='comment',
level='level',
created_at=common.date_,
average_rating=common.average_rating_,
rating_count=common.rating_count_,
)
db.session.commit()
field_data = export_sessions_csv([test_session])
session_row = field_data[1]
assert session_row == [
'example (accepted)',
test_session.starts_at.astimezone(
pytz.timezone(test_session.event.timezone)
).strftime('%B %-d, %Y %H:%M %z'),
test_session.ends_at.astimezone(
pytz.timezone(test_session.event.timezone)
).strftime('%B %-d, %Y %H:%M %z'),
'',
'',
common.string_,
'short_abstract',
'long_abstract',
'comment',
session_row[9],
'Yes',
'level',
'accepted',
'',
'',
'English',
common.url_,
common.url_,
common.url_,
common.average_rating_,
common.rating_count_,
]
def test_export_speakers_csv(self):
"""Method to check speakers data export"""
with self.app.test_request_context():
test_speaker = SpeakerFactory(
name='Mario Behling',
mobile='9004345009',
short_biography='Speaker Bio',
organisation='FOSSASIA',
position='position',
speaking_experience='1',
sponsorship_required='No',
city='Berlin',
country='Germany',
)
user = create_user(email='[email protected]', password='password')
user.id = 2
field_data = export_speakers_csv([test_speaker])
speaker_row = field_data[1]
assert speaker_row[0] == 'Mario Behling'
assert speaker_row[1] == '[email protected]'
assert speaker_row[2] == ''
assert speaker_row[3] == '9004345009'
assert speaker_row[4] == 'Speaker Bio'
assert speaker_row[5] == 'FOSSASIA'
assert speaker_row[6] == 'position'
assert speaker_row[7] == '1'
assert speaker_row[8] == 'No'
assert speaker_row[9] == 'Berlin'
assert speaker_row[10] == 'Germany'
assert speaker_row[11] == common.url_
assert speaker_row[12] == common.url_
assert speaker_row[13] == common.url_
assert speaker_row[14] == common.url_
assert speaker_row[15] == common.url_
if __name__ == '__main__':
unittest.main()
|
968 |
repository external slug
|
from __future__ import annotations
from typing import Any, Mapping, MutableMapping, Sequence
from sentry.integrations import IntegrationInstallation
from sentry.models import Organization, PullRequest, Repository
from sentry.plugins.providers import IntegrationRepositoryProvider
from sentry.services.hybrid_cloud.integration import integration_service
from sentry.services.hybrid_cloud.organization.model import RpcOrganization
from sentry.shared_integrations.exceptions import ApiError, IntegrationError
from sentry.utils.json import JSONData
WEBHOOK_EVENTS = ["push", "pull_request"]
class GitHubRepositoryProvider(IntegrationRepositoryProvider):
name = "GitHub"
repo_provider = "github"
def _validate_repo(
self, client: Any, installation: IntegrationInstallation, repo: str
) -> JSONData:
try:
repo_data = client.get_repo(repo)
except Exception as e:
raise installation.raise_error(e)
try:
# make sure installation has access to this specific repo
# use hooks endpoint since we explicitly ask for those permissions
# when installing the app (commits can be accessed for public repos)
# https://docs.github.com/en/rest/webhooks/repo-config#list-hooks
client.repo_hooks(repo)
except ApiError:
raise IntegrationError(f"You must grant Sentry access to {repo}")
return repo_data
def get_repository_data(
self, organization: Organization, config: MutableMapping[str, Any]
) -> Mapping[str, Any]:
installation = self.get_installation(config.get("installation"), organization.id)
client = installation.get_client()
repo = self._validate_repo(client, installation, config["identifier"])
config["external_id"] = str(repo["id"])
config["integration_id"] = installation.model.id
return config
def build_repository_config(
self, organization: RpcOrganization, data: Mapping[str, Any]
) -> Mapping[str, Any]:
return {
"name": data["identifier"],
"external_id": data["external_id"],
"url": "https://github.com/{}".format(data["identifier"]),
"config": {"name": data["identifier"]},
"integration_id": data["integration_id"],
}
def compare_commits(
self, repo: Repository, start_sha: str | None, end_sha: str
) -> Sequence[Mapping[str, Any]]:
def eval_commits(client: Any) -> Sequence[Mapping[str, Any]]:
# use config name because that is kept in sync via webhooks
name = repo.config["name"]
if start_sha is None:
res = client.get_last_commits(name, end_sha)
return self._format_commits(client, name, res[:20])
else:
res = client.compare_commits(name, start_sha, end_sha)
return self._format_commits(client, name, res["commits"])
integration_id = repo.integration_id
if integration_id is None:
raise NotImplementedError("GitHub apps requires an integration id to fetch commits")
integration = integration_service.get_integration(integration_id=integration_id)
installation = integration.get_installation(organization_id=repo.organization_id)
client = installation.get_client()
try:
return eval_commits(client)
except Exception as e:
installation.raise_error(e)
return []
def _format_commits(
self,
client: Any,
repo_name: str,
commit_list: JSONData,
) -> Sequence[Mapping[str, Any]]:
"""Convert GitHub commits into our internal format
For each commit in the list we have to fetch patch data, as the
compare API gives us all of the files changed in the commit
range but not which files changed in each commit. Without this
we cannot know which specific commit changed a given file.
See sentry.models.Release.set_commits
"""
return [
{
"id": c["sha"],
"repository": repo_name,
"author_email": c["commit"]["author"].get("email"),
"author_name": c["commit"]["author"].get("name"),
"message": c["commit"]["message"],
"timestamp": self.format_date(c["commit"]["author"].get("date")),
"patch_set": self._get_patchset(client, repo_name, c["sha"]),
}
for c in commit_list
]
def _get_patchset(self, client: Any, repo_name: str, sha: str) -> Sequence[Mapping[str, Any]]:
"""Get the modified files for a commit"""
commit = client.get_commit(repo_name, sha)
return self._transform_patchset(commit["files"])
def _transform_patchset(self, diff: Sequence[Mapping[str, Any]]) -> Sequence[Mapping[str, Any]]:
"""Convert the patch data from GitHub into our internal format
See sentry.models.Release.set_commits
"""
changes = []
for change in diff:
if change["status"] == "modified":
changes.append({"path": change["filename"], "type": "M"})
if change["status"] == "added":
changes.append({"path": change["filename"], "type": "A"})
if change["status"] == "removed":
changes.append({"path": change["filename"], "type": "D"})
if change["status"] == "renamed":
changes.append({"path": change["previous_filename"], "type": "D"})
changes.append({"path": change["filename"], "type": "A"})
return changes
def pull_request_url(self, repo: Repository, pull_request: PullRequest) -> str:
return f"{repo.url}/pull/{pull_request.key}"
def METHOD_NAME(self, repo: Repository) -> str:
return repo.name
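# Illustrative sketch (not part of Sentry's provider; the sample data is made up): how
# _transform_patchset flattens a GitHub "files" payload into the internal change format.
# A rename becomes a delete of the previous path plus an add of the new one.
#
#     diff = [
#         {"status": "modified", "filename": "src/app.py"},
#         {"status": "renamed", "filename": "new.py", "previous_filename": "old.py"},
#     ]
#     provider._transform_patchset(diff)
#     # -> [{"path": "src/app.py", "type": "M"},
#     #     {"path": "old.py", "type": "D"},
#     #     {"path": "new.py", "type": "A"}]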
|
969 |
can manage attachments
|
# This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import session
from sqlalchemy import DDL
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.base import NEVER_SET, NO_VALUE
from indico.core.db import db
from indico.core.db.sqlalchemy.locations import LocationMixin
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.modules.events.timetable.models.entries import TimetableEntry
from indico.util.iterables import materialize_iterable
from indico.util.locators import locator_property
from indico.util.string import format_repr, slugify
class SessionBlock(LocationMixin, db.Model):
__tablename__ = 'session_blocks'
__auto_table_args = (db.UniqueConstraint('id', 'session_id'), # useless but needed for the compound fkey
db.CheckConstraint("date_trunc('minute', duration) = duration", 'duration_no_seconds'),
{'schema': 'events'})
location_backref_name = 'session_blocks'
allow_relationship_preloading = True
@declared_attr
def __table_args__(cls):
return auto_table_args(cls)
id = db.Column(
db.Integer,
primary_key=True
)
session_id = db.Column(
db.Integer,
db.ForeignKey('events.sessions.id'),
index=True,
nullable=False
)
title = db.Column(
db.String,
nullable=False,
default=''
)
code = db.Column(
db.String,
nullable=False,
default=''
)
duration = db.Column(
db.Interval,
nullable=False
)
#: Persons associated with this session block
person_links = db.relationship(
'SessionBlockPersonLink',
lazy=True,
cascade='all, delete-orphan',
backref=db.backref(
'session_block',
lazy=True
)
)
# relationship backrefs:
# - contributions (Contribution.session_block)
# - legacy_mapping (LegacySessionBlockMapping.session_block)
# - room_reservation_links (ReservationLink.session_block)
# - session (Session.blocks)
# - timetable_entry (TimetableEntry.session_block)
# - vc_room_associations (VCRoomEventAssociation.linked_block)
@declared_attr
def contribution_count(cls):
from indico.modules.events.contributions.models.contributions import Contribution
query = (db.select([db.func.count(Contribution.id)])
.where((Contribution.session_block_id == cls.id) & ~Contribution.is_deleted)
.correlate_except(Contribution)
.scalar_subquery())
return db.column_property(query, deferred=True)
def __init__(self, **kwargs):
# explicitly initialize those relationships with None to avoid
# an extra query to check whether there is an object associated
# when assigning a new one (e.g. during cloning)
kwargs.setdefault('timetable_entry', None)
super().__init__(**kwargs)
@property
def event(self):
return self.session.event
@locator_property
def locator(self):
return dict(self.session.locator, block_id=self.id)
@property
def location_parent(self):
return self.session
def can_access(self, user, allow_admin=True):
return self.session.can_access(user, allow_admin=allow_admin)
@property
def has_note(self):
return self.session.has_note
@property
def note(self):
return self.session.note
@property
def full_title(self):
return f'{self.session.title}: {self.title}' if self.title else self.session.title
def can_manage(self, user, allow_admin=True):
return self.session.can_manage_blocks(user, allow_admin=allow_admin)
def METHOD_NAME(self, user):
return self.session.METHOD_NAME(user)
def can_edit_note(self, user):
return self.session.can_edit_note(user)
@materialize_iterable()
def get_manage_button_options(self, *, note_may_exist=False):
if self.event.is_locked:
return
if self.can_edit_note(session.user) and (note_may_exist or not self.session.has_note):
yield 'notes_edit'
if self.METHOD_NAME(session.user):
yield 'attachments_edit' # XXX for session, not block!
if self.can_manage(session.user):
yield 'session_block_edit'
if self.session.can_manage(session.user, 'coordinate'):
yield 'session_timetable_edit'
yield 'session_protection_edit'
@hybrid_property
def start_dt(self):
return self.timetable_entry.start_dt if self.timetable_entry else None
@start_dt.expression
def start_dt(cls):
return (db.select([TimetableEntry.start_dt])
.where(TimetableEntry.session_block_id == cls.id)
.as_scalar())
@hybrid_property
def end_dt(self):
return self.timetable_entry.start_dt + self.duration if self.timetable_entry else None
@end_dt.expression
def end_dt(cls):
return cls.start_dt + cls.duration
@property
def slug(self):
return slugify('b', self.id, self.session.title, self.title, maxlen=30)
def __repr__(self):
return format_repr(self, 'id', _text=self.title or None)
def log(self, *args, **kwargs):
"""Log with prefilled metadata for the session block."""
return self.event.log(*args, meta={'session_block_id': self.id}, **kwargs)
SessionBlock.register_location_events()
@listens_for(SessionBlock.duration, 'set')
def _set_duration(target, value, oldvalue, *unused):
from indico.modules.events.util import register_time_change
if oldvalue in (NEVER_SET, NO_VALUE):
return
if value != oldvalue and target.timetable_entry is not None:
register_time_change(target.timetable_entry)
@listens_for(SessionBlock.__table__, 'after_create')
def _add_timetable_consistency_trigger(target, conn, **kw):
sql = '''
CREATE CONSTRAINT TRIGGER consistent_timetable
AFTER INSERT OR UPDATE OF session_id, duration
ON {}
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW
EXECUTE PROCEDURE events.check_timetable_consistency('session_block');
'''.format(target.fullname)
DDL(sql).execute(conn)
|
970 |
setup class
|
"""Test the codeclimate JSON formatter."""
from __future__ import annotations
import json
import pathlib
import subprocess
import sys
import pytest
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.formatters import CodeclimateJSONFormatter
from ansiblelint.rules import AnsibleLintRule
class TestCodeclimateJSONFormatter:
"""Unit test for CodeclimateJSONFormatter."""
rule = AnsibleLintRule()
matches: list[MatchError] = []
formatter: CodeclimateJSONFormatter | None = None
def METHOD_NAME(self) -> None:
"""Set up few MatchError objects."""
self.rule = AnsibleLintRule()
self.rule.id = "TCF0001"
self.rule.severity = "VERY_HIGH"
self.matches = []
self.matches.append(
MatchError(
message="message",
lineno=1,
details="hello",
lintable=Lintable("filename.yml", content=""),
rule=self.rule,
),
)
self.matches.append(
MatchError(
message="message",
lineno=2,
details="hello",
lintable=Lintable("filename.yml", content=""),
rule=self.rule,
ignored=True,
),
)
self.formatter = CodeclimateJSONFormatter(
pathlib.Path.cwd(),
display_relative_path=True,
)
def test_format_list(self) -> None:
"""Test if the return value is a string."""
assert isinstance(self.formatter, CodeclimateJSONFormatter)
assert isinstance(self.formatter.format_result(self.matches), str)
def test_result_is_json(self) -> None:
"""Test if returned string value is a JSON."""
assert isinstance(self.formatter, CodeclimateJSONFormatter)
output = self.formatter.format_result(self.matches)
json.loads(output)
# https://github.com/ansible/ansible-navigator/issues/1490
assert "\n" not in output
def test_single_match(self) -> None:
"""Test negative case. Only lists are allowed. Otherwise a RuntimeError will be raised."""
assert isinstance(self.formatter, CodeclimateJSONFormatter)
with pytest.raises(RuntimeError):
self.formatter.format_result(self.matches[0]) # type: ignore[arg-type]
def test_result_is_list(self) -> None:
"""Test if the return JSON contains a list with a length of 2."""
assert isinstance(self.formatter, CodeclimateJSONFormatter)
result = json.loads(self.formatter.format_result(self.matches))
assert len(result) == 2
def test_validate_codeclimate_schema(self) -> None:
"""Test if the returned JSON is a valid codeclimate report."""
assert isinstance(self.formatter, CodeclimateJSONFormatter)
result = json.loads(self.formatter.format_result(self.matches))
single_match = result[0]
assert "type" in single_match
assert single_match["type"] == "issue"
assert "check_name" in single_match
assert "categories" in single_match
assert isinstance(single_match["categories"], list)
assert "severity" in single_match
assert single_match["severity"] == "major"
assert "description" in single_match
assert "fingerprint" in single_match
assert "location" in single_match
assert "path" in single_match["location"]
assert single_match["location"]["path"] == self.matches[0].filename
assert "lines" in single_match["location"]
assert single_match["location"]["lines"]["begin"] == self.matches[0].lineno
assert "positions" not in single_match["location"]
# check that the 2nd match is marked as 'minor' because it was created with ignored=True
assert result[1]["severity"] == "minor"
def test_validate_codeclimate_schema_with_positions(self) -> None:
"""Test if the returned JSON is a valid codeclimate report (containing 'positions' instead of 'lines')."""
assert isinstance(self.formatter, CodeclimateJSONFormatter)
result = json.loads(
self.formatter.format_result(
[
MatchError(
message="message",
lineno=1,
column=42,
details="hello",
lintable=Lintable("filename.yml", content=""),
rule=self.rule,
),
],
),
)
assert result[0]["location"]["positions"]["begin"]["line"] == 1
assert result[0]["location"]["positions"]["begin"]["column"] == 42
assert "lines" not in result[0]["location"]
def test_code_climate_parsable_ignored() -> None:
"""Test that -p option does not alter codeclimate format."""
cmd = [
sys.executable,
"-m",
"ansiblelint",
"-v",
"-p",
]
file = "examples/playbooks/empty_playbook.yml"
result = subprocess.run([*cmd, file], check=False)
result2 = subprocess.run([*cmd, "-p", file], check=False)
assert result.returncode == result2.returncode
assert result.stdout == result2.stdout
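# Illustrative sketch (assumed shape, not produced by this test): a single Code Climate
# issue as the assertions above expect it, with 'lines' used when only a line number is
# known and 'positions' replacing it when a column is also available.
#
#     {
#         "type": "issue",
#         "check_name": "TCF0001",
#         "categories": [...],
#         "severity": "major",            # "minor" for matches created with ignored=True
#         "description": "message",
#         "fingerprint": "...",
#         "location": {"path": "filename.yml", "lines": {"begin": 1}},
#     }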
|
971 |
get journal or part of
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo CSL-JSON schema."""
from __future__ import absolute_import, print_function
import re
from invenio_formatter.filters.datetime import from_isodate
from marshmallow import Schema, fields, missing
from zenodo.modules.records.models import ObjectType
class AuthorSchema(Schema):
"""Schema for an author."""
family = fields.Method('get_family_name')
given = fields.Method('get_given_names')
def get_family_name(self, obj):
"""Get family name."""
if {'familyname', 'givennames'} <= set(obj):
return obj.get('familyname')
else:
return obj['name']
def get_given_names(self, obj):
"""Get given names."""
if {'familyname', 'givennames'} <= set(obj):
return obj.get('givennames')
return missing
class RecordSchemaCSLJSON(Schema):
"""Schema for records in CSL-JSON."""
id = fields.Str(attribute='pid.pid_value')
type = fields.Method('get_type')
title = fields.Str(attribute='metadata.title')
abstract = fields.Str(attribute='metadata.description')
author = fields.List(fields.Nested(AuthorSchema),
attribute='metadata.creators')
issued = fields.Method('get_issue_date')
language = fields.Str(attribute='metadata.language')
version = fields.Str(attribute='metadata.version')
note = fields.Str(attribute='metadata.notes')
DOI = fields.Str(attribute='metadata.doi')
ISBN = fields.Str(attribute='metadata.imprint.isbn')
ISSN = fields.Method('get_issn')
container_title = fields.Method('get_container_title')
page = fields.Method('get_pages')
volume = fields.Str(attribute='metadata.journal.volume')
issue = fields.Str(attribute='metadata.journal.issue')
publisher = fields.Method('get_publisher')
publisher_place = fields.Str(attribute='metadata.imprint.place')
event = fields.Method('get_event')
event_place = fields.Str(
attribute='metadata.meeting.place', dump_to='event-place')
# TODO: check if possible to dump in EDTF format
# event_date = fields.Str(
# attribute='metadata.meeting.dates', dump_to='event-date')
def get_event(self, obj):
"""Get event/meeting title and acronym."""
m = obj['metadata']
meeting = m.get('meeting', {})
if meeting:
title = meeting.get('title')
acronym = meeting.get('acronym')
if title and acronym:
return u'{} ({})'.format(title, acronym)
elif title or acronym:
return title or acronym
return missing
def METHOD_NAME(self, obj, key):
"""Get journal or part of."""
m = obj['metadata']
journal = m.get('journal', {}).get(key)
part_of = m.get('part_of', {}).get(key)
return journal or part_of or missing
def get_container_title(self, obj):
"""Get container title."""
return self.METHOD_NAME(obj, 'title')
def get_pages(self, obj):
"""Get pages."""
# Remove multiple dashes between page numbers (eg. 12--15)
pages = self.METHOD_NAME(obj, 'pages')
pages = re.sub('-+', '-', pages) if pages else pages
return pages
def get_publisher(self, obj):
"""Get publisher."""
m = obj['metadata']
publisher = m.get('imprint', {}).get('publisher')
if publisher:
return publisher
if m.get('doi', '').startswith('10.5281/'):
return 'Zenodo'
return missing
def get_type(self, obj):
"""Get record CSL type."""
metadata = obj['metadata']
obj_type = ObjectType.get_by_dict(metadata.get('resource_type'))
return obj_type.get('csl', 'article') if obj_type else 'article'
def get_issn(self, obj):
"""Get the record's ISSN."""
for id in obj['metadata'].get('alternate_identifiers', []):
if id['scheme'] == 'issn':
return id['identifier']
return missing
def get_issue_date(self, obj):
"""Get a date in list format."""
d = from_isodate(obj['metadata'].get('publication_date'))
return {'date-parts': [[d.year, d.month, d.day]]} if d else missing
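# Illustrative sketch (hypothetical metadata, not part of the schema): METHOD_NAME reads a
# key from 'journal' first and falls back to 'part_of', and get_pages collapses repeated
# dashes between page numbers.
#
#     obj = {'metadata': {'part_of': {'title': 'Proc. of X', 'pages': '12--15'}}}
#     schema.get_container_title(obj)  # -> 'Proc. of X'
#     schema.get_pages(obj)            # -> '12-15'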
|
972 |
post operations
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"dnc delegated-subnet-service show",
is_preview=True,
)
class Show(AAZCommand):
"""Get details about the specified dnc DelegatedSubnet Link.
:example: Get details of a subnet delegated to DNC
az dnc delegated-subnet-service show --resource-group "TestRG" --resource-name "delegated1"
"""
_aaz_info = {
"version": "2023-06-27-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.delegatednetwork/delegatedsubnets/{}", "2023-06-27-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.resource_name = AAZStrArg(
options=["-n", "--name", "--resource-name"],
help="The name of the resource. It must be a minimum of 3 characters, and a maximum of 63.",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^[a-z][a-z0-9]*$",
max_length=63,
min_length=3,
),
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.DelegatedSubnetServiceGetDetails(ctx=self.ctx)()
self.METHOD_NAME()
@register_callback
def pre_operations(self):
pass
@register_callback
def METHOD_NAME(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class DelegatedSubnetServiceGetDetails(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DelegatedNetwork/delegatedSubnets/{resourceName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"resourceName", self.ctx.args.resource_name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-06-27-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType()
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.allocation_block_prefix_size = AAZIntType(
serialized_name="allocationBlockPrefixSize",
)
properties.controller_details = AAZObjectType(
serialized_name="controllerDetails",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.subnet_details = AAZObjectType(
serialized_name="subnetDetails",
)
controller_details = cls._schema_on_200.properties.controller_details
controller_details.id = AAZStrType()
subnet_details = cls._schema_on_200.properties.subnet_details
subnet_details.id = AAZStrType()
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"]
|
973 |
url parameters
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"graph-services account show",
)
class Show(AAZCommand):
"""Get an account resource given its name.
:example: Get account
        az graph-services account show --subscription mySubscriptionGUID --resource-group myRG --resource-name myGraphAppBilling
"""
_aaz_info = {
"version": "2023-04-13",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.graphservices/accounts/{}", "2023-04-13"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
help="Name of an Azure resource group in your subscription",
required=True,
)
_args_schema.resource_name = AAZStrArg(
options=["-n", "--name", "--resource-name"],
help="The name of the resource.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.AccountsGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class AccountsGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.GraphServices/accounts/{resourceName}",
**self.METHOD_NAME
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"resourceName", self.ctx.args.resource_name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-04-13",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType()
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"required": True},
)
_schema_on_200.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.app_id = AAZStrType(
serialized_name="appId",
flags={"required": True},
)
properties.billing_plan_id = AAZStrType(
serialized_name="billingPlanId",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
system_data = cls._schema_on_200.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"]
|
974 |
test evaluation
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.overfeat."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim.nets import overfeat
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class OverFeatTest(test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes)
self.assertEquals(logits.op.name, 'overfeat/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 281, 281
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'overfeat/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = overfeat.overfeat(inputs, num_classes)
expected_names = [
'overfeat/conv1', 'overfeat/pool1', 'overfeat/conv2',
'overfeat/pool2', 'overfeat/conv3', 'overfeat/conv4',
'overfeat/conv5', 'overfeat/pool5', 'overfeat/fc6', 'overfeat/fc7',
'overfeat/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.cached_session():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
overfeat.overfeat(inputs, num_classes)
expected_names = [
'overfeat/conv1/weights',
'overfeat/conv1/biases',
'overfeat/conv2/weights',
'overfeat/conv2/biases',
'overfeat/conv3/weights',
'overfeat/conv3/biases',
'overfeat/conv4/weights',
'overfeat/conv4/biases',
'overfeat/conv5/weights',
'overfeat/conv5/biases',
'overfeat/fc6/weights',
'overfeat/fc6/biases',
'overfeat/fc7/weights',
'overfeat/fc7/biases',
'overfeat/fc8/weights',
'overfeat/fc8/biases',
]
model_variables = [v.op.name for v in variables_lib.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def METHOD_NAME(self):
batch_size = 2
height, width = 231, 231
num_classes = 1000
with self.cached_session():
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = math_ops.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 231, 231
eval_height, eval_width = 281, 281
num_classes = 1000
with self.cached_session():
train_inputs = random_ops.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = overfeat.overfeat(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
variable_scope.get_variable_scope().reuse_variables()
eval_inputs = random_ops.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = overfeat.overfeat(
eval_inputs, is_training=False, spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = math_ops.reduce_mean(logits, [1, 2])
predictions = math_ops.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 231, 231
with self.cached_session() as sess:
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs)
sess.run(variables.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
test.main()
|
975 |
test get possible jobs
|
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
import requests
from . import utils
from django.test import override_settings
from ci.tests import utils as test_utils
from client import JobGetter, BaseClient
from mock import patch
from ci.tests import DBTester
BaseClient.setup_logger()
@override_settings(INSTALLED_GITSERVERS=[test_utils.github_config()])
class Tests(DBTester.DBTester):
def create_getter(self):
self.client_info = utils.default_client_info()
getter = JobGetter.JobGetter(self.client_info)
return getter
@patch.object(requests, 'get')
def METHOD_NAME(self, mock_get):
g = self.create_getter()
# test the non error operation
job_response = {"jobs": "jobs"}
mock_get.return_value = test_utils.Response(job_response)
self.set_counts()
jobs = g.get_possible_jobs()
self.assertEqual(jobs, job_response["jobs"])
self.compare_counts()
# check when the server responds incorrectly
job_response = {"none": "none"}
mock_get.return_value = test_utils.Response(job_response)
self.set_counts()
jobs = g.get_possible_jobs()
self.compare_counts()
self.assertEqual(jobs, None)
# check when requests has bad status code
mock_get.return_value = test_utils.Response(job_response, do_raise=True)
self.set_counts()
jobs = g.get_possible_jobs()
self.compare_counts()
self.assertEqual(jobs, None)
@patch.object(requests, 'post')
def test_claim_job(self, mock_post):
g = self.create_getter()
j0 = {"config": "unknown_config", "id": 1}
j1 = {"config": g.client_info["build_configs"][0], "id": 2}
jobs = [j0, j1]
response_data = utils.create_json_response()
response_data['job_info'] = {'recipe_name': 'test'}
mock_post.return_value = test_utils.Response(response_data)
        # successful operation
self.set_counts()
ret = g.claim_job(jobs)
self.compare_counts()
self.assertEqual(response_data, ret)
# didn't succeed
response_data["success"] = False
mock_post.return_value = test_utils.Response(response_data)
self.set_counts()
ret = g.claim_job(jobs)
self.compare_counts()
self.assertEqual(ret, None)
# no jobs with matching config
jobs = [j1]
self.set_counts()
ret = g.claim_job(jobs)
self.compare_counts()
self.assertEqual(ret, None)
# try when server problems
mock_post.return_value = test_utils.Response(response_data, do_raise=True)
self.set_counts()
ret = g.claim_job(jobs)
self.compare_counts()
self.assertEqual(ret, None)
@patch.object(requests, 'get')
@patch.object(requests, 'post')
def test_find_job(self, mock_post, mock_get):
g = self.create_getter()
j0 = {"config": "unknown_config", "id": 1}
j1 = {"config": g.client_info["build_configs"][0], "id": 2}
jobs = [j0, j1]
mock_get.return_value = test_utils.Response({"jobs": jobs})
response_data = utils.create_json_response()
response_data['job_info'] = {'recipe_name': 'test'}
mock_post.return_value = test_utils.Response(response_data)
# normal operation
self.set_counts()
result = g.find_job()
self.compare_counts()
self.assertEqual(result, response_data)
# no jobs
mock_get.return_value = test_utils.Response([])
self.set_counts()
result = g.find_job()
self.compare_counts()
self.assertEqual(result, None)
|
976 |
render
|
from random import randint
import numpy as np
import voluptuous as vol
from ledfx.color import parse_color, validate_color
from ledfx.effects.audio import AudioReactiveEffect
from ledfx.effects.droplets import DROPLET_NAMES, load_droplet
class RainAudioEffect(AudioReactiveEffect):
NAME = "Rain"
CATEGORY = "Classic"
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(
"mirror",
description="Mirror the effect",
default=True,
): bool,
# TODO drops should be controlled by some sort of effectlet class,
# which will provide a list of available drop names rather than just
# this static range
vol.Optional(
"lows_color",
description="color for low sounds, ie beats",
default="white",
): validate_color,
vol.Optional(
"mids_color",
description="color for mid sounds, ie vocals",
default="red",
): validate_color,
vol.Optional(
"high_color",
description="color for high sounds, ie hi hat",
default="blue",
): validate_color,
vol.Optional(
"lows_sensitivity",
description="Sensitivity to low sounds",
default=0.1,
): vol.All(vol.Coerce(float), vol.Range(min=0.03, max=0.3)),
vol.Optional(
"mids_sensitivity",
description="Sensitivity to mid sounds",
default=0.05,
): vol.All(vol.Coerce(float), vol.Range(min=0.03, max=0.3)),
vol.Optional(
"high_sensitivity",
description="Sensitivity to high sounds",
default=0.1,
): vol.All(vol.Coerce(float), vol.Range(min=0.03, max=0.3)),
vol.Optional(
"raindrop_animation",
description="Droplet animation style",
default=DROPLET_NAMES[0],
): vol.In(DROPLET_NAMES),
}
)
def on_activate(self, pixel_count):
self.drop_frames = np.zeros(self.pixel_count, dtype=int)
self.drop_colors = np.zeros((3, self.pixel_count))
def config_updated(self, config):
self.drop_animation = load_droplet(config["raindrop_animation"])
self.n_frames, self.frame_width = np.shape(self.drop_animation)
self.frame_centre_index = self.frame_width // 2
self.frame_side_lengths = self.frame_centre_index - 1
self.intensity_filter = self.create_filter(
alpha_decay=0.5, alpha_rise=0.99
)
self.filtered_intensities = np.zeros(3)
def new_drop(self, location, color):
"""
Add a new drop animation
TODO (?) this method overwrites a running drop animation in the same location
would need a significant restructure to fix
"""
self.drop_frames[location] = 1
self.drop_colors[:, location] = color
def update_drop_frames(self):
# Set any drops at final frame back to 0 and remove color data
finished_drops = self.drop_frames >= self.n_frames - 1
self.drop_frames[finished_drops] = 0
self.drop_colors[:, finished_drops] = 0
# Add one to any running frames
self.drop_frames[self.drop_frames > 0] += 1
def METHOD_NAME(self):
"""
Get colored pixel data of all drops overlaid
"""
# 2d array containing color intensity data
overlaid_frames = np.zeros((3, self.pixel_count + self.frame_width))
# Indexes of active drop animations
drop_indices = np.flatnonzero(self.drop_frames)
# TODO vectorize this to remove for loop
for index in drop_indices:
colored_frame = [
self.drop_animation[self.drop_frames[index]]
* self.drop_colors[color, index]
for color in range(3)
]
overlaid_frames[
:, index : index + self.frame_width
] += colored_frame
np.clip(overlaid_frames, 0, 255, out=overlaid_frames)
self.pixels = overlaid_frames[
:,
self.frame_side_lengths : self.frame_side_lengths
+ self.pixel_count,
].T
def audio_data_updated(self, data):
# Calculate the low, mids, and high indexes scaling based on the pixel
# count
intensities = np.fromiter(
(i.max() for i in self.melbank_thirds()), float
)
self.update_drop_frames()
if (
intensities[0] - self.filtered_intensities[0]
> self._config["lows_sensitivity"]
):
self.new_drop(
randint(0, self.pixel_count - 1),
parse_color(self._config["lows_color"]),
)
if (
intensities[1] - self.filtered_intensities[1]
> self._config["mids_sensitivity"]
):
self.new_drop(
randint(0, self.pixel_count - 1),
parse_color(self._config["mids_color"]),
)
if (
intensities[2] - self.filtered_intensities[2]
> self._config["high_sensitivity"]
):
self.new_drop(
randint(0, self.pixel_count - 1),
parse_color(self._config["high_color"]),
)
self.filtered_intensities = self.intensity_filter.update(intensities)
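# Illustrative note (derived from the code above, with made-up numbers): the overlay buffer
# in METHOD_NAME is pixel_count + frame_width columns wide so a droplet frame written at any
# pixel index never overruns the array; the final slice trims it back to pixel_count columns.
#
#     pixel_count = 10, frame_width = 5
#     frame_centre_index = 2, frame_side_lengths = 1
#     overlaid_frames has 15 columns; the visible pixels are columns [1:11]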
|
977 |
process data
|
#!/usr/bin/env python3
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Remote server monitoring script
"""
# pylint: disable=redefined-builtin, wrong-import-position, too-many-nested-blocks, broad-except
import argparse
import logging
import sys
import tempfile
import os
from gevent import monkey
from gevent import select
from gevent import socket
monkey.patch_select()
monkey.patch_socket()
from metrics_collector import start_metric_collection, stop_process, store_pid, check_is_running
from utils.process import get_process_pid_from_file, \
get_server_processes, get_server_pidfile
import configuration
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, format="%(message)s", level=logging.INFO)
TMP_DIR = tempfile.gettempdir()
METRICS_MON_SERVER_PID_FILE = os.path.join(TMP_DIR, ".metrics_monitoring_server.pid")
PID_FILE = configuration.get('server', 'pid_file', 'model_server.pid')
HOST = str(configuration.get('monitoring', 'HOST'))
PORT = int(configuration.get('monitoring', 'PORT', 9009))
SOCKET_LIST = []
RECV_BUFFER = 4096
interval = 1
def METHOD_NAME(sock):
""" process data recieved on socket"""
# receiving data from the socket.
data = sock.recv(RECV_BUFFER).decode()
if data:
if data == 'test\n':
send_message(sock, "Yep\n")
elif data == 'exit\n':
close_socket(sock)
elif data.startswith('interval'):
try:
global interval
interval = int(data.split(":")[1][:-1])
except Exception:
send_message(sock, "In-correct interval data")
elif data.startswith('metrics'):
metrics = data[:-1].split("metrics:")[1].split("\t")
server_pid = get_process_pid_from_file(get_server_pidfile(PID_FILE))
server_process = get_server_processes(server_pid)
start_metric_collection(server_process, metrics, interval, sock)
else:
# TODO - decide what to do here
pass
else:
# remove the socket that's broken
if sock in SOCKET_LIST:
SOCKET_LIST.remove(sock)
def perf_server():
""" start performance moniting server on a socket """
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((HOST, PORT))
server_socket.listen(10)
SOCKET_LIST.append(server_socket)
logger.info("Started metrics monitoring server on port %s", PORT)
while True:
ready_to_read, _, _ = select.select(SOCKET_LIST, [], [], 0)
for sock in ready_to_read:
            # a new connection request received
if sock == server_socket:
sockfd, addr = server_socket.accept()
SOCKET_LIST.append(sockfd)
logger.info("client (%s, %s) connected", addr[0], addr[1])
# a message from a client, not a new connection
else:
try:
METHOD_NAME(sock)
except Exception as e:
logger.warning("Error %s", str(e))
continue
server_socket.close()
def send_message(socket_, message):
try:
socket_.send(message.encode("latin-1"))
except Exception as e:
logger.warning("Error while sending the message %s. Closing the socket.", str(e))
close_socket(socket_)
def close_socket(socket_):
socket_.close()
if socket_ in SOCKET_LIST:
SOCKET_LIST.remove(socket_)
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, format="%(message)s", level=logging.INFO)
parser = argparse.ArgumentParser(prog='perf-mon-script', description='System Performance Monitoring')
sub_parse = parser.add_mutually_exclusive_group(required=True)
sub_parse.add_argument('--start', action='store_true', help='Start the perf-mon-script')
sub_parse.add_argument('--stop', action='store_true', help='Stop the perf-mon-script')
args = parser.parse_args()
if args.start:
check_is_running(METRICS_MON_SERVER_PID_FILE)
store_pid(METRICS_MON_SERVER_PID_FILE)
perf_server()
elif args.stop:
stop_process(METRICS_MON_SERVER_PID_FILE)
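# Illustrative client sketch (hypothetical, not part of this script): the server speaks a
# simple newline-terminated protocol handled in METHOD_NAME above.
#
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect((HOST, PORT))
#     client.send(b"test\n")        # server answers b"Yep\n"
#     client.send(b"interval:5\n")  # report metrics every 5 seconds
#     client.send(b"metrics:cpu_percent\tmemory_percent\n")  # metric names are assumptions
#     client.send(b"exit\n")        # server closes this connection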
|
978 |
test weights parametrized
|
# Owner(s): ["module: unknown"]
import logging
from torch import nn
from torch.ao.pruning.sparsifier import utils
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.linear = nn.Linear(16, 16, bias=bias)
self.seq = nn.Sequential(
nn.Linear(16, 16, bias=bias),
nn.Linear(16, 16, bias=bias)
)
# Make sure the weights are not random
self.linear.weight = nn.Parameter(torch.zeros_like(self.linear.weight) + 1.0)
self.seq[0].weight = nn.Parameter(torch.zeros_like(self.seq[0].weight) + 2.0)
self.seq[1].weight = nn.Parameter(torch.zeros_like(self.seq[1].weight) + 3.0)
        if bias:
            self.linear.bias = nn.Parameter(torch.zeros_like(self.linear.bias) + 10.0)
            self.seq[0].bias = nn.Parameter(torch.zeros_like(self.seq[0].bias) + 20.0)
            self.seq[1].bias = nn.Parameter(torch.zeros_like(self.seq[1].bias) + 30.0)
def forward(self, x):
x = self.linear(x)
x = self.seq(x)
return x
class TestFakeSparsity(TestCase):
def test_masking_logic(self):
model = nn.Linear(16, 16, bias=False)
model.weight = nn.Parameter(torch.eye(16))
x = torch.randn(3, 16)
self.assertEqual(torch.mm(x, torch.eye(16)), model(x))
mask = torch.zeros(16, 16)
sparsity = utils.FakeSparsity(mask)
parametrize.register_parametrization(model, 'weight', sparsity)
x = torch.randn(3, 16)
self.assertEqual(torch.zeros(3, 16), model(x))
def METHOD_NAME(self):
model = ModelUnderTest(bias=False)
assert not hasattr(model.linear, 'parametrizations')
assert not hasattr(model.seq[0], 'parametrizations')
assert not hasattr(model.seq[1], 'parametrizations')
mask = torch.eye(16)
parametrize.register_parametrization(model.linear, 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model.seq[0], 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model.seq[1], 'weight',
utils.FakeSparsity(mask))
assert hasattr(model.linear, 'parametrizations')
assert parametrize.is_parametrized(model.linear, 'weight')
assert hasattr(model.seq[0], 'parametrizations')
        assert parametrize.is_parametrized(model.seq[0], 'weight')
        assert hasattr(model.seq[1], 'parametrizations')
        assert parametrize.is_parametrized(model.seq[1], 'weight')
def test_state_dict_preserved(self):
model_save = ModelUnderTest(bias=False)
mask = torch.eye(16)
parametrize.register_parametrization(model_save.linear, 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model_save.seq[0], 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model_save.seq[1], 'weight',
utils.FakeSparsity(mask))
state_dict = model_save.state_dict()
model_load = ModelUnderTest(bias=False)
mask = torch.zeros(model_load.linear.weight.shape)
parametrize.register_parametrization(model_load.linear, 'weight',
utils.FakeSparsity(mask))
mask = torch.zeros(model_load.seq[0].weight.shape)
parametrize.register_parametrization(model_load.seq[0], 'weight',
utils.FakeSparsity(mask))
mask = torch.zeros(model_load.seq[1].weight.shape)
parametrize.register_parametrization(model_load.seq[1], 'weight',
utils.FakeSparsity(mask))
# Keep this strict, as we are not loading the 'mask'
model_load.load_state_dict(state_dict, strict=False)
# Check the parametrizations are preserved
assert hasattr(model_load.linear, 'parametrizations')
assert parametrize.is_parametrized(model_load.linear, 'weight')
assert hasattr(model_load.seq[0], 'parametrizations')
        assert parametrize.is_parametrized(model_load.seq[0], 'weight')
        assert hasattr(model_load.seq[1], 'parametrizations')
        assert parametrize.is_parametrized(model_load.seq[1], 'weight')
        # Check the weights are preserved
self.assertEqual(model_save.linear.parametrizations['weight'].original,
model_load.linear.parametrizations['weight'].original)
self.assertEqual(model_save.seq[0].parametrizations['weight'].original,
model_load.seq[0].parametrizations['weight'].original)
self.assertEqual(model_save.seq[1].parametrizations['weight'].original,
model_load.seq[1].parametrizations['weight'].original)
# Check the masks are not preserved in the state_dict
# We store the state_dicts in the sparsifier, not in the model itself.
# TODO: Need to find a clean way of exporting the parametrized model
self.assertNotEqual(model_save.linear.parametrizations['weight'][0].mask,
model_load.linear.parametrizations['weight'][0].mask)
self.assertNotEqual(model_save.seq[0].parametrizations['weight'][0].mask,
model_load.seq[0].parametrizations['weight'][0].mask)
self.assertNotEqual(model_save.seq[1].parametrizations['weight'][0].mask,
model_load.seq[1].parametrizations['weight'][0].mask)
def test_jit_trace(self):
model = ModelUnderTest(bias=False)
mask = torch.eye(16)
parametrize.register_parametrization(model.linear, 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model.seq[0], 'weight',
utils.FakeSparsity(mask))
mask = torch.eye(16)
parametrize.register_parametrization(model.seq[1], 'weight',
utils.FakeSparsity(mask))
# Tracing
example_x = torch.ones(3, 16)
model_trace = torch.jit.trace_module(model, {'forward': example_x})
x = torch.randn(3, 16)
y = model(x)
y_hat = model_trace(x)
self.assertEqual(y_hat, y)
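# A minimal sketch of the mechanism under test (assumed usage, not part of the suite):
# FakeSparsity is a parametrization whose forward pass multiplies the stored weight by a
# fixed mask, so an all-zero mask zeroes the effective weight while the original tensor
# stays available under parametrizations['weight'].original.
def _fake_sparsity_sketch():
    layer = nn.Linear(4, 4, bias=False)
    mask = torch.zeros(4, 4)
    parametrize.register_parametrization(layer, 'weight', utils.FakeSparsity(mask))
    out = layer(torch.randn(2, 4))  # all zeros: the effective weight is fully masked
    original = layer.parametrizations['weight'].original  # unmasked weight is preserved
    return out, original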
|
979 |
check log dir
|
# encoding: utf-8
"""An object for managing IPython profile directories."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
import errno
from pathlib import Path
from traitlets.config.configurable import LoggingConfigurable
from ..paths import get_ipython_package_dir
from ..utils.path import expand_path, ensure_dir_exists
from traitlets import Unicode, Bool, observe
#-----------------------------------------------------------------------------
# Module errors
#-----------------------------------------------------------------------------
class ProfileDirError(Exception):
pass
#-----------------------------------------------------------------------------
# Class for managing profile directories
#-----------------------------------------------------------------------------
class ProfileDir(LoggingConfigurable):
"""An object to manage the profile directory and its resources.
The profile directory is used by all IPython applications, to manage
configuration, logging and security.
This object knows how to find, create and manage these directories. This
should be used by any code that wants to handle profiles.
"""
security_dir_name = Unicode('security')
log_dir_name = Unicode('log')
startup_dir_name = Unicode('startup')
pid_dir_name = Unicode('pid')
static_dir_name = Unicode('static')
security_dir = Unicode(u'')
log_dir = Unicode(u'')
startup_dir = Unicode(u'')
pid_dir = Unicode(u'')
static_dir = Unicode(u'')
location = Unicode(u'',
help="""Set the profile location directly. This overrides the logic used by the
`profile` option.""",
).tag(config=True)
_location_isset = Bool(False) # flag for detecting multiply set location
@observe('location')
def _location_changed(self, change):
if self._location_isset:
raise RuntimeError("Cannot set profile location more than once.")
self._location_isset = True
new = change['new']
ensure_dir_exists(new)
# ensure config files exist:
self.security_dir = os.path.join(new, self.security_dir_name)
self.log_dir = os.path.join(new, self.log_dir_name)
self.startup_dir = os.path.join(new, self.startup_dir_name)
self.pid_dir = os.path.join(new, self.pid_dir_name)
self.static_dir = os.path.join(new, self.static_dir_name)
self.check_dirs()
def _mkdir(self, path, mode=None):
"""ensure a directory exists at a given path
This is a version of os.mkdir, with the following differences:
- returns True if it created the directory, False otherwise
- ignores EEXIST, protecting against race conditions where
the dir may have been created in between the check and
the creation
- sets permissions if requested and the dir already exists
"""
if os.path.exists(path):
if mode and os.stat(path).st_mode != mode:
try:
os.chmod(path, mode)
except OSError:
self.log.warning(
"Could not set permissions on %s",
path
)
return False
try:
if mode:
os.mkdir(path, mode)
else:
os.mkdir(path)
except OSError as e:
if e.errno == errno.EEXIST:
return False
else:
raise
return True
@observe('log_dir')
def METHOD_NAME(self, change=None):
self._mkdir(self.log_dir)
@observe('startup_dir')
def check_startup_dir(self, change=None):
self._mkdir(self.startup_dir)
readme = os.path.join(self.startup_dir, 'README')
if not os.path.exists(readme):
import pkgutil
with open(readme, 'wb') as f:
f.write(pkgutil.get_data(__name__, 'profile/README_STARTUP'))
@observe('security_dir')
def check_security_dir(self, change=None):
self._mkdir(self.security_dir, 0o40700)
@observe('pid_dir')
def check_pid_dir(self, change=None):
self._mkdir(self.pid_dir, 0o40700)
def check_dirs(self):
self.check_security_dir()
self.METHOD_NAME()
self.check_pid_dir()
self.check_startup_dir()
def copy_config_file(self, config_file: str, path: Path, overwrite=False) -> bool:
"""Copy a default config file into the active profile directory.
Default configuration files are kept in :mod:`IPython.core.profile`.
This function moves these from that location to the working profile
directory.
"""
dst = Path(os.path.join(self.location, config_file))
if dst.exists() and not overwrite:
return False
if path is None:
path = os.path.join(get_ipython_package_dir(), u'core', u'profile', u'default')
assert isinstance(path, Path)
src = path / config_file
shutil.copy(src, dst)
return True
@classmethod
def create_profile_dir(cls, profile_dir, config=None):
"""Create a new profile directory given a full path.
Parameters
----------
profile_dir : str
The full path to the profile directory. If it does exist, it will
be used. If not, it will be created.
"""
return cls(location=profile_dir, config=config)
@classmethod
def create_profile_dir_by_name(cls, path, name=u'default', config=None):
"""Create a profile dir by profile name and path.
Parameters
----------
path : unicode
The path (directory) to put the profile directory in.
name : unicode
The name of the profile. The name of the profile directory will
be "profile_<profile>".
"""
if not os.path.isdir(path):
raise ProfileDirError('Directory not found: %s' % path)
profile_dir = os.path.join(path, u'profile_' + name)
return cls(location=profile_dir, config=config)
@classmethod
def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
"""Find an existing profile dir by profile name, return its ProfileDir.
This searches through a sequence of paths for a profile dir. If it
is not found, a :class:`ProfileDirError` exception will be raised.
The search path algorithm is:
        1. ``os.getcwd()`` # removed for security reasons.
2. ``ipython_dir``
Parameters
----------
ipython_dir : unicode or str
The IPython directory to use.
name : unicode or str
The name of the profile. The name of the profile directory
will be "profile_<profile>".
"""
dirname = u'profile_' + name
paths = [ipython_dir]
for p in paths:
profile_dir = os.path.join(p, dirname)
if os.path.isdir(profile_dir):
return cls(location=profile_dir, config=config)
else:
raise ProfileDirError('Profile directory not found in paths: %s' % dirname)
@classmethod
def find_profile_dir(cls, profile_dir, config=None):
"""Find/create a profile dir and return its ProfileDir.
This will create the profile directory if it doesn't exist.
Parameters
----------
profile_dir : unicode or str
The path of the profile directory.
"""
profile_dir = expand_path(profile_dir)
if not os.path.isdir(profile_dir):
raise ProfileDirError('Profile directory not found: %s' % profile_dir)
return cls(location=profile_dir, config=config)
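# A minimal usage sketch (assumptions: a writable temporary directory; not part of IPython):
# assigning a location triggers the observers above, which create the security/log/startup/
# pid/static subdirectories for the profile.
def _example_profile_dir():
    import tempfile
    base = tempfile.mkdtemp()
    profile = ProfileDir.create_profile_dir_by_name(base, name='demo')
    return profile.log_dir  # <base>/profile_demo/log, created by METHOD_NAME above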
|
980 |
test list
|
# Copyright 2012,2014 Christoph Reiter
# 2016 Nick Boultbee
# 2019 Ruud van Asseldonk
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from quodlibet.browsers.covergrid.main import CoverGrid
from senf import fsnative
from . import TestCase, run_gtk_loop
from .helper import realized
from quodlibet import config
from quodlibet.browsers.albums.prefs import DEFAULT_PATTERN_TEXT
from quodlibet.formats import AudioFile
from quodlibet.library import SongLibrary, SongLibrarian
SONGS = [
AudioFile({
"album": "one",
"artist": "piman",
"~filename": fsnative(u"/dev/null"),
}),
AudioFile({
"album": "two",
"artist": "mu",
"~filename": fsnative(u"/dev/zero"),
}),
AudioFile({
"album": "three",
"artist": "boris",
"~filename": fsnative(u"/bin/ls"),
}),
AudioFile({
"album": "three",
"artist": "boris",
"~filename": fsnative(u"/bin/ls2"),
}),
]
SONGS.sort()
class TCoverGridBrowser(TestCase):
def setUp(self):
config.init()
library = SongLibrary()
library.librarian = SongLibrarian()
CoverGrid.init(library)
for af in SONGS:
af.sanitize()
library.add(SONGS)
self.bar = CoverGrid(library)
self._id = self.bar.connect("songs-selected", self._selected)
self._id2 = self.bar.connect("songs-activated", self._activated)
with realized(self.bar):
self.bar.filter_text("")
self._wait()
self.songs = []
self.activated = False
def tearDown(self):
self.bar.disconnect(self._id)
self.bar.disconnect(self._id2)
self.bar.destroy()
del self.bar
config.quit()
def _activated(self, albumlist):
self.activated = True
def _selected(self, albumlist, songs, *args):
self.songs = songs
def _wait(self):
run_gtk_loop()
def test_activated(self):
with realized(self.bar):
view = self.bar.view
child = view.get_child_at_index(0)
child.emit('activate')
self._wait()
self.failUnless(self.activated)
def test_can_filter(self):
with realized(self.bar):
self.failUnless(self.bar.can_filter(None))
self.failUnless(self.bar.can_filter("album"))
self.failUnless(self.bar.can_filter("foobar"))
self.failIf(self.bar.can_filter("~#length"))
self.failIf(self.bar.can_filter("title"))
def test_set_text(self):
with realized(self.bar):
self.bar.filter_text("artist=piman")
self._wait()
self.failUnlessEqual(len(self.songs), 1)
self.bar.filter_text("")
self._wait()
self.failUnlessEqual(set(self.songs), set(SONGS))
def test_filter_album(self):
with realized(self.bar):
self.bar.filter_text("dsagfsag")
self._wait()
self.failUnlessEqual(len(self.songs), 0)
self.bar.filter_text("")
self._wait()
self.bar.filter("album", ["one", "three"])
self._wait()
self.failUnlessEqual(len(self.songs), 3)
def test_filter_artist(self):
with realized(self.bar):
self.bar.filter("artist", ["piman"])
self._wait()
self.failUnlessEqual(len(self.songs), 1)
self.failUnlessEqual(self.songs[0]("artist"), "piman")
def test_header(self):
self.failIf(self.bar.headers)
def METHOD_NAME(self):
albums = self.bar.list_albums()
self.failUnlessEqual(set(albums), {s.album_key for s in SONGS})
self.bar.filter_albums([SONGS[0].album_key])
self._wait()
self.failUnlessEqual({s.album_key for s in self.songs},
{SONGS[0].album_key})
def test_active_filter(self):
with realized(self.bar):
self.bar.filter("artist", ["piman"])
self._wait()
self.failUnless(self.bar.active_filter(self.songs[0]))
for s in SONGS:
if s is not self.songs[0]:
self.failIf(self.bar.active_filter(s))
def test_default_display_pattern(self):
pattern_text = self.bar.display_pattern_text
self.failUnlessEqual(pattern_text, DEFAULT_PATTERN_TEXT)
self.failUnless("<album>" in pattern_text)
|
981 |
iter exponents
|
# -*- coding: utf-8 -*-
# HeckeCharacters.py
from sage.all import gp, xmrange, Integer, pari, gcd, LCM, prod
from sage.misc.cachefunc import cached_method
from sage.groups.abelian_gps.abelian_group import AbelianGroup_class
from sage.groups.abelian_gps.abelian_group_element import AbelianGroupElement
from sage.groups.abelian_gps.dual_abelian_group import DualAbelianGroup_class, DualAbelianGroupElement
class RayClassGroup(AbelianGroup_class):
def __init__(self, number_field, mod_ideal=1, mod_archimedean=None):
if mod_archimedean is None:
mod_archimedean = [0] * len(number_field.real_places())
mod_ideal = number_field.ideal(mod_ideal)
bnf = gp(number_field.pari_bnf())
# Use PARI to compute ray class group
bnr = bnf.bnrinit([mod_ideal, mod_archimedean], 1)
invariants = bnr[5][2] # bnr.clgp.cyc
invariants = tuple(Integer(x) for x in invariants)
names = tuple("I%i" % i for i in range(len(invariants)))
generators = bnr[5][3] # bnr.gen = bnr.clgp[3]
generators = [number_field.ideal(pari(x)) for x in generators]
AbelianGroup_class.__init__(self, invariants, names)
self.__number_field = number_field
self.__bnr = bnr
self.__pari_mod = bnr[2][1]
self.__mod_ideal = mod_ideal
self.__mod_arch = mod_archimedean
self.__generators = generators
#def __call__(self, *args, **kwargs):
# return group.Group.__call__(self, *args, **kwargs)
def log(self, I):
# Use PARI to compute class of given ideal
g = self.__bnr.bnrisprincipal(I, flag=0)
return [Integer(x) for x in g]
def number_field(self):
return self.__number_field
def bnr(self):
return self.__bnr
def modulus(self):
return self.__mod_ideal
def _element_constructor_(self, *args, **kwargs):
try:
return AbelianGroupElement(args[0], self)
except Exception:
I = self.__number_field.ideal(*args, **kwargs)
return AbelianGroupElement(self, self.log(I))
@cached_method
def dual_group(self, base_ring=None):
return HeckeCharGroup(self, base_ring)
def __str__(self):
return "Ray class group of modulus %s over %s" \
% (self.modulus(), self.__number_field)
def __repr__(self):
return self.__str__()
def gen_ideals(self):
return self.__generators
def exp(self, x):
gens = self.gen_ideals()
return prod(g**e for g, e in zip(gens, x))
def lift(self, x):
return self.exp(x.exponents())
def METHOD_NAME(self):
for e in xmrange(self.invariants(), tuple):
yield e
def iter_ideals(self):
for e in self.METHOD_NAME():
yield self.exp(e)
class HeckeCharGroup(DualAbelianGroup_class):
def __init__(self, ray_class_group, base_ring):
names = tuple("chi%i" % i for i in range(ray_class_group.ngens()))
if base_ring is None:
from sage.rings.number_field.number_field import CyclotomicField
base_ring = CyclotomicField(LCM(ray_class_group.gens_orders()))
DualAbelianGroup_class.__init__(self, ray_class_group, names, base_ring)
""" ray_class_group accessible as self.group() """
def __call__(self, x):
if isinstance(x, HeckeChar) and x.parent() is self:
return x
return HeckeChar(self, x)
def __repr__(self):
return "Group of Hecke characters on %s"%self.group()
#def list(self):
# return [ HeckeChar(self, c.list()) for c in DualAbelianGroup_class.list(self) ]
def list_primitive(self):
return [chi for chi in self.list() if chi.is_primitive() ]
class HeckeChar(DualAbelianGroupElement):
def __init__(self, hecke_char_group, x):
ray_class_group = hecke_char_group.group()
if not isinstance(x, (list,tuple)) or len(x) != ray_class_group.ngens():
x = ray_class_group(x).list()
DualAbelianGroupElement.__init__(self, hecke_char_group, x)
self.__repr = None
self.__element_vector = x
#def __repr__(self):
# #return "Hecke character of index %s over %s" \
# # %(self.list(),self.parent().group())
# return str(self.list())
def number_field(self):
return self.parent().group().number_field()
def modulus(self):
return self.parent().group().modulus()
@cached_method
def conductor(self):
bnr = self.parent().group().bnr()
finite, _ = pari(bnr.bnrconductorofchar(self.list()))
return self.number_field().ideal(finite)
def is_primitive(self):
return self.conductor() == self.modulus()
def logvalue(self, x):
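        # The character value at x is exp(2*pi*i*r), where r = sum(e*f/d) over the
        # generator exponents; return r reduced modulo 1 (0 when the value is 1).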
try:
E = self.parent().group()(x)
except Exception:
return None
E = E.exponents()
F = self.exponents()
D = self.parent().gens_orders()
r = sum( e*f/d for e,f,d in zip( E, F, D) )
if isinstance(r, (int,Integer)):
return 0
n,d = r.numerator(), r.denominator()
return n%d/d
def logvalues_on_gens(self):
F = self.exponents()
D = self.parent().gens_orders()
return tuple( f/d for f,d in zip( F, D) )
def __call__(self, x):
try:
logx = self.parent().group()(x)
except Exception:
return 0
return DualAbelianGroupElement.__call__(self,logx)
def next_character(self, only_primitive=False):
D = self.parent().gens_orders()
F = list(self.exponents())
i = len(D)-1
while True:
F[i] += 1
if F[i] == D[i]:
F[i] = 0
i -= 1
if i < 0:
return None
else:
c = HeckeChar(self.parent(), F)
if not only_primitive or c.is_primitive():
return c
def prev_character(self, only_primitive=False):
D = self.parent().gens_orders()
F = list(self.exponents())
i = len(D)-1
while True:
F[i] -= 1
if F[i] < 0:
F[i] = D[i] - 1
i -= 1
if i < 0:
return None
else:
c = HeckeChar(self.parent(), F)
if not only_primitive or c.is_primitive():
return c
def galois_orbit(self):
order = self.multiplicative_order()
return [ self.__pow__(k) for k in range(order) if gcd(k,order) == 1 ]
"""
k.<a> = NumberField(x^4+7*x^2+13)
G = RayClassGroup(k,7)
H = G.dual_group()
H(3)
H([3,1])
"""
|
982 |
properties
|
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A composite that tracks inputs and outputs."""
from collections import OrderedDict
from copy import deepcopy
from functools import wraps
try:
from inspect import getfullargspec
except ImportError:
    # python 2.7, we only use .args so it's ok
from inspect import getargspec as getfullargspec
from dimod.core.composite import ComposedSampler
__all__ = ['TrackingComposite']
def tracking(f):
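    """Decorator that appends the wrapped sampling method's arguments and
    returned sample set to the sampler's ``inputs``/``outputs`` lists."""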
@wraps(f)
def _tracking(sampler, *args, **kwargs):
inpt = OrderedDict(zip(getfullargspec(f).args[1:], args)) # skip self
inpt.update(kwargs)
# we need to do this before in case they get mutated
if sampler._copy:
inpt = deepcopy(inpt)
sampleset = f(sampler, *args, **kwargs)
output = sampleset
if sampler._copy:
output = deepcopy(output)
sampler.inputs.append(inpt)
sampler.outputs.append(output)
return sampleset
return _tracking
class TrackingComposite(ComposedSampler):
"""Composite that tracks inputs and outputs for debugging and testing.
Args:
child (:obj:`dimod.Sampler`):
A dimod sampler.
copy (bool, optional, default=False):
If True, the inputs/outputs are copied (with :func:`copy.deepcopy`)
before they are stored. This is useful if the child sampler mutates
the values.
Examples:
>>> sampler = dimod.TrackingComposite(dimod.RandomSampler())
>>> sampleset = sampler.sample_ising({'a': -1}, {('a', 'b'): 1},
... num_reads=5)
>>> sampler.input
OrderedDict([('h', {'a': -1}), ('J', {('a', 'b'): 1}), ('num_reads', 5)])
>>> sampleset == sampler.output
True
If we make additional calls to the sampler, the most recent input/output
are stored in :attr:`.input` and :attr:`.output` respectively. However,
all are tracked in :attr:`.inputs` and :attr:`.outputs`.
>>> sampleset = sampler.sample_qubo({('a', 'b'): 1})
>>> sampler.input
OrderedDict([('Q', {('a', 'b'): 1})])
>>> sampler.inputs # doctest: +SKIP
[OrderedDict([('h', {'a': -1}), ('J', {('a', 'b'): 1}), ('num_reads', 5)]),
OrderedDict([('Q', {('a', 'b'): 1})])]
In the case that you want to nest the tracking composite, there are two
patterns for retrieving the data
>>> from dimod import TruncateComposite, TrackingComposite, ExactSolver
...
>>> sampler = TruncateComposite(TrackingComposite(ExactSolver()), 10)
>>> sampler.child.inputs # empty because we haven't called sample
[]
>>> intermediate_sampler = TrackingComposite(ExactSolver())
>>> sampler = TruncateComposite(intermediate_sampler, 10)
>>> intermediate_sampler.inputs
[]
"""
children = None
def __init__(self, child, copy=False):
self.children = [child]
self._inputs = []
self._outputs = []
self._copy = copy
@property
def input(self):
"""The most recent input to any sampling method."""
try:
return self.inputs[-1]
except IndexError:
pass
raise ValueError("The sample method has not been called")
@property
def inputs(self):
"""All of the inputs to any sampling methods."""
return self._inputs
@property
def output(self):
"""The most recent output of any sampling method."""
try:
return self.outputs[-1]
except IndexError:
pass
raise ValueError("The sample method has not been called")
@property
def outputs(self):
"""All of the outputs from any sampling methods."""
return self._outputs
@property
def parameters(self):
return self.child.parameters.copy()
@property
def METHOD_NAME(self):
return {'child_properties': self.child.METHOD_NAME.copy()}
def clear(self):
"""Clear all the inputs/outputs."""
# we want to use self.inputs.clear() but it's not in python2
del self.inputs[:]
del self.outputs[:]
@tracking
def sample(self, bqm, **parameters):
"""Sample from the child sampler and store the given inputs/outputs.
The binary quadratic model and any parameters are stored in
:attr:`.inputs`. The returned sample set is stored in :attr:`.outputs`.
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
**kwargs:
Parameters for the sampling method, specified by the child
sampler.
Returns:
:obj:`dimod.SampleSet`
"""
return self.child.sample(bqm, **parameters)
@tracking
def sample_ising(self, h, J, **parameters):
"""Sample from the child sampler and store the given inputs/outputs.
The binary quadratic model and any parameters are stored in
:attr:`.inputs`. The returned sample set is stored in :attr:`.outputs`.
Args:
h (dict/list):
Linear biases of the Ising problem. If a dict, should be of the
                form `{v: bias, ...}` where `v` is a spin-valued variable and `bias`
is its associated bias. If a list, it is treated as a list of
biases where the indices are the variable labels.
J (dict[(variable, variable), bias]):
Quadratic biases of the Ising problem.
**kwargs:
Parameters for the sampling method, specified by the child
sampler.
Returns:
:obj:`dimod.SampleSet`
"""
return self.child.sample_ising(h, J, **parameters)
@tracking
def sample_qubo(self, Q, **parameters):
"""Sample from the child sampler and store the given inputs/outputs.
The binary quadratic model and any parameters are stored in
:attr:`.inputs`. The returned sample set is stored in :attr:`.outputs`.
Args:
Q (dict):
Coefficients of a quadratic unconstrained binary optimization
(QUBO) problem. Should be a dict of the form `{(u, v): bias, ...}`
where `u`, `v`, are binary-valued variables and `bias` is their
associated coefficient.
**kwargs:
Parameters for the sampling method, specified by the child
sampler.
Returns:
:obj:`dimod.SampleSet`
"""
return self.child.sample_qubo(Q, **parameters)
|
983 |
poll and read until finish
|
"""
:codeauthor: Pedro Algarvio ([email protected])
salt.utils.nb_popen
~~~~~~~~~~~~~~~~~~~
Non blocking subprocess Popen.
This functionality has been adapted to work on windows following the recipe
found on:
http://code.activestate.com/recipes/440554/
"""
import errno
import logging
import os
import select
import subprocess
import sys
import tempfile
import time
mswindows = sys.platform == "win32"
try:
import msvcrt
import pywintypes
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
except ImportError:
import fcntl
log = logging.getLogger(__name__)
class NonBlockingPopen(subprocess.Popen):
# _stdin_logger_name_ = 'salt.utils.nb_popen.STDIN.PID-{pid}'
_stdout_logger_name_ = "salt.utils.nb_popen.STDOUT.PID-{pid}"
_stderr_logger_name_ = "salt.utils.nb_popen.STDERR.PID-{pid}"
def __init__(self, *args, **kwargs):
self.stream_stds = kwargs.pop("stream_stds", False)
# Half a megabyte in memory is more than enough to start writing to
# a temporary file.
self.max_size_in_mem = kwargs.pop("max_size_in_mem", 512000)
# Let's configure the std{in, out,err} logging handler names
# self._stdin_logger_name_ = kwargs.pop(
# 'stdin_logger_name', self._stdin_logger_name_
# )
self._stdout_logger_name_ = kwargs.pop(
"stdout_logger_name", self._stdout_logger_name_
)
self._stderr_logger_name_ = kwargs.pop(
"stderr_logger_name", self._stderr_logger_name_
)
logging_command = kwargs.pop("logging_command", None)
stderr = kwargs.get("stderr", None)
super().__init__(*args, **kwargs)
# self._stdin_logger = logging.getLogger(
# self._stdin_logger_name_.format(pid=self.pid)
# )
self.stdout_buff = tempfile.SpooledTemporaryFile(self.max_size_in_mem)
self._stdout_logger = logging.getLogger(
self._stdout_logger_name_.format(pid=self.pid)
)
if stderr is subprocess.STDOUT:
self.stderr_buff = self.stdout_buff
self._stderr_logger = self._stdout_logger
else:
self.stderr_buff = tempfile.SpooledTemporaryFile(self.max_size_in_mem)
self._stderr_logger = logging.getLogger(
self._stderr_logger_name_.format(pid=self.pid)
)
log.info(
"Running command under pid %s: '%s'",
self.pid,
args if logging_command is None else logging_command,
)
def recv(self, maxsize=None):
return self._recv("stdout", maxsize)
def recv_err(self, maxsize=None):
return self._recv("stderr", maxsize)
def send_recv(self, input="", maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if mswindows:
def send(self, input):
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
# self._stdin_logger.debug(input.rstrip())
except ValueError:
return self._close("stdin")
except (pywintypes.error, Exception) as why:
if why.args[0] in (109, errno.ESHUTDOWN):
return self._close("stdin")
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
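                # PeekNamedPipe reports how much data is waiting without blocking;
                # the subsequent ReadFile is clamped to the caller's maxsize.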
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (pywintypes.error, Exception) as why:
if why.args[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
getattr(self, "{}_buff".format(which)).write(read)
getattr(self, "_{}_logger".format(which)).debug(read.rstrip())
if self.stream_stds:
getattr(sys, which).write(read)
if self.universal_newlines:
read = self._translate_newlines(read)
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(), input)
# self._stdin_logger.debug(input.rstrip())
except OSError as why:
if why.args[0] == errno.EPIPE: # broken pipe
return self._close("stdin")
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ""
buff = conn.read(maxsize)
if not buff:
return self._close(which)
if self.universal_newlines:
buff = self._translate_newlines(buff)
getattr(self, "{}_buff".format(which)).write(buff)
getattr(self, "_{}_logger".format(which)).debug(buff.rstrip())
if self.stream_stds:
getattr(sys, which).write(buff)
return buff
finally:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
def METHOD_NAME(self, interval=0.01):
silent_iterations = 0
while self.poll() is None:
if self.stdout is not None:
silent_iterations = 0
self.recv()
if self.stderr is not None:
silent_iterations = 0
self.recv_err()
silent_iterations += 1
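            # silent_iterations only accumulates when neither stdout nor stderr is
            # attached; after ~100 such passes, drain and log via communicate().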
if silent_iterations > 100:
silent_iterations = 0
(stdoutdata, stderrdata) = self.communicate()
if stdoutdata:
log.debug(stdoutdata)
if stderrdata:
log.error(stderrdata)
time.sleep(interval)
def communicate(self, input=None): # pylint: disable=arguments-differ
super().communicate(input)
self.stdout_buff.flush()
self.stdout_buff.seek(0)
self.stderr_buff.flush()
self.stderr_buff.seek(0)
return self.stdout_buff.read(), self.stderr_buff.read()
|
984 |
draw
|
""" BarGraph represents bar graphs with vertical bars both simple
and stacked.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
import datetime
from pylab import setp
from matplotlib.patches import Polygon
from matplotlib.dates import date2num
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphUtilities import (
to_timestamp,
pixelToPoint,
PrettyDateLocator,
PrettyDateFormatter,
PrettyScalarFormatter,
)
class BarGraph(PlotBase):
"""
The BarGraph class is a straightforward bar graph; given a dictionary
of values, it takes the keys as the independent variable and the values
as the dependent variable.
"""
def __init__(self, data, ax, prefs, *args, **kw):
PlotBase.__init__(self, data, ax, prefs, *args, **kw)
if "span" in self.prefs:
self.width = self.prefs["span"]
else:
self.width = 1.0
if self.gdata.key_type == "time":
# Try to guess the time bin span
nKeys = self.gdata.getNumberOfKeys()
self.width = (max(self.gdata.all_keys) - min(self.gdata.all_keys)) / (nKeys - 1)
def METHOD_NAME(self):
PlotBase.METHOD_NAME(self)
self.x_formatter_cb(self.ax)
if self.gdata.isEmpty():
return None
tmp_x = []
tmp_y = []
# Evaluate the bar width
width = float(self.width)
if self.gdata.key_type == "time":
# width = (1 - self.bar_graph_space) * width / 86400.0
width = width / 86400.0
offset = 0
elif self.gdata.key_type == "string":
self.bar_graph_space = 0.1
width = (1 - self.bar_graph_space) * width
offset = self.bar_graph_space / 2.0
else:
offset = 0
start_plot = 0
end_plot = 0
if "starttime" in self.prefs and "endtime" in self.prefs:
start_plot = date2num(datetime.datetime.fromtimestamp(to_timestamp(self.prefs["starttime"])))
end_plot = date2num(datetime.datetime.fromtimestamp(to_timestamp(self.prefs["endtime"])))
nKeys = self.gdata.getNumberOfKeys()
tmp_b = []
if "log_yaxis" in self.prefs:
tmp_b = [0.001] * nKeys
ymin = 0.001
else:
tmp_b = [0.0] * nKeys
ymin = 0.0
self.polygons = []
self.lines = []
labels = self.gdata.getLabels()
labels.reverse()
# If it is a simple plot, no labels are used
# Evaluate the most appropriate color in this case
if self.gdata.isSimplePlot():
labels = [("SimplePlot", 0.0)]
color = self.prefs.get("plot_color", "Default")
if color.find("#") != -1:
self.palette.setColor("SimplePlot", color)
else:
labels = [(color, 0.0)]
seq_b = [(self.gdata.max_num_key + width, 0.0), (self.gdata.min_num_key, 0.0)]
zorder = 0.0
dpi = self.prefs.get("dpi", 100)
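        # Each label becomes one Polygon: its top edge traces the cumulative bar
        # tops (tmp_x, tmp_y) and seq_b closes the shape back along the baseline.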
for label, num in labels:
color = self.palette.getColor(label)
ind = 0
tmp_x = []
tmp_y = []
tmp_t = []
plot_data = self.gdata.getPlotNumData(label)
for key, value, error in plot_data:
if value is None:
value = 0.0
tmp_x.append(offset + key)
# tmp_y.append(ymin)
tmp_y.append(0.001)
tmp_x.append(offset + key)
tmp_y.append(float(value) + tmp_b[ind])
tmp_x.append(offset + key + width)
tmp_y.append(float(value) + tmp_b[ind])
tmp_x.append(offset + key + width)
# tmp_y.append(ymin)
tmp_y.append(0.001)
tmp_t.append(float(value) + tmp_b[ind])
ind += 1
seq_t = list(zip(tmp_x, tmp_y))
seq = seq_t + seq_b
poly = Polygon(seq, facecolor=color, fill=True, linewidth=pixelToPoint(0.2, dpi), zorder=zorder)
self.ax.add_patch(poly)
self.polygons.append(poly)
tmp_b = list(tmp_t)
zorder -= 0.1
tight_bars_flag = self.prefs.get("tight_bars", False)
if tight_bars_flag:
setp(self.polygons, linewidth=0.0)
# pivots = keys
# for idx in range(len(pivots)):
# self.coords[ pivots[idx] ] = self.bars[idx]
ymax = max(tmp_b)
ymax *= 1.1
if "log_yaxis" in self.prefs:
ymin = 0.001
else:
ymin = min(min(tmp_b), 0.0)
ymin *= 1.1
xmax = max(tmp_x)
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ymin = self.prefs.get("ymin", ymin)
ymax = self.prefs.get("ymax", ymax)
xmin = self.prefs.get("xmin", xmin)
xmax = self.prefs.get("xmax", xmax)
self.ax.set_xlim(xmin=xmin, xmax=xmax + offset)
self.ax.set_ylim(ymin=ymin, ymax=ymax)
if self.gdata.key_type == "time":
if start_plot and end_plot:
self.ax.set_xlim(xmin=start_plot, xmax=end_plot)
else:
self.ax.set_xlim(xmin=min(tmp_x), xmax=max(tmp_x))
def x_formatter_cb(self, ax):
if self.gdata.key_type == "string":
smap = self.gdata.getStringMap()
reverse_smap = {}
for key, val in smap.items():
reverse_smap[val] = key
ticks = sorted(smap.values())
ax.set_xticks([i + 0.5 for i in ticks])
ax.set_xticklabels([reverse_smap[i] for i in ticks])
labels = ax.get_xticklabels()
ax.grid(False)
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ax.set_xlim(xmin=xmin, xmax=len(ticks))
elif self.gdata.key_type == "time":
# ax.set_xlim( xmin=self.begin_num,xmax=self.end_num )
dl = PrettyDateLocator()
df = PrettyDateFormatter(dl)
ax.xaxis.set_major_locator(dl)
ax.xaxis.set_major_formatter(df)
ax.xaxis.set_clip_on(False)
sf = PrettyScalarFormatter()
ax.yaxis.set_major_formatter(sf)
# labels = ax.get_xticklabels()
else:
return None
|
985 |
print tree
|
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) The GTG Team
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Base for all store classes."""
from gi.repository import GObject
from uuid import UUID
import logging
from lxml.etree import Element
from typing import List, Any, Dict
log = logging.getLogger(__name__)
class BaseStore(GObject.Object):
"""Base class for data stores."""
def __init__(self) -> None:
self.lookup: Dict[UUID, Any] = {}
self.data: List[Any] = []
super().__init__()
# --------------------------------------------------------------------------
# BASIC MANIPULATION
# --------------------------------------------------------------------------
def new(self) -> Any:
        raise NotImplementedError
def get(self, key: str) -> Any:
"""Get an item by id."""
return self.lookup[key]
def add(self, item: Any, parent_id: UUID = None) -> None:
"""Add an existing item to the store."""
if item.id in self.lookup.keys():
log.warning('Failed to add item with id %s, already added!',
item.id)
raise KeyError
if parent_id:
try:
self.lookup[parent_id].children.append(item)
item.parent = self.lookup[parent_id]
except KeyError:
log.warning(('Failed to add item with id %s to parent %s, '
'parent not found!'), item.id, parent_id)
raise
else:
self.data.append(item)
self.lookup[item.id] = item
log.debug('Added %s', item)
@GObject.Signal(name='added', arg_types=(object,))
def add_signal(self, *_):
"""Signal to emit when adding a new item."""
@GObject.Signal(name='removed', arg_types=(str,))
def remove_signal(self, *_):
"""Signal to emit when removing a new item."""
@GObject.Signal(name='parent-change', arg_types=(object, object))
def parent_change_signal(self, *_):
"""Signal to emit when an item parent changes."""
@GObject.Signal(name='parent-removed', arg_types=(object, object))
def parent_removed_signal(self, *_):
"""Signal to emit when an item's parent is removed."""
def remove(self, item_id: UUID) -> None:
"""Remove an existing item from the store."""
item = self.lookup[item_id]
parent = item.parent
for child in item.children:
del self.lookup[child.id]
if parent:
parent.children.remove(item)
del self.lookup[item_id]
else:
self.data.remove(item)
del self.lookup[item_id]
self.emit('removed', str(item_id))
# --------------------------------------------------------------------------
# PARENTING
# --------------------------------------------------------------------------
def parent(self, item_id: UUID, parent_id: UUID) -> None:
"""Add a child to an item."""
try:
item = self.lookup[item_id]
except KeyError:
raise
try:
self.data.remove(item)
self.lookup[parent_id].children.append(item)
item.parent = self.lookup[parent_id]
self.emit('parent-change', item, self.lookup[parent_id])
except KeyError:
raise
def unparent(self, item_id: UUID, parent_id: UUID) -> None:
"""Remove child item from a parent."""
for child in self.lookup[parent_id].children:
if child.id == item_id:
self.data.append(child)
self.lookup[parent_id].children.remove(child)
child.parent = None
self.emit('parent-removed',
self.lookup[item_id],
self.lookup[parent_id])
return
raise KeyError
# --------------------------------------------------------------------------
# SERIALIZING
# --------------------------------------------------------------------------
def from_xml(self, xml: Element) -> Any:
        raise NotImplementedError
def to_xml(self) -> Element:
        raise NotImplementedError
# --------------------------------------------------------------------------
# UTILITIES
# --------------------------------------------------------------------------
def count(self, root_only: bool = False) -> int:
"""Count all the items in the store."""
if root_only:
return len(self.data)
else:
return len(self.lookup)
def refresh_lookup_cache(self) -> None:
"""Refresh lookup cache."""
def add_children(nodes) -> None:
"""Recursively add children to lookup."""
for n in nodes:
self.lookup[n.id] = n
if n.children:
add_children(n.children)
self.lookup.clear()
add_children(self.data)
def print_list(self) -> None:
"""Print the entre list of items."""
print(self)
for node in self.lookup.values():
print(f'- {node}')
def METHOD_NAME(self) -> None:
"""Print the all the items as a tree."""
def recursive_print(tree: List, indent: int) -> None:
"""Inner print function. """
tab = ' ' * indent if indent > 0 else ''
for node in tree:
print(f'{tab} └ {node}')
if node.children:
recursive_print(node.children, indent + 1)
print(self)
recursive_print(self.data, 0)
|
986 |
test convert braket bell
|
# Copyright (C) 2023 qBraid
#
# This file is part of the qBraid-SDK
#
# The qBraid-SDK is free software released under the GNU General Public License v3
# or later. You can redistribute and/or modify it under the terms of the GPL v3.
# See the LICENSE file in the project root or <https://www.gnu.org/licenses/gpl-3.0.html>.
#
# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3.
"""
Unit tests for converting circuits to use contiguous qubit indexing
"""
import numpy as np
import pytest
from braket.circuits import Circuit as BKCircuit
from cirq import Circuit, LineQubit, X, Y, Z
from pytket.circuit import Circuit as TKCircuit
from qiskit import QuantumCircuit
from qiskit.qasm3 import loads
from qbraid.exceptions import ProgramTypeError
from qbraid.interface.calculate_unitary import circuits_allclose
from qbraid.interface.convert_to_contiguous import convert_to_contiguous
def test_remove_idle_qubits_qiskit():
"""Test convert_to_contigious on qiskit circuit"""
circuit = QuantumCircuit(3)
circuit.h(0)
circuit.cx(0, 1)
contig_circuit = convert_to_contiguous(circuit)
assert contig_circuit.num_qubits == 2
def METHOD_NAME():
"""Test convert_to_contigious on bell circuit"""
circuit = BKCircuit().h(0).cnot(0, 1) # pylint: disable=no-member
h_gate = np.sqrt(1 / 2) * np.array([[1, 1], [1, -1]])
h_gate_kron = np.kron(np.eye(2), h_gate)
cnot_gate = np.array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])
u_expected = np.einsum("ij,jk->ki", h_gate_kron, cnot_gate)
contig_circuit = convert_to_contiguous(circuit)
u_test = contig_circuit.as_unitary()
assert np.allclose(u_expected, u_test)
def test_compare_conversion_braket_cirq():
"""Test unitary equivalance after converting to contiguous qubits"""
# pylint: disable=no-member
braket_circuit = BKCircuit()
braket_circuit.x(0)
braket_circuit.y(2)
braket_circuit.z(4)
# pylint: enable=no-member
assert braket_circuit.qubit_count == 3
cirq_circuit = Circuit()
q0 = LineQubit(0)
q2 = LineQubit(2)
q4 = LineQubit(4)
cirq_circuit.append(X(q0))
cirq_circuit.append(Y(q2))
cirq_circuit.append(Z(q4))
assert len(cirq_circuit.all_qubits()) == 3
assert circuits_allclose(braket_circuit, cirq_circuit, strict_gphase=True)
braket_compat_circuit = convert_to_contiguous(braket_circuit)
assert braket_compat_circuit.qubit_count == 3
cirq_compat_circuit = convert_to_contiguous(cirq_circuit)
assert circuits_allclose(braket_compat_circuit, cirq_compat_circuit, strict_gphase=True)
cirq_expanded_circuit = convert_to_contiguous(cirq_circuit, expansion=True)
assert len(cirq_expanded_circuit.all_qubits()) == 5
def test_braket_control_modifier():
"""Test that converting braket circuits to contiguous qubits works with control modifiers"""
circuit = BKCircuit().y(target=0, control=1)
    contig_circuit = convert_to_contiguous(circuit)
    assert circuit.qubit_count == contig_circuit.qubit_count
def test_remove_blank_wires_pytket():
"""Test wires with no operations from pytket circuit"""
circuit = TKCircuit(3)
circuit.H(0)
circuit.CX(0, 1)
contig_circuit = convert_to_contiguous(circuit)
assert contig_circuit.n_qubits == 2
def test_unitary_raises():
"""Test that convert_to_contiguous raises an error when passed a non-circuit object"""
with pytest.raises(ProgramTypeError):
convert_to_contiguous(None)
def test_convert_qasm3_expansion():
"""Test that convert_to_contiguous for qasm3 string"""
qasm3_str = """
OPENQASM 3;
include "stdgates.inc";
qubit[4] q;
h q[1];
cx q[1], q[3];
"""
contig_qasm3_str = convert_to_contiguous(qasm3_str, expansion=True)
assert contig_qasm3_str == qasm3_str + """i q[0];\ni q[2];\n"""
def test_convert_qasm3():
"""Test that convert_to_contiguous for qasm3 string"""
qasm3_str = """
OPENQASM 3;
include "stdgates.inc";
qubit[4] q;
h q[1];
cx q[1], q[3];
"""
contig_qasm3_str = convert_to_contiguous(qasm3_str)
circuit_contig = loads(contig_qasm3_str)
assert circuit_contig.num_qubits == 2
|
987 |
have hardware timing
|
#!/usr/bin/env python
"""
Voltage controlled Z stage functionality.
Hazen 05/17
George 02/18 - Abstracted from mclVoltageZModule.py
"""
from PyQt5 import QtCore
import numpy as np
import storm_control.hal4000.halLib.halMessage as halMessage
import storm_control.sc_hardware.baseClasses.daqModule as daqModule
import storm_control.sc_hardware.baseClasses.hardwareModule as hardwareModule
import storm_control.sc_hardware.baseClasses.lockModule as lockModule
class VoltageZFunctionality(hardwareModule.HardwareFunctionality, lockModule.ZStageFunctionalityMixin):
"""
This supports hardware timed z scans. These work by passing control of the
analog line that this functionality is using back to the DAQ at the start
of filming. A focuslock.lockModes.LockMode that uses this should *not* use
the analog line during filming. We're blocking this by checking if the
line is being used for filming before we try and set a voltage on it.
Note: This will remember the current z position at the start of the film
and return to it at the end of the film, which might not be what
we want?
FIXME: The stage will appear to stop moving during filming.
"""
zStagePosition = QtCore.pyqtSignal(float)
def __init__(self, invert_signal = False, ao_fn = None,
microns_to_volts = None, **kwds):
super().__init__(**kwds)
self.ao_fn = ao_fn
self.film_z = None
self.maximum = self.getParameter("maximum")
self.microns_to_volts = microns_to_volts
self.minimum = self.getParameter("minimum")
self.invert_signal = invert_signal
self.ao_fn.filming.connect(self.handleFilming)
self.recenter()
def restrictZPos(self,z_pos):
#Ensure that all requested z positions are within the maximum and minimum range, working in units of microns
if (z_pos < self.minimum):
z_pos = self.minimum
if (z_pos > self.maximum):
z_pos = self.maximum
return z_pos
def micronsToVolt(self,z_pos):
        # Takes a position in microns and returns volts, after restricting it to the max and min of the range
z_pos = self.restrictZPos(z_pos)
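        # With an inverted output the DAC swings down from 10 V at position zero,
        # so the scaled position is subtracted from the 10 V full scale.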
if self.invert_signal:
z_pos = 10-(z_pos*self.microns_to_volts)
else:
z_pos = z_pos*self.microns_to_volts
return z_pos
def getDaqWaveform(self, waveform):
waveform = np.array([self.micronsToVolt(x) for x in waveform])
return daqModule.DaqWaveform(source = self.ao_fn.getSource(),
waveform = waveform)
def goAbsolute(self, z_pos, invert = False):
if self.ao_fn.amFilming():
return
self.z_position = self.restrictZPos(z_pos)
self.ao_fn.output(self.micronsToVolt(z_pos))
self.zStagePosition.emit(self.z_position)
def goRelative(self, z_delta):
z_pos = self.z_position + z_delta
self.goAbsolute(z_pos)
def handleFilming(self, filming):
# Record current z position at the start of the film.
if filming:
self.film_z = self.z_position
# Return to the current z position at the end of the film.
else:
self.goAbsolute(self.film_z)
def METHOD_NAME(self):
return True
class VoltageZ(hardwareModule.HardwareModule):
"""
This is a Z-piezo stage in analog control mode.
"""
def __init__(self, module_params = None, qt_settings = None, **kwds):
super().__init__(**kwds)
self.configuration = module_params.get("configuration")
self.z_stage_functionality = None
def cleanUp(self, qt_settings):
if self.z_stage_functionality is not None:
self.z_stage_functionality.goAbsolute(
self.z_stage_functionality.getMinimum())
def handleResponse(self, message, response):
if message.isType("get functionality"):
self.z_stage_functionality = VoltageZFunctionality(
ao_fn = response.getData()["functionality"],
parameters = self.configuration.get("parameters"),
microns_to_volts = self.configuration.get("microns_to_volts"))
def processMessage(self, message):
if message.isType("configure1"):
self.sendMessage(halMessage.HalMessage(
m_type = "get functionality",
data = {"name" : self.configuration.get("ao_fn_name")}))
elif message.isType("get functionality"):
if (message.getData()["name"] == self.module_name):
if self.z_stage_functionality is not None:
message.addResponse(
halMessage.HalMessageResponse(source = self.module_name,
data = {"functionality" : self.z_stage_functionality}))
|
988 |
test hide nodes
|
import pytest
import networkx as nx
class TestFilterFactory:
def test_no_filter(self):
nf = nx.filters.no_filter
assert nf()
assert nf(1)
assert nf(2, 1)
def METHOD_NAME(self):
f = nx.classes.filters.hide_nodes([1, 2, 3])
assert not f(1)
assert not f(2)
assert not f(3)
assert f(4)
assert f(0)
assert f("a")
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f)
def test_show_nodes(self):
f = nx.classes.filters.show_nodes([1, 2, 3])
assert f(1)
assert f(2)
assert f(3)
assert not f(4)
assert not f(0)
assert not f("a")
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f)
def test_hide_edges(self):
factory = nx.classes.filters.hide_edges
f = factory([(1, 2), (3, 4)])
assert not f(1, 2)
assert not f(3, 4)
assert not f(4, 3)
assert f(2, 3)
assert f(0, -1)
assert f("a", "b")
pytest.raises(TypeError, f, 1, 2, 3)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2, 3)])
def test_show_edges(self):
factory = nx.classes.filters.show_edges
f = factory([(1, 2), (3, 4)])
assert f(1, 2)
assert f(3, 4)
assert f(4, 3)
assert not f(2, 3)
assert not f(0, -1)
assert not f("a", "b")
pytest.raises(TypeError, f, 1, 2, 3)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2, 3)])
def test_hide_diedges(self):
factory = nx.classes.filters.hide_diedges
f = factory([(1, 2), (3, 4)])
assert not f(1, 2)
assert not f(3, 4)
assert f(4, 3)
assert f(2, 3)
assert f(0, -1)
assert f("a", "b")
pytest.raises(TypeError, f, 1, 2, 3)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2, 3)])
def test_show_diedges(self):
factory = nx.classes.filters.show_diedges
f = factory([(1, 2), (3, 4)])
assert f(1, 2)
assert f(3, 4)
assert not f(4, 3)
assert not f(2, 3)
assert not f(0, -1)
assert not f("a", "b")
pytest.raises(TypeError, f, 1, 2, 3)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2, 3)])
def test_hide_multiedges(self):
factory = nx.classes.filters.hide_multiedges
f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)])
assert not f(1, 2, 0)
assert not f(1, 2, 1)
assert f(1, 2, 2)
assert f(3, 4, 0)
assert not f(3, 4, 1)
assert not f(4, 3, 1)
assert f(4, 3, 0)
assert f(2, 3, 0)
assert f(0, -1, 0)
assert f("a", "b", 0)
pytest.raises(TypeError, f, 1, 2, 3, 4)
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2)])
pytest.raises(ValueError, factory, [(1, 2, 3, 4)])
def test_show_multiedges(self):
factory = nx.classes.filters.show_multiedges
f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)])
assert f(1, 2, 0)
assert f(1, 2, 1)
assert not f(1, 2, 2)
assert not f(3, 4, 0)
assert f(3, 4, 1)
assert f(4, 3, 1)
assert not f(4, 3, 0)
assert not f(2, 3, 0)
assert not f(0, -1, 0)
assert not f("a", "b", 0)
pytest.raises(TypeError, f, 1, 2, 3, 4)
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2)])
pytest.raises(ValueError, factory, [(1, 2, 3, 4)])
def test_hide_multidiedges(self):
factory = nx.classes.filters.hide_multidiedges
f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)])
assert not f(1, 2, 0)
assert not f(1, 2, 1)
assert f(1, 2, 2)
assert f(3, 4, 0)
assert not f(3, 4, 1)
assert f(4, 3, 1)
assert f(4, 3, 0)
assert f(2, 3, 0)
assert f(0, -1, 0)
assert f("a", "b", 0)
pytest.raises(TypeError, f, 1, 2, 3, 4)
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2)])
pytest.raises(ValueError, factory, [(1, 2, 3, 4)])
def test_show_multidiedges(self):
factory = nx.classes.filters.show_multidiedges
f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)])
assert f(1, 2, 0)
assert f(1, 2, 1)
assert not f(1, 2, 2)
assert not f(3, 4, 0)
assert f(3, 4, 1)
assert not f(4, 3, 1)
assert not f(4, 3, 0)
assert not f(2, 3, 0)
assert not f(0, -1, 0)
assert not f("a", "b", 0)
pytest.raises(TypeError, f, 1, 2, 3, 4)
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2)])
pytest.raises(ValueError, factory, [(1, 2, 3, 4)])
|
989 |
assert span
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import sys
import zipfile
from unittest import mock
import botocore.session
from moto import mock_iam, mock_lambda # pylint: disable=import-error
from pytest import mark
from opentelemetry.instrumentation.botocore import BotocoreInstrumentor
from opentelemetry.instrumentation.botocore.extensions.lmbd import (
_LambdaExtension,
)
from opentelemetry.propagate import get_global_textmap, set_global_textmap
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.test.mock_textmap import MockTextMapPropagator
from opentelemetry.test.test_base import TestBase
from opentelemetry.trace.span import Span
def get_as_zip_file(file_name, content):
zip_output = io.BytesIO()
with zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED) as zip_file:
zip_file.writestr(file_name, content)
zip_output.seek(0)
return zip_output.read()
def return_headers_lambda_str():
pfunc = """
def lambda_handler(event, context):
print("custom log event")
headers = event.get('headers', event.get('attributes', {}))
return headers
"""
return pfunc
class TestLambdaExtension(TestBase):
def setUp(self):
super().setUp()
BotocoreInstrumentor().instrument()
session = botocore.session.get_session()
session.set_credentials(
access_key="access-key", secret_key="secret-key"
)
self.region = "us-west-2"
self.client = session.create_client("lambda", region_name=self.region)
self.iam_client = session.create_client("iam", region_name=self.region)
def tearDown(self):
super().tearDown()
BotocoreInstrumentor().uninstrument()
def METHOD_NAME(self, operation: str) -> Span:
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(1, len(spans))
span = spans[0]
self.assertEqual(operation, span.attributes[SpanAttributes.RPC_METHOD])
self.assertEqual("Lambda", span.attributes[SpanAttributes.RPC_SERVICE])
self.assertEqual("aws-api", span.attributes[SpanAttributes.RPC_SYSTEM])
return span
def assert_invoke_span(self, function_name: str) -> Span:
span = self.METHOD_NAME("Invoke")
self.assertEqual(
"aws", span.attributes[SpanAttributes.FAAS_INVOKED_PROVIDER]
)
self.assertEqual(
self.region, span.attributes[SpanAttributes.FAAS_INVOKED_REGION]
)
self.assertEqual(
function_name, span.attributes[SpanAttributes.FAAS_INVOKED_NAME]
)
return span
@staticmethod
def _create_extension(operation: str) -> _LambdaExtension:
mock_call_context = mock.MagicMock(operation=operation, params={})
return _LambdaExtension(mock_call_context)
@mock_lambda
def test_list_functions(self):
self.client.list_functions()
self.METHOD_NAME("ListFunctions")
@mock_iam
def _create_role_and_get_arn(self) -> str:
return self.iam_client.create_role(
RoleName="my-role",
AssumeRolePolicyDocument="some policy",
Path="/my-path/",
)["Role"]["Arn"]
def _create_lambda_function(self, function_name: str, function_code: str):
role_arn = self._create_role_and_get_arn()
self.client.create_function(
FunctionName=function_name,
Runtime="python3.8",
Role=role_arn,
Handler="lambda_function.lambda_handler",
Code={
"ZipFile": get_as_zip_file("lambda_function.py", function_code)
},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
@mark.skip(reason="Docker error, unblocking builds for now.")
@mark.skipif(
sys.platform == "win32",
reason="requires docker and Github CI Windows does not have docker installed by default",
)
@mock_lambda
def test_invoke(self):
previous_propagator = get_global_textmap()
try:
set_global_textmap(MockTextMapPropagator())
function_name = "testFunction"
self._create_lambda_function(
function_name, return_headers_lambda_str()
)
# 2 spans for create IAM + create lambda
self.assertEqual(2, len(self.memory_exporter.get_finished_spans()))
self.memory_exporter.clear()
response = self.client.invoke(
Payload=json.dumps({}),
FunctionName=function_name,
InvocationType="RequestResponse",
)
span = self.assert_invoke_span(function_name)
span_context = span.get_span_context()
            # assert injected span
headers = json.loads(response["Payload"].read().decode("utf-8"))
self.assertEqual(
str(span_context.trace_id),
headers[MockTextMapPropagator.TRACE_ID_KEY],
)
self.assertEqual(
str(span_context.span_id),
headers[MockTextMapPropagator.SPAN_ID_KEY],
)
finally:
set_global_textmap(previous_propagator)
def test_invoke_parse_arn(self):
function_name = "my_func"
arns = (
f"arn:aws:lambda:{self.region}:000000000000:function:{function_name}", # full arn
f"000000000000:{function_name}", # partial arn
f"arn:aws:lambda:{self.region}:000000000000:function:{function_name}:alias", # aliased arn
)
for arn in arns:
with self.subTest(arn=arn):
extension = self._create_extension("Invoke")
extension._call_context.params["FunctionName"] = arn
attributes = {}
extension.extract_attributes(attributes)
self.assertEqual(
function_name, attributes[SpanAttributes.FAAS_INVOKED_NAME]
)
|
990 |
ensure module
|
from typing import Optional
import numpy as np
import pytest
from numcodecs.abc import Codec
from numcodecs.compat import ensure_contiguous_ndarray_like
from numcodecs.registry import get_codec, register_codec
import zarr.codecs
from zarr.core import Array
from zarr.creation import array, empty, full, ones, open_array, zeros
from zarr.hierarchy import open_group, group
from zarr.storage import DirectoryStore, MemoryStore, Store, ZipStore
class CuPyCPUCompressor(Codec): # pragma: no cover
"""CPU compressor for CuPy arrays
This compressor converts CuPy arrays host memory before compressing
the arrays using `compressor`.
Parameters
----------
compressor : numcodecs.abc.Codec
The codec to use for compression and decompression.
"""
codec_id = "cupy_cpu_compressor"
def __init__(self, compressor: Codec = None):
self.compressor = compressor
def encode(self, buf):
import cupy
buf = cupy.asnumpy(ensure_contiguous_ndarray_like(buf))
if self.compressor:
buf = self.compressor.encode(buf)
return buf
def decode(self, chunk, out=None):
import cupy
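        # Decompress on the host first, then copy the chunk back into device memory.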
if self.compressor:
cpu_out = None if out is None else cupy.asnumpy(out)
chunk = self.compressor.decode(chunk, cpu_out)
chunk = cupy.asarray(ensure_contiguous_ndarray_like(chunk))
if out is not None:
cupy.copyto(out, chunk.view(dtype=out.dtype), casting="no")
chunk = out
return chunk
def get_config(self):
cc_config = self.compressor.get_config() if self.compressor else None
return {
"id": self.codec_id,
"compressor_config": cc_config,
}
@classmethod
def from_config(cls, config):
cc_config = config.get("compressor_config", None)
compressor = get_codec(cc_config) if cc_config else None
return cls(compressor=compressor)
register_codec(CuPyCPUCompressor)
class MyArray(np.ndarray):
"""Dummy array class to test the `meta_array` argument
Useful when CuPy isn't available.
This class also makes some of the functions from the numpy
module available.
"""
testing = np.testing
@classmethod
def arange(cls, size):
ret = cls(shape=(size,), dtype="int64")
ret[:] = range(size)
return ret
@classmethod
def empty(cls, shape):
return cls(shape=shape)
def init_compressor(compressor) -> CuPyCPUCompressor:
if compressor:
compressor = getattr(zarr.codecs, compressor)()
return CuPyCPUCompressor(compressor)
def init_store(tmp_path, store_type) -> Optional[Store]:
if store_type is DirectoryStore:
return store_type(str(tmp_path / "store"))
if store_type is MemoryStore:
return MemoryStore()
return None
def METHOD_NAME(module):
if isinstance(module, str):
return pytest.importorskip(module)
return module
param_module_and_compressor = [
(MyArray, None),
("cupy", init_compressor(None)),
("cupy", init_compressor("Zlib")),
("cupy", init_compressor("Blosc")),
]
@pytest.mark.parametrize("module, compressor", param_module_and_compressor)
@pytest.mark.parametrize("store_type", [None, DirectoryStore, MemoryStore, ZipStore])
def test_array(tmp_path, module, compressor, store_type):
xp = METHOD_NAME(module)
store = init_store(tmp_path / "from_cupy_array", store_type)
a = xp.arange(100)
z = array(a, chunks=10, compressor=compressor, store=store, meta_array=xp.empty(()))
assert a.shape == z.shape
assert a.dtype == z.dtype
assert isinstance(a, type(z[:]))
assert isinstance(z.meta_array, type(xp.empty(())))
xp.testing.assert_array_equal(a, z[:])
# with array-like
store = init_store(tmp_path / "from_list", store_type)
a = list(range(100))
z = array(a, chunks=10, compressor=compressor, store=store, meta_array=xp.empty(()))
assert (100,) == z.shape
assert np.asarray(a).dtype == z.dtype
xp.testing.assert_array_equal(a, z[:])
# with another zarr array
store = init_store(tmp_path / "from_another_store", store_type)
z2 = array(z, compressor=compressor, store=store, meta_array=xp.empty(()))
assert z.shape == z2.shape
assert z.chunks == z2.chunks
assert z.dtype == z2.dtype
xp.testing.assert_array_equal(z[:], z2[:])
store = init_store(tmp_path / "open_array", store_type)
a = xp.arange(100)
z = open_array(
store,
shape=a.shape,
dtype=a.dtype,
chunks=10,
compressor=compressor,
meta_array=xp.empty(()),
)
z[:] = a
assert a.shape == z.shape
assert a.dtype == z.dtype
assert isinstance(a, type(z[:]))
assert isinstance(z.meta_array, type(xp.empty(())))
xp.testing.assert_array_equal(a, z[:])
@pytest.mark.parametrize("module, compressor", param_module_and_compressor)
def test_empty(module, compressor):
xp = METHOD_NAME(module)
z = empty(
100,
chunks=10,
compressor=compressor,
meta_array=xp.empty(()),
)
assert (100,) == z.shape
assert (10,) == z.chunks
@pytest.mark.parametrize("module, compressor", param_module_and_compressor)
def test_zeros(module, compressor):
xp = METHOD_NAME(module)
z = zeros(
100,
chunks=10,
compressor=compressor,
meta_array=xp.empty(()),
)
assert (100,) == z.shape
assert (10,) == z.chunks
xp.testing.assert_array_equal(np.zeros(100), z[:])
@pytest.mark.parametrize("module, compressor", param_module_and_compressor)
def test_ones(module, compressor):
xp = METHOD_NAME(module)
z = ones(
100,
chunks=10,
compressor=compressor,
meta_array=xp.empty(()),
)
assert (100,) == z.shape
assert (10,) == z.chunks
xp.testing.assert_array_equal(np.ones(100), z[:])
@pytest.mark.parametrize("module, compressor", param_module_and_compressor)
def test_full(module, compressor):
xp = METHOD_NAME(module)
z = full(
100,
chunks=10,
fill_value=42,
dtype="i4",
compressor=compressor,
meta_array=xp.empty(()),
)
assert (100,) == z.shape
assert (10,) == z.chunks
xp.testing.assert_array_equal(np.full(100, fill_value=42, dtype="i4"), z[:])
# nan
z = full(
100,
chunks=10,
fill_value=np.nan,
dtype="f8",
compressor=compressor,
meta_array=xp.empty(()),
)
assert np.all(np.isnan(z[:]))
@pytest.mark.parametrize("group_create_function", [group, open_group])
@pytest.mark.parametrize("module, compressor", param_module_and_compressor)
@pytest.mark.parametrize("store_type", [None, DirectoryStore, MemoryStore, ZipStore])
def test_group(tmp_path, group_create_function, module, compressor, store_type):
xp = METHOD_NAME(module)
store = init_store(tmp_path, store_type)
g = group_create_function(store, meta_array=xp.empty(()))
g.ones("data", shape=(10, 11), dtype=int, compressor=compressor)
a = g["data"]
assert a.shape == (10, 11)
assert a.dtype == int
assert isinstance(a, Array)
assert isinstance(a[:], type(xp.empty(())))
assert (a[:] == 1).all()
assert isinstance(g.meta_array, type(xp.empty(())))
|
991 |
tls ca ssl context
|
import contextlib
import importlib.util
import os
import socket
import ssl
from copy import deepcopy
from hashlib import md5
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Thread
from time import sleep
from uuid import uuid4
import pytest
try:
import trustme
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
HAVE_TRUSTME = True
except ImportError: # pragma: no cover
HAVE_TRUSTME = False
from uvicorn.config import LOGGING_CONFIG
from uvicorn.importer import import_from_string
# Note: We explicitly turn propagation on just for the tests, because pytest's
# caplog is not able to capture loggers that do not propagate.
#
# The caplog_for_logger helper also does not work for the config test cases,
# because creating a Config object calls Config.configure_logging, which removes
# caplog's handler.
#
# The simple solution is to set propagate=True before executing the tests.
#
# See also: https://github.com/pytest-dev/pytest/issues/3697
LOGGING_CONFIG["loggers"]["uvicorn"]["propagate"] = True
@pytest.fixture
def tls_certificate_authority() -> "trustme.CA":
if not HAVE_TRUSTME:
pytest.skip("trustme not installed") # pragma: no cover
return trustme.CA()
@pytest.fixture
def tls_certificate(tls_certificate_authority: "trustme.CA") -> "trustme.LeafCert":
return tls_certificate_authority.issue_cert(
"localhost",
"127.0.0.1",
"::1",
)
@pytest.fixture
def tls_ca_certificate_pem_path(tls_certificate_authority: "trustme.CA"):
with tls_certificate_authority.cert_pem.tempfile() as ca_cert_pem:
yield ca_cert_pem
@pytest.fixture
def tls_ca_certificate_private_key_path(tls_certificate_authority: "trustme.CA"):
with tls_certificate_authority.private_key_pem.tempfile() as private_key:
yield private_key
@pytest.fixture
def tls_certificate_private_key_encrypted_path(tls_certificate):
private_key = serialization.load_pem_private_key(
tls_certificate.private_key_pem.bytes(),
password=None,
backend=default_backend(),
)
encrypted_key = private_key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.BestAvailableEncryption(b"uvicorn password for the win"),
)
with trustme.Blob(encrypted_key).tempfile() as private_encrypted_key:
yield private_encrypted_key
@pytest.fixture
def tls_certificate_private_key_path(tls_certificate: "trustme.LeafCert"):
with tls_certificate.private_key_pem.tempfile() as private_key:
yield private_key
@pytest.fixture
def tls_certificate_key_and_chain_path(tls_certificate: "trustme.LeafCert"):
with tls_certificate.private_key_and_cert_chain_pem.tempfile() as cert_pem:
yield cert_pem
@pytest.fixture
def tls_certificate_server_cert_path(tls_certificate: "trustme.LeafCert"):
with tls_certificate.cert_chain_pems[0].tempfile() as cert_pem:
yield cert_pem
@pytest.fixture
def METHOD_NAME(tls_certificate_authority: "trustme.CA") -> ssl.SSLContext:
ssl_ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
tls_certificate_authority.configure_trust(ssl_ctx)
return ssl_ctx
@pytest.fixture(scope="package")
def reload_directory_structure(tmp_path_factory: pytest.TempPathFactory):
"""
This fixture creates a directory structure to enable reload parameter tests
The fixture has the following structure:
root
├── [app, app_first, app_second, app_third]
│ ├── css
│ │ └── main.css
│ ├── js
│ │ └── main.js
│ ├── src
│ │ └── main.py
│ └── sub
│ └── sub.py
├── ext
│ └── ext.jpg
└── main.py
"""
root = tmp_path_factory.mktemp("reload_directory")
apps = ["app", "app_first", "app_second", "app_third"]
root_file = root / "main.py"
root_file.touch()
dotted_file = root / ".dotted"
dotted_file.touch()
dotted_dir = root / ".dotted_dir"
dotted_dir.mkdir()
dotted_dir_file = dotted_dir / "file.txt"
dotted_dir_file.touch()
for app in apps:
app_path = root / app
app_path.mkdir()
dir_files = [
("src", ["main.py"]),
("js", ["main.js"]),
("css", ["main.css"]),
("sub", ["sub.py"]),
]
for directory, files in dir_files:
directory_path = app_path / directory
directory_path.mkdir()
for file in files:
file_path = directory_path / file
file_path.touch()
ext_dir = root / "ext"
ext_dir.mkdir()
ext_file = ext_dir / "ext.jpg"
ext_file.touch()
yield root
@pytest.fixture
def anyio_backend() -> str:
return "asyncio"
@pytest.fixture(scope="function")
def logging_config() -> dict:
return deepcopy(LOGGING_CONFIG)
@pytest.fixture
def short_socket_name(tmp_path, tmp_path_factory): # pragma: py-win32
max_sock_len = 100
socket_filename = "my.sock"
identifier = f"{uuid4()}-"
identifier_len = len(identifier.encode())
tmp_dir = Path("/tmp").resolve()
os_tmp_dir = Path(os.getenv("TMPDIR", "/tmp")).resolve()
basetemp = Path(
str(tmp_path_factory.getbasetemp()),
).resolve()
hash_basetemp = md5(
str(basetemp).encode(),
).hexdigest()
def make_tmp_dir(base_dir):
return TemporaryDirectory(
dir=str(base_dir),
prefix="p-",
suffix=f"-{hash_basetemp}",
)
paths = basetemp, os_tmp_dir, tmp_dir
for _num, tmp_dir_path in enumerate(paths, 1):
with make_tmp_dir(tmp_dir_path) as tmpd:
tmpd = Path(tmpd).resolve()
sock_path = str(tmpd / socket_filename)
sock_path_len = len(sock_path.encode())
if sock_path_len <= max_sock_len:
if max_sock_len - sock_path_len >= identifier_len: # pragma: no cover
sock_path = str(tmpd / "".join((identifier, socket_filename)))
yield sock_path
return
def sleep_touch(*paths: Path):
sleep(0.1)
for p in paths:
p.touch()
@pytest.fixture
def touch_soon():
threads = []
def start(*paths: Path):
thread = Thread(target=sleep_touch, args=paths)
thread.start()
threads.append(thread)
yield start
for t in threads:
t.join()
def _unused_port(socket_type: int) -> int:
"""Find an unused localhost port from 1024-65535 and return it."""
with contextlib.closing(socket.socket(type=socket_type)) as sock:
sock.bind(("127.0.0.1", 0))
return sock.getsockname()[1]
# This was copied from pytest-asyncio.
# Ref.: https://github.com/pytest-dev/pytest-asyncio/blob/25d9592286682bc6dbfbf291028ff7a9594cf283/pytest_asyncio/plugin.py#L525-L527 # noqa: E501
@pytest.fixture
def unused_tcp_port() -> int:
return _unused_port(socket.SOCK_STREAM)
@pytest.fixture(
params=[
pytest.param(
"uvicorn.protocols.websockets.wsproto_impl:WSProtocol",
marks=pytest.mark.skipif(
not importlib.util.find_spec("wsproto"), reason="wsproto not installed."
),
),
"uvicorn.protocols.websockets.websockets_impl:WebSocketProtocol",
]
)
def ws_protocol_cls(request: pytest.FixtureRequest):
return import_from_string(request.param)
|
992 |
decline if input dtype
|
import abc
import typing as t
import torch
import torch.fx
from torch.fx._compatibility import compatibility
from .shape_prop import TensorMetadata
from .tools_common import get_node_target, CALLABLE_NODE_OPS
__all__ = ['OperatorSupportBase', 'OperatorSupport', 'create_op_support', 'chain', 'OpSupports', 'any_chain']
# fx.Node.target typename, as returned by `get_node_target()`
TargetTypeName = str
# Arguments' dtypes for a given node, see `OperatorSupport`
SupportedArgumentDTypes = t.Optional[
t.Tuple[
t.Sequence[t.Sequence[torch.dtype]],
t.Dict[str, t.Sequence[torch.dtype]],
]
]
SupportDict = t.Mapping[TargetTypeName, SupportedArgumentDTypes]
@compatibility(is_backward_compatible=False)
class OperatorSupportBase(abc.ABC):
"""Interface for determining if a fx.Node is supported by a backend"""
@abc.abstractmethod
def is_node_supported(
self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
) -> bool:
raise NotImplementedError()
@compatibility(is_backward_compatible=False)
class OperatorSupport(OperatorSupportBase):
"""
    `_support_dict` maps node.target typename to supported input dtypes.
    node.target typename is retrieved using helper function `get_node_target()`.
    If the supported input dtypes entry is None, any dtype is supported; otherwise
    we should see a tuple like (([dtypes], ...), {"name": [dtypes], ...}).
    The first tuple ([dtypes], ...) indicates what dtypes are supported for
    inputs in node.args and the second dict {"name": [dtypes], ...} indicates
    what dtypes are supported for inputs in node.kwargs.
    For an input in args that we don't want to check, we can put None in its
    place, e.g. (None, [torch.float]) indicates that we don't care about the
    type of the first input in args. Inputs in kwargs that are not listed are
    not checked.
"""
_support_dict: SupportDict
def __init__(
self,
support_dict: t.Optional[SupportDict] = None
):
self._support_dict = support_dict or {}
def is_node_supported(
self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
) -> bool:
"""
Args:
`submodules`: mapping from module name to the module. This can be
retrieved by calling model.named_modules().
`node`: a Fx node that we want to determine whether it's supported.
Returns:
`is_supported`: whether the arg `node` is supported.
"""
if node.op not in CALLABLE_NODE_OPS:
return True
target = get_node_target(submodules, node)
# Target not found in _support_dict meaning that we don't support this op at all
if target not in self._support_dict:
return False
# The rule for target is None meaning that we accept any dtype
if self._support_dict[target] is None:
return True
args_dtypes, kwargs_dtypes = self._support_dict[target] # type: ignore[misc]
# Check args dtypes
for i, dtypes in enumerate(args_dtypes):
if len(node.args) <= i:
break
# None indicates we don't care about the dtype of args[i]
if dtypes is None:
continue
# If arg is not a node then we don't check it
if not isinstance(node.args[i], torch.fx.Node):
continue
arg_dtype = _get_arg_dtype(node.args[i]) # type: ignore[arg-type]
if arg_dtype not in dtypes:
return False
# Check kwargs dtypes
for k, dtypes in kwargs_dtypes.items():
if k not in node.kwargs:
continue
# If arg is not a node then we don't check it
if not isinstance(node.kwargs[k], torch.fx.Node):
continue
kwarg_dtype = _get_arg_dtype(node.kwargs[k]) # type: ignore[arg-type]
if kwarg_dtype not in dtypes:
return False
return True
# ======================================================================
# Functional interfaces and utils for defining basic operator support logic
# and composing them into more complex ones
# ======================================================================
IsNodeSupported = t.Callable[[t.Mapping[str, torch.nn.Module], torch.fx.Node], bool]
@compatibility(is_backward_compatible=False)
def create_op_support(is_node_supported: IsNodeSupported) -> OperatorSupportBase:
"""Wraps a `IsNodeSupported` function into an `OperatorSupportBase` instance
`IsNodeSupported` has the same call signature as
`OperatorSupportBase.is_node_supported`
"""
class FunctionalOperatorSupport(OperatorSupportBase):
def is_node_supported(
self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
) -> bool:
return is_node_supported(submodules, node)
return FunctionalOperatorSupport()
@compatibility(is_backward_compatible=False)
def chain(*op_support: OperatorSupportBase) -> OperatorSupportBase:
"""Combines a sequence of `OperatorSupportBase` instances to form a single `OperatorSupportBase`
instance by evaluating each input `OperatorSupportBase` instance, and returns False if
any of it reports False.
"""
def _chain(submods, node) -> bool:
return all(
x.is_node_supported(submods, node)
for x in op_support
)
return create_op_support(_chain)
@compatibility(is_backward_compatible=False)
def any_chain(*op_support: OperatorSupportBase) -> OperatorSupportBase:
"""Combines a sequence of `OperatorSupportBase` instances to form a single `OperatorSupportBase`
instance by evaluating each input `OperatorSupportBase` instance, and returns True if
any of it reports True.
"""
def _any_chain(submods, node) -> bool:
return any(
x.is_node_supported(submods, node)
for x in op_support
)
return create_op_support(_any_chain)
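# Illustrative sketch of composing rules (the `submodules`/`node` objects are
# assumed to come from a traced fx.GraphModule and are not shown here): a node
# is supported only if every rule in the chain accepts it:
#
#   op_support = chain(
#       OpSupports.decline_if_node_in_names({"blocked_node"}),
#       OperatorSupport({"torch.nn.functional.relu": None}),
#   )
#   supported = op_support.is_node_supported(submodules, node)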
@compatibility(is_backward_compatible=False)
class OpSupports:
"""A set of atomic `OperatorSupportBase` instances that can be combined together
to form more complex operator support logic.
"""
@classmethod
def METHOD_NAME(cls, dtype: torch.dtype) -> OperatorSupportBase:
"""Report a node as non-supported, if any of its arguments is of dtype"""
def _decline_if_input_dtype(
submodules: t.Mapping[str, torch.nn.Module],
node: torch.fx.Node,
) -> bool:
for arg in node.all_input_nodes:
# escape dtype check for get_attr node
if arg.op == "get_attr":
continue
arg_dtype = _get_arg_dtype(arg)
if arg_dtype == dtype:
return False
return True
return create_op_support(_decline_if_input_dtype)
@classmethod
def decline_if_node_in_names(cls, disallow_set: t.Set[str]) -> OperatorSupportBase:
"""
        If a node has a name that is in the disallow set, report it as non-supported.
"""
def _decline_if_node_in_names(
submodules: t.Mapping[str, torch.nn.Module],
node: torch.fx.Node,
) -> bool:
if node.name in disallow_set:
return False
else:
return True
return create_op_support(_decline_if_node_in_names)
def _get_arg_dtype(arg: torch.fx.Node) -> t.Any:
assert isinstance(arg, torch.fx.Node)
tensor_meta = arg.meta.get("tensor_meta") # type: ignore[union-attr]
dtype = tensor_meta.dtype if isinstance(tensor_meta, TensorMetadata) else arg.meta["type"]
return dtype
|
993 |
test tumblr
|
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
import collections
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import gettestcases
from youtube_dl.extractor import (
FacebookIE,
gen_extractors,
YoutubeIE,
)
class TestAllURLsMatching(unittest.TestCase):
def setUp(self):
self.ies = gen_extractors()
def matching_ies(self, url):
return [ie.IE_NAME for ie in self.ies if ie.suitable(url) and ie.IE_NAME != 'generic']
def assertMatch(self, url, ie_list):
self.assertEqual(self.matching_ies(url), ie_list)
def test_youtube_playlist_matching(self):
assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
assertTab = lambda url: self.assertMatch(url, ['youtube:tab'])
assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') # 585
assertPlaylist('PL63F0C78739B09958')
assertTab('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
assertTab('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
assertTab('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
assertTab('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') # 668
self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
# Top tracks
assertTab('https://www.youtube.com/playlist?list=MCUS.20142101')
def test_youtube_matching(self):
self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) # 668
self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
self.assertMatch('http://www.cleanvideosearch.com/media/action/yt/watch?videoId=8v_4O44sfjM', ['youtube'])
def test_youtube_channel_matching(self):
assertChannel = lambda url: self.assertMatch(url, ['youtube:tab'])
assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM')
assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec')
assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
def test_youtube_user_matching(self):
self.assertMatch('http://www.youtube.com/NASAgovVideo/videos', ['youtube:tab'])
def test_youtube_feeds(self):
self.assertMatch('https://www.youtube.com/feed/library', ['youtube:tab'])
self.assertMatch('https://www.youtube.com/feed/history', ['youtube:tab'])
self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:tab'])
self.assertMatch('https://www.youtube.com/feed/subscriptions', ['youtube:tab'])
def test_youtube_search_matching(self):
self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
def test_facebook_matching(self):
self.assertTrue(FacebookIE.suitable('https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))
self.assertTrue(FacebookIE.suitable('https://www.facebook.com/cindyweather?fref=ts#!/photo.php?v=10152183998945793'))
def test_no_duplicates(self):
ies = gen_extractors()
for tc in gettestcases(include_onlymatching=True):
url = tc['url']
for ie in ies:
if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'):
self.assertTrue(ie.suitable(url), '%s should match URL %r' % (type(ie).__name__, url))
else:
self.assertFalse(
ie.suitable(url),
'%s should not match URL %r . That URL belongs to %s.' % (type(ie).__name__, url, tc['name']))
def test_keywords(self):
self.assertMatch(':ytsubs', ['youtube:subscriptions'])
self.assertMatch(':ytsubscriptions', ['youtube:subscriptions'])
self.assertMatch(':ythistory', ['youtube:history'])
def test_vimeo_matching(self):
self.assertMatch('https://vimeo.com/channels/tributes', ['vimeo:channel'])
self.assertMatch('https://vimeo.com/channels/31259', ['vimeo:channel'])
self.assertMatch('https://vimeo.com/channels/31259/53576664', ['vimeo'])
self.assertMatch('https://vimeo.com/user7108434', ['vimeo:user'])
self.assertMatch('https://vimeo.com/user7108434/videos', ['vimeo:user'])
self.assertMatch('https://vimeo.com/user21297594/review/75524534/3c257a1b5d', ['vimeo:review'])
# https://github.com/ytdl-org/youtube-dl/issues/1930
def test_soundcloud_not_matching_sets(self):
self.assertMatch('http://soundcloud.com/floex/sets/gone-ep', ['soundcloud:set'])
def METHOD_NAME(self):
self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes', ['Tumblr'])
self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430', ['Tumblr'])
def test_pbs(self):
# https://github.com/ytdl-org/youtube-dl/issues/2350
self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['pbs'])
self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['pbs'])
def test_no_duplicated_ie_names(self):
name_accu = collections.defaultdict(list)
for ie in self.ies:
name_accu[ie.IE_NAME.lower()].append(type(ie).__name__)
for (ie_name, ie_list) in name_accu.items():
self.assertEqual(
len(ie_list), 1,
'Multiple extractors with the same IE_NAME "%s" (%s)' % (ie_name, ', '.join(ie_list)))
if __name__ == '__main__':
unittest.main()
|
994 |
add reinstall
|
import sys
import rpm
from rpm._rpm import ts as TransactionSetCore
# TODO: migrate relevant documentation from C-side
class TransactionSet(TransactionSetCore):
_probFilter = 0
def _wrapSetGet(self, attr, val):
oval = getattr(self, attr)
setattr(self, attr, val)
return oval
def setVSFlags(self, flags):
return self._wrapSetGet('_vsflags', flags)
def getVSFlags(self):
return self._vsflags
def setVfyFlags(self, flags):
return self._wrapSetGet('_vfyflags', flags)
def getVfyFlags(self):
return self._vfyflags
def getVfyLevel(self):
return self._vfylevel
def setVfyLevel(self, flags):
return self._wrapSetGet('_vfylevel', flags)
def setColor(self, color):
return self._wrapSetGet('_color', color)
def setPrefColor(self, color):
return self._wrapSetGet('_prefcolor', color)
def setFlags(self, flags):
return self._wrapSetGet('_flags', flags)
def setProbFilter(self, ignoreSet):
return self._wrapSetGet('_probFilter', ignoreSet)
def parseSpec(self, specfile):
return rpm.spec(specfile)
def getKeys(self):
keys = []
for te in self:
keys.append(te.Key())
# Backwards compatibility goo - WTH does this return a *tuple* ?!
if not keys:
return None
else:
return tuple(keys)
def _f2hdr(self, item):
if isinstance(item, str):
with open(item) as f:
header = self.hdrFromFdno(f)
elif isinstance(item, rpm.hdr):
header = item
else:
header = self.hdrFromFdno(item)
return header
def _i2hdrs(self, item):
hdrs = []
# match iterators are passed on as-is
if isinstance(item, rpm.mi):
hdrs = item
elif isinstance(item, rpm.hdr):
hdrs.append(item)
elif isinstance(item, (int, str)):
if isinstance(item, int):
dbi = rpm.RPMDBI_PACKAGES
else:
dbi = rpm.RPMDBI_LABEL
for h in self.dbMatch(dbi, item):
hdrs.append(h)
if not hdrs:
raise rpm.error("package not installed")
else:
raise TypeError("invalid type %s" % type(item))
return hdrs
def addInstall(self, item, key, how="u"):
header = self._f2hdr(item)
if how not in ['u', 'i']:
raise ValueError('how argument must be "u" or "i"')
upgrade = (how == "u")
if not TransactionSetCore.addInstall(self, header, key, upgrade):
if upgrade:
raise rpm.error("adding upgrade to transaction failed")
else:
raise rpm.error("adding install to transaction failed")
def METHOD_NAME(self, item, key):
header = self._f2hdr(item)
if not TransactionSetCore.METHOD_NAME(self, header, key):
raise rpm.error("adding reinstall to transaction failed")
def addErase(self, item):
hdrs = self._i2hdrs(item)
for h in hdrs:
if not TransactionSetCore.addErase(self, h):
raise rpm.error("adding erasure to transaction failed")
def addRestore(self, item):
hdrs = self._i2hdrs(item)
for h in hdrs:
            if not TransactionSetCore.addRestore(self, h):
raise rpm.error("adding restore to transaction failed")
def run(self, callback, data):
rc = TransactionSetCore.run(self, callback, data, self._probFilter)
# crazy backwards compatibility goo: None for ok, list of problems
# if transaction didn't complete and empty list if it completed
# with errors
if rc == 0:
return None
res = []
if rc > 0:
for prob in self.problems():
item = ("%s" % prob, (prob.type, prob._str, prob._num))
res.append(item)
return res
def check(self, *args, **kwds):
TransactionSetCore.check(self, *args, **kwds)
# compatibility: munge problem strings into dependency tuples of doom
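        # Illustrative example (all values made up): a missing dependency
        # "libbar.so.1" for package foo-1.0-1 would come back roughly as
        #   (('foo', '1.0', '1'), ('libbar.so.1', ''), rpm.RPMSENSE_ANY,
        #    key, rpm.RPMDEP_SENSE_REQUIRES)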
res = []
for p in self.problems():
# is it anything we need to care about?
if p.type == rpm.RPMPROB_CONFLICT:
sense = rpm.RPMDEP_SENSE_CONFLICTS
elif p.type == rpm.RPMPROB_REQUIRES:
sense = rpm.RPMDEP_SENSE_REQUIRES
else:
continue
# strip arch, split to name, version, release
nevr = p.altNEVR.rsplit('.', 1)[0]
n, v, r = nevr.rsplit('-', 2)
# extract the dependency information
needs = p._str.split()
needname = needs[0]
needflags = rpm.RPMSENSE_ANY
if len(needs) == 3:
needop = needs[1]
if '<' in needop:
needflags |= rpm.RPMSENSE_LESS
if '=' in needop:
needflags |= rpm.RPMSENSE_EQUAL
if '>' in needop:
needflags |= rpm.RPMSENSE_GREATER
needver = needs[2]
else:
needver = ""
res.append(((n, v, r),
(needname, needver), needflags, p.key, sense))
return res
def hdrCheck(self, blob):
res, msg = TransactionSetCore.hdrCheck(self, blob)
# generate backwards compatibly broken exceptions
if res == rpm.RPMRC_NOKEY:
raise rpm.error("public key not available")
elif res == rpm.RPMRC_NOTTRUSTED:
raise rpm.error("public key not trusted")
elif res != rpm.RPMRC_OK:
raise rpm.error(msg)
def hdrFromFdno(self, fd):
res, h = TransactionSetCore.hdrFromFdno(self, fd)
# generate backwards compatibly broken exceptions
if res == rpm.RPMRC_NOKEY:
raise rpm.error("public key not available")
elif res == rpm.RPMRC_NOTTRUSTED:
raise rpm.error("public key not trusted")
elif res != rpm.RPMRC_OK:
raise rpm.error("error reading package header")
return h
|
995 |
test string
|
import pytest
pytestmark = pytest.mark.parameters_handler
import bipedal_locomotion_framework.bindings.parameters_handler as blf
import numpy as np
def test_bool():
handler = blf.StdParametersHandler()
handler.set_parameter_bool(name="my_bool", value=True)
assert handler.get_parameter_bool(name="my_bool") is True
with pytest.raises(ValueError):
handler.get_parameter_int(name="my_bool")
with pytest.raises(ValueError):
handler.get_parameter_float(name="my_bool")
with pytest.raises(ValueError):
handler.get_parameter_string(name="my_bool")
def test_int():
handler = blf.StdParametersHandler()
handler.set_parameter_int(name="my_int", value=42)
assert handler.get_parameter_int(name="my_int") == 42
with pytest.raises(ValueError):
handler.get_parameter_bool(name="my_int")
with pytest.raises(ValueError):
handler.get_parameter_float(name="my_int")
with pytest.raises(ValueError):
handler.get_parameter_string(name="my_int")
def test_float():
handler = blf.StdParametersHandler()
handler.set_parameter_float(name="my_float", value=3.1415)
assert handler.get_parameter_float(name="my_float") == pytest.approx(3.1415)
with pytest.raises(ValueError):
handler.get_parameter_bool(name="my_float")
with pytest.raises(ValueError):
handler.get_parameter_int(name="my_float")
with pytest.raises(ValueError):
handler.get_parameter_string(name="my_float")
def METHOD_NAME():
handler = blf.StdParametersHandler()
handler.set_parameter_string(name="my_string", value="foo")
assert handler.get_parameter_string(name="my_string") == "foo"
with pytest.raises(ValueError):
handler.get_parameter_bool(name="my_string")
with pytest.raises(ValueError):
handler.get_parameter_int(name="my_string")
with pytest.raises(ValueError):
handler.get_parameter_float(name="my_string")
def test_vector_bool():
handler = blf.StdParametersHandler()
handler.set_parameter_vector_bool(name="my_vector_bool",value= [True, False, True])
assert handler.get_parameter_vector_bool(name="my_vector_bool") == [True, False, True]
with pytest.raises(ValueError):
handler.get_parameter_vector_int(name="my_vector_bool")
with pytest.raises(ValueError):
handler.get_parameter_vector_float(name="my_vector_bool")
with pytest.raises(ValueError):
handler.get_parameter_vector_string(name="my_vector_bool")
def test_vector_int():
handler = blf.StdParametersHandler()
handler.set_parameter_vector_int(name="my_vector_int", value=[-1, 2, 10])
assert handler.get_parameter_vector_int(name="my_vector_int") == [-1, 2, 10]
with pytest.raises(ValueError):
handler.get_parameter_vector_bool(name="my_vector_int")
with pytest.raises(ValueError):
handler.get_parameter_vector_float(name="my_vector_int")
with pytest.raises(ValueError):
handler.get_parameter_vector_string(name="my_vector_int")
def test_vector_float():
handler = blf.StdParametersHandler()
handler.set_parameter_vector_float(name="my_vector_float",
value=[-3.14, 2.7182, 42.0])
assert handler.get_parameter_vector_float(name="my_vector_float") == \
pytest.approx([-3.14, 2.7182, 42.0])
with pytest.raises(ValueError):
handler.get_parameter_vector_bool(name="my_vector_float")
with pytest.raises(ValueError):
handler.get_parameter_vector_int(name="my_vector_float")
with pytest.raises(ValueError):
handler.get_parameter_vector_string(name="my_vector_float")
def test_vector_string():
handler = blf.StdParametersHandler()
handler.set_parameter_vector_string(name="my_vector_string",
value=["foo", "bar", "bipedal", "locomotion"])
assert handler.get_parameter_vector_string(name="my_vector_string") == \
["foo", "bar", "bipedal", "locomotion"]
with pytest.raises(ValueError):
handler.get_parameter_vector_bool(name="my_vector_string")
with pytest.raises(ValueError):
handler.get_parameter_vector_int(name="my_vector_string")
with pytest.raises(ValueError):
handler.get_parameter_vector_float(name="my_vector_string")
def test_vector_mixed():
handler = blf.StdParametersHandler()
# 1. Mixed vector: store as more general type float
handler.set_parameter_vector_float(name="to_float", value=[42.0, 1, -3.14, False])
assert handler.get_parameter_vector_float(name="to_float") == \
pytest.approx([42.0, 1.0, -3.14, 0.0])
# 2. Mixed vector: store as more general type int
handler.set_parameter_vector_float(name="to_int", value=[42, 1, -3, False])
assert handler.get_parameter_vector_float(name="to_int") == \
pytest.approx([42, 1, -3, 0])
# 3. Mixed vector: store as less general type int
with pytest.raises(TypeError):
handler.set_parameter_vector_int(name="to_int_fail",
value=[42.0, 1, -3.14, False])
def test_clear():
handler = blf.StdParametersHandler()
handler.set_parameter_bool(name="my_bool1", value=False)
handler.set_parameter_bool(name="my_bool2", value=True)
handler.set_parameter_float(name="my_float", value=-42.42)
handler.set_parameter_vector_string(name="my_vector_string", value=["bar", "foo"])
handler.clear()
with pytest.raises(ValueError):
_ = handler.get_parameter_bool(name="my_bool1")
with pytest.raises(ValueError):
_ = handler.get_parameter_bool(name="my_bool2")
with pytest.raises(ValueError):
_ = handler.get_parameter_float(name="my_float")
with pytest.raises(ValueError):
_ = handler.get_parameter_vector_string(name="my_float")
|
996 |
get build fuzzers step
|
#!/usr/bin/env python3
#
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Does fuzzbench runs on Google Cloud Build."""
import logging
import os
import sys
import build_lib
import build_project
FUZZBENCH_BUILD_TYPE = 'coverage'
FUZZBENCH_PATH = '/fuzzbench'
def get_engine_project_image(fuzzing_engine, project):
"""Returns the name of an image used to build |project| with
|fuzzing_engine|."""
return f'gcr.io/oss-fuzz-base/{fuzzing_engine}/{project.name}'
def get_env(project, build):
"""Gets the environment for fuzzbench/oss-fuzz-on-demand."""
env = build_project.get_env(project.fuzzing_language, build)
env.append(f'FUZZBENCH_PATH={FUZZBENCH_PATH}')
env.append('FORCE_LOCAL=1')
env.append(f'PROJECT={project.name}')
env.append('OSS_FUZZ_ON_DEMAND=1')
env.append('OUT=/workspace/out')
env.extend([
'FUZZ_TARGET=vulnerable', f'BENCHMARK={project.name}',
'EXPERIMENT_TYPE=bug'
])
return env
def METHOD_NAME(fuzzing_engine, project, env, build):
"""Returns the build_fuzzers step to build |project| with |fuzzing_engine|,
for fuzzbench/oss-fuzz-on-demand."""
steps = []
engine_dockerfile_path = os.path.join(FUZZBENCH_PATH, 'fuzzers',
fuzzing_engine, 'builder.Dockerfile')
build_args = [
'build', '--build-arg', f'parent_image=gcr.io/oss-fuzz/{project.name}',
'--tag',
get_engine_project_image(fuzzing_engine,
project), '--file', engine_dockerfile_path,
os.path.join(FUZZBENCH_PATH, 'fuzzers')
]
engine_step = [
{
'name': 'gcr.io/cloud-builders/docker',
'args': build_args,
'volumes': [{
'name': 'fuzzbench_path',
'path': FUZZBENCH_PATH,
}],
},
]
  steps.extend(engine_step)  # engine_step is a list of steps, so extend rather than append
compile_project_step = {
'name':
get_engine_project_image(fuzzing_engine, project),
'env':
env,
'volumes': [{
'name': 'fuzzbench_path',
'path': FUZZBENCH_PATH,
}],
'args': [
'bash',
'-c',
# Remove /out to make sure there are non instrumented binaries.
# `cd /src && cd {workdir}` (where {workdir} is parsed from the
# Dockerfile). Container Builder overrides our workdir so we need
# to add this step to set it back.
(f'ls /fuzzbench && rm -r /out && cd /src && cd {project.workdir} && '
f'mkdir -p {build.out} && compile'),
],
}
steps.append(compile_project_step)
return steps
def get_build_steps( # pylint: disable=too-many-locals, too-many-arguments
project_name, project_yaml, dockerfile_lines, image_project,
base_images_project, config):
"""Returns build steps for project."""
del base_images_project
project = build_project.Project(project_name, project_yaml, dockerfile_lines,
image_project)
if project.disabled:
logging.info('Project "%s" is disabled.', project.name)
return []
config = build_project.Config(config.testing, None, config.repo,
config.branch, config.parallel, config.upload)
# TODO(metzman): Make this a command line argument
fuzzing_engine = 'libfuzzer'
steps = [
{
'args': [
'clone', 'https://github.com/google/fuzzbench', '--depth', '1',
FUZZBENCH_PATH
],
'name': 'gcr.io/cloud-builders/git',
'volumes': [{
'name': 'fuzzbench_path',
'path': FUZZBENCH_PATH,
}],
},
{
'name': 'gcr.io/cloud-builders/docker',
'args': ['pull', 'gcr.io/oss-fuzz-base/base-builder-fuzzbench']
},
{ # TODO(metzman): Don't overwrite base-builder
'name':
'gcr.io/cloud-builders/docker',
'args': [
'tag', 'gcr.io/oss-fuzz-base/base-builder-fuzzbench',
'gcr.io/oss-fuzz-base/base-builder'
]
},
]
steps += build_lib.get_project_image_steps(project.name,
project.image,
project.fuzzing_language,
config=config)
build = build_project.Build(fuzzing_engine, 'address', 'x86_64')
env = get_env(project, build)
steps += METHOD_NAME(fuzzing_engine, project, env, build)
run_fuzzer_step = {
'name':
get_engine_project_image(fuzzing_engine, project),
'env':
env,
'volumes': [{
'name': 'fuzzbench_path',
'path': FUZZBENCH_PATH,
}],
'args': [
'bash',
'-c',
(f'ls /fuzzbench && cd {build.out} && ls {build.out} && '
'fuzzbench_run_fuzzer'),
],
}
steps.append(run_fuzzer_step)
build = build_project.Build('coverage', 'address', 'x86_64')
env = get_env(project, build)
env.append(f'FUZZER={fuzzing_engine}')
steps += METHOD_NAME('coverage', project, env, build)
steps += [{
'args': ['fuzzbench_measure'],
'env': env,
'name': get_engine_project_image('coverage', project),
'volumes': [{
'name': 'fuzzbench_path',
'path': FUZZBENCH_PATH,
}],
}]
return steps
def main():
"""Build and run fuzzbench for OSS-Fuzz projects."""
return build_project.build_script_main('Does a FuzzBench run.',
get_build_steps, FUZZBENCH_BUILD_TYPE)
if __name__ == '__main__':
sys.exit(main())
|
997 |
label for value
|
import logging
import warnings
from django import forms
from django.conf import settings
from django.contrib.admin.sites import site
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from .. import settings as filer_settings
from ..models import File
from ..settings import ICON_CSS_LIB
from ..utils.compatibility import truncate_words
from ..utils.model_label import get_model_label
logger = logging.getLogger(__name__)
class AdminFileWidget(ForeignKeyRawIdWidget):
choices = None
def render(self, name, value, attrs=None, renderer=None):
obj = self.obj_for_value(value)
css_id = attrs.get('id', 'id_image_x')
related_url = None
change_url = ''
if value:
try:
file_obj = File.objects.get(pk=value)
related_url = file_obj.logical_folder.get_admin_directory_listing_url_path()
change_url = file_obj.get_admin_change_url()
except Exception as e:
                # catch the exception and manage it: we can re-raise it for
                # debugging purposes and/or just log it, provided the user has
                # configured logging properly
if filer_settings.FILER_ENABLE_LOGGING:
logger.error('Error while rendering file widget: %s', e)
if filer_settings.FILER_DEBUG:
raise
if not related_url:
related_url = reverse('admin:filer-directory_listing-last')
params = self.url_parameters()
params['_pick'] = 'file'
if params:
lookup_url = '?' + urlencode(sorted(params.items()))
else:
lookup_url = ''
if 'class' not in attrs:
# The JavaScript looks for this hook.
attrs['class'] = 'vForeignKeyRawIdAdminField'
# rendering the super for ForeignKeyRawIdWidget on purpose here because
# we only need the input and none of the other stuff that
# ForeignKeyRawIdWidget adds
hidden_input = super(ForeignKeyRawIdWidget, self).render(name, value, attrs) # grandparent super
context = {
'hidden_input': hidden_input,
'lookup_url': '{}{}'.format(related_url, lookup_url),
'change_url': change_url,
'object': obj,
'lookup_name': name,
'id': css_id,
'admin_icon_delete': ('admin/img/icon-deletelink.svg'),
}
html = render_to_string('admin/filer/widgets/admin_file.html', context)
return mark_safe(html)
def METHOD_NAME(self, value):
obj = self.obj_for_value(value)
return ' <strong>%s</strong>' % truncate_words(obj, 14)
def obj_for_value(self, value):
if value:
try:
                # the next line may never be reached
key = self.rel.get_related_field().name
obj = self.rel.model._default_manager.get(**{key: value})
except ObjectDoesNotExist:
obj = None
else:
obj = None
return obj
class Media:
extra = '' if settings.DEBUG else '.min'
css = {
'all': (
'filer/css/admin_filer.css',
) + ICON_CSS_LIB,
}
js = (
'admin/js/vendor/jquery/jquery%s.js' % extra,
'admin/js/jquery.init.js',
'filer/js/libs/dropzone.min.js',
'filer/js/addons/dropzone.init.js',
'filer/js/addons/popup_handling.js',
'filer/js/addons/widget.js',
)
class AdminFileFormField(forms.ModelChoiceField):
widget = AdminFileWidget
def __init__(self, rel, queryset, to_field_name, *args, **kwargs):
self.rel = rel
self.queryset = queryset
self.to_field_name = to_field_name
self.max_value = None
self.min_value = None
kwargs.pop('widget', None)
super().__init__(queryset, widget=self.widget(rel, site), *args, **kwargs)
def widget_attrs(self, widget):
widget.required = self.required
return {}
class FilerFileField(models.ForeignKey):
default_form_class = AdminFileFormField
default_model_class = File
def __init__(self, **kwargs):
to = kwargs.pop('to', None)
dfl = get_model_label(self.default_model_class)
if to and get_model_label(to).lower() != dfl.lower():
msg = "In {}: ForeignKey must point to {}; instead passed {}"
warnings.warn(msg.format(self.__class__.__name__, dfl, to), SyntaxWarning)
kwargs['to'] = dfl # hard-code `to` to model `filer.File`
super().__init__(**kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': self.default_form_class,
'rel': self.remote_field,
}
defaults.update(kwargs)
return super().formfield(**defaults)
|
998 |
filter
|
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import re
try:
from functools import lru_cache
except ImportError:
from .compat import lru_cache
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def _norm_paths(path, norm_paths, sep):
if norm_paths is None:
path = re.sub(r'\/', sep or os.sep, path) # cached internally
elif norm_paths:
path = os.path.normcase(path)
return path
def fnmatch(name, pat, norm_paths=True, case_sensitive=True, sep=None):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
    :param norm_paths:
        A tri-state boolean:
        when true, invokes `os.path.normcase()` on both paths,
        when `None`, just equalizes slashes/backslashes to `os.sep`,
        when false, does not touch paths at all.
        Note that a side-effect of `normcase()` on *Windows* is that
        it converts to lower-case all matches of `?glob()` functions.
    :param case_sensitive:
        defines the case-sensitivity of the regex doing the matches
    :param sep:
        in case only slashes are replaced, what sep-char to substitute with;
        if false, `os.sep` is used.
    Notice that by default, `normcase()` causes insensitive matching
    on *Windows*, regardless of the `case_sensitive` param.
    Set ``norm_paths=None, case_sensitive=False`` to preserve
    verbatim matches.
"""
name, pat = [_norm_paths(p, norm_paths, sep)
for p in (name, pat)]
return fnmatchcase(name, pat, case_sensitive=case_sensitive)
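# Illustrative usage (paths are made up); with the default norm_paths=True the
# slash style of name and pattern does not matter on Windows:
#
#   fnmatch("src/pkg/module.py", "src/*/*.py")      -> True
#   fnmatch("src/pkg/module.PY", "src/*/*.py",
#           norm_paths=None, case_sensitive=False)  -> True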
@lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat, case_sensitive):
if isinstance(pat, bytes):
pat_str = pat.decode('ISO-8859-1')
res_str = translate(pat_str)
res = res_str.encode('ISO-8859-1')
else:
res = translate(pat)
flags = 0 if case_sensitive else re.IGNORECASE
return re.compile(res, flags).match
def METHOD_NAME(names, pat, norm_paths=True, case_sensitive=True, sep=None):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = _norm_paths(pat, norm_paths, sep)
match = _compile_pattern(pat, case_sensitive)
for name in names:
m = match(_norm_paths(name, norm_paths, sep))
if m:
result.append((name,
tuple(_norm_paths(p, norm_paths, sep) for p in m.groups())))
return result
def fnmatchcase(name, pat, case_sensitive=True):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
match = _compile_pattern(pat, case_sensitive)
return match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '(.*)'
elif c == '?':
res = res + '(.)'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\','\\\\')
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s([%s])' % (res, stuff)
else:
res = res + re.escape(c)
    return '(?ms)' + res + r'\Z'
|
999 |
test address is valid
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from pyqrllib.pyqrllib import hstr2bin
from unittest import TestCase
from qrl.core.misc import logger
from qrl.core.AddressState import AddressState
from qrl.core.MultiSigAddressState import MultiSigAddressState
from qrl.core.OptimizedAddressState import OptimizedAddressState
from qrl.core.txs.multisig.MultiSigCreate import MultiSigCreate
from qrl.core.State import State
from tests.misc.helper import set_qrl_dir, get_bob_xmss, get_alice_xmss, get_random_xmss
logger.initialize_default()
class TestMultiSigAddressState(TestCase):
def setUp(self):
with set_qrl_dir('no_data'):
self.state = State()
def test_generate_multi_sig_address(self):
creation_tx_hash = bytes(hstr2bin("5a4c37ef7e5b7cc5a2a58ab730269ed8"
"f4cbf08a005dc3508e31465535e1d6bb"))
address = MultiSigAddressState.generate_multi_sig_address(creation_tx_hash)
expected_address = bytes(hstr2bin("1100003674370317e1cac0ca13f896ab5b6472a"
"261ba0d2b2961d3adba1b9060f6e8f7fe2088fb"))
self.assertEqual(address, expected_address)
self.assertFalse(OptimizedAddressState.address_is_valid(address))
def METHOD_NAME(self):
address = bytes(hstr2bin("110000000000000000000000000000000000000"
"000000000000000000000000000000000000000"))
self.assertFalse(OptimizedAddressState.address_is_valid(address))
def test_get_multi_sig_address_state_by_address(self):
alice_xmss = get_alice_xmss()
bob_xmss = get_bob_xmss()
random_xmss = get_random_xmss()
signatories = [alice_xmss.address, bob_xmss.address]
weights = [20, 20]
threshold = 21
multi_sig_tx = MultiSigCreate.create(signatories,
weights,
threshold,
0,
random_xmss.pk)
multi_sig_tx.sign(random_xmss)
multi_sig_address_state = MultiSigAddressState.get_default(multi_sig_tx.txhash,
signatories,
weights,
threshold)
AddressState.put_address_state(self.state, multi_sig_address_state)
multi_sig_address_state2 = MultiSigAddressState.get_multi_sig_address_state_by_address(
self.state._db,
MultiSigAddressState.generate_multi_sig_address(multi_sig_tx.txhash))
self.assertEqual(multi_sig_address_state.pbdata, multi_sig_address_state2.pbdata)
def test_put_multi_sig_addresses_state(self):
alice_xmss = get_alice_xmss()
bob_xmss = get_bob_xmss()
random_xmss = get_random_xmss()
signatories = [alice_xmss.address, bob_xmss.address]
weights = [20, 20]
threshold = 21
multi_sig_tx = MultiSigCreate.create(signatories,
weights,
threshold,
0,
random_xmss.pk)
multi_sig_tx.sign(random_xmss)
multi_sig_address_state = MultiSigAddressState.get_default(multi_sig_tx.txhash,
signatories,
weights,
threshold)
multi_sig_addresses_state = {multi_sig_address_state.address: multi_sig_address_state}
AddressState.put_addresses_state(self.state, multi_sig_addresses_state)
multi_sig_address_state2 = MultiSigAddressState.get_multi_sig_address_state_by_address(
self.state._db,
MultiSigAddressState.generate_multi_sig_address(multi_sig_tx.txhash))
self.assertEqual(multi_sig_address_state.pbdata, multi_sig_address_state2.pbdata)
|