id (int64, 0-300k) | label (string, 1-74 chars, nullable) | text (string, 4k-8k chars) |
---|---|---|
1,100 | fileout |
# SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
import os
import glob
import codecs
import contextlib
from datetime import datetime
from .output import out
from .util import unquote, getFilename, urlopen, strsize
from .events import getHandler
# Maximum content size for images
MaxImageBytes = 1024 * 1024 * 20 # 20 MB
# RFC 1123 format, as preferred by RFC 2616
RFC_1123_DT_STR = "%a, %d %b %Y %H:%M:%S GMT"
class ComicStrip(object):
"""A list of comic image URLs."""
def __init__(self, scraper, strip_url, image_urls, text=None):
"""Store the image URL list."""
self.scraper = scraper
self.strip_url = strip_url
self.image_urls = image_urls
self.text = text
def getImages(self):
"""Get a list of image downloaders."""
for image_url in self.image_urls:
yield self.getDownloader(image_url)
def getDownloader(self, url):
"""Get an image downloader."""
filename = self.scraper.namer(url, self.strip_url)
if filename is None:
filename = url.rsplit('/', 1)[1]
return ComicImage(self.scraper, url, self.strip_url, filename,
text=self.text)
class ComicImage(object):
"""A comic image downloader."""
ChunkBytes = 1024 * 100 # 100KB
def __init__(self, scraper, url, referrer, filename, text=None):
"""Set URL and filename."""
self.scraper = scraper
self.referrer = referrer
self.url = url
filename = getFilename(filename)
self.filename, self.ext = os.path.splitext(filename)
self.text = text
def connect(self, lastchange=None):
"""Connect to host and get meta information."""
headers = {}
if lastchange:
headers['If-Modified-Since'] = lastchange.strftime(RFC_1123_DT_STR)
self.urlobj = urlopen(self.url, self.scraper.session,
referrer=self.referrer,
max_content_bytes=MaxImageBytes, stream=True,
headers=headers)
if self.urlobj.status_code == 304: # Not modified
return
content_type = unquote(self.urlobj.headers.get(
'content-type', 'application/octet-stream'))
content_type = content_type.split(';', 1)[0]
if '/' in content_type:
maintype, subtype = content_type.split('/', 1)
else:
maintype = content_type
subtype = None
if maintype != 'image' and content_type not in (
'application/octet-stream', 'application/x-shockwave-flash'):
raise IOError('content type %r is not an image at %s' % (
content_type, self.url))
# Always use mime type for file extension if it is sane.
if maintype == 'image':
self.ext = '.' + subtype.replace('jpeg', 'jpg')
self.contentLength = int(self.urlobj.headers.get('content-length', 0))
out.debug(u'... filename = %r, ext = %r, contentLength = %d' % (
self.filename, self.ext, self.contentLength))
def save(self, basepath):
"""Save comic URL to filename on disk."""
fnbase = self._fnbase(basepath)
exist = [x for x in glob.glob(fnbase + ".*") if not x.endswith(".txt")]
out.info(u"Get image URL %s" % self.url, level=1)
if len(exist) == 1:
lastchange = os.path.getmtime(exist[0])
self.connect(datetime.utcfromtimestamp(lastchange))
if self.urlobj.status_code == 304: # Not modified
self._exist_err(exist[0])
return exist[0], False
else:
self.connect()
fn = fnbase + self.ext
# compare with >= since content length could be the compressed size
if os.path.isfile(fn) and os.path.getsize(fn) >= self.contentLength:
self._exist_err(fn)
return fn, False
out.debug(u'Writing comic to file %s...' % fn)
with self.METHOD_NAME(fn) as f:
for chunk in self.urlobj.iter_content(self.ChunkBytes):
f.write(chunk)
if self.text:
fntext = fnbase + ".txt"
out.debug(u'Writing comic text to file %s...' % fntext)
with self.METHOD_NAME(fntext, encoding='utf-8') as f:
f.write(self.text)
getHandler().comicDownloaded(self, fn)
return fn, True
@contextlib.contextmanager
def METHOD_NAME(self, filename, encoding=None):
"""Write content to given filename. Checks for zero-sized files.
If encoding is given writes to a codec.open() file."""
def getfp(filename, encoding):
"""Get open file object."""
if encoding:
return codecs.open(filename, 'w', encoding)
return open(filename, 'wb')
try:
with getfp(filename, encoding) as fp:
yield fp
size = fp.tell()
except Exception:
if os.path.isfile(filename):
os.remove(filename)
raise
else:
out.info(u"Saved %s (%s)." % (filename, strsize(size)))
def _exist_err(self, fn):
out.info(u'Skipping existing file "%s".' % fn)
def _fnbase(self, basepath):
'''Determine the target base name of this comic file and make sure the
directory exists.'''
comicdir = self.scraper.get_download_dir(basepath)
if not os.path.isdir(comicdir):
os.makedirs(comicdir)
return os.path.join(comicdir, self.filename)
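# A minimal usage sketch (not part of the original module; the scraper object and
# URLs below are hypothetical placeholders):
#
#   strip = ComicStrip(scraper, 'https://example.com/strip/42',
#                      ['https://example.com/strip/42/page.png'])
#   for image in strip.getImages():
#       filename, saved = image.save('/tmp/comics')
#
# save() returns the target filename and whether a new file was written; already
# downloaded images are detected via If-Modified-Since and content-length checks.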
|
1,101 | test fan in metrics not called by |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests handlers defined within main.py."""
from datetime import datetime
import mock
import os
import re
import unittest
import urlparse
import webapp2
import webtest
from google.appengine.ext import testbed
from bigquery_slots_monitoring import config
from bigquery_slots_monitoring import main
class HandlersTest(unittest.TestCase):
"""Tests handlers are set as expected."""
def setUp(self):
self.app = webtest.TestApp(main.app)
@mock.patch.object(main.CopyMetrics, 'get', return_value=webapp2.Response())
def testHandlersCopyMetrics(self, mock_get):
self.app.get('/CopyMetrics')
mock_get.assert_called_once_with()
@mock.patch.object(main.FanInMetrics, 'get', return_value=webapp2.Response())
def testHandlersFanInMetrics(self, mock_get):
self.app.get('/FanInMetrics')
mock_get.assert_called_once_with()
class FanInMetricsTest(unittest.TestCase):
"""Tests FanInMetrics GET handler logic."""
def setUp(self):
self.app = webtest.TestApp(main.app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_taskqueue_stub(
root_path=os.path.join(os.path.dirname(__file__), '../'))
self.taskqueue_stub = self.testbed.get_stub(
testbed.TASKQUEUE_SERVICE_NAME)
def tearDown(self):
self.testbed.deactivate()
def METHOD_NAME(self):
self.app.get('/FanInMetrics', status=403)
@mock.patch.object(main.metrics, 'create_custom_metrics')
@mock.patch.object(
main.metrics, 'get_projects', return_value=['project1', 'project2'])
def testFanInMetrics_Valid(self, mock_create_metrics, mock_get_projects):
self.app.get(
'/FanInMetrics',
headers={'X-Appengine-Cron': 'Some cron'},
status=200)
mock_create_metrics.assert_called_once_with(config.BILLING_ACCOUNT)
tasks = self.taskqueue_stub.get_filtered_tasks()
self.assertEqual(len(tasks), 2)
self.assertEqual(tasks[0].headers['X-AppEngine-QueueName'], 'copy-metrics')
self.assertRegexpMatches(tasks[0].name, '^project1.*')
self.assertEqual(tasks[0].method, 'GET')
url = urlparse.urlparse(tasks[0].url)
query = urlparse.parse_qs(url.query)
self.assertEqual(url.path, '/CopyMetrics')
self.assertDictEqual(
query,
{
'src_project': ['project1'],
'dst_project': [config.PROJECT_ID],
})
self.assertEqual(tasks[1].headers['X-AppEngine-QueueName'], 'copy-metrics')
self.assertRegexpMatches(tasks[1].name, '^project2.*')
self.assertEqual(tasks[1].method, 'GET')
url = urlparse.urlparse(tasks[1].url)
query = urlparse.parse_qs(url.query)
self.assertEqual(url.path, '/CopyMetrics')
self.assertDictEqual(
query,
{
'src_project': ['project2'],
'dst_project': [config.PROJECT_ID],
})
class CopyMetricsTest(unittest.TestCase):
"""Tests CopyMetrics GET handler logic."""
def setUp(self):
self.app = webtest.TestApp(main.app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
def testCopyMetrics_NotCalledByCloudTasks(self):
self.app.get('/CopyMetrics', status=403)
def testCopyMetrics_MissingParameters(self):
self.app.get(
'/CopyMetrics',
headers={'X-AppEngine-QueueName': 'SomeQueue'},
status=400)
@mock.patch.object(main.metrics, 'copy_metrics')
def testCopyMetrics_Valid(self, mock_copy_metrics):
mocked_utcnow_value = datetime(2018, 1, 1, 12, 30, 30, 30)
main.datetime = mock.MagicMock()
main.datetime.utcnow.return_value = mocked_utcnow_value
payload = {
'src_project': 'srcProject',
'dst_project': 'dstProject'
}
self.app.get(
'/CopyMetrics',
payload,
headers={'X-AppEngine-QueueName': 'SomeQueue'},
status=200)
mock_copy_metrics.assert_called_with(
payload['src_project'], payload['dst_project'], mocked_utcnow_value)
|
1,102 | on ready |
# This example builds on the concepts of the app_commands/basic.py example
# It's suggested to look at that one to understand certain concepts first.
from typing import Literal, Union, NamedTuple
from enum import Enum
import discord
from discord import app_commands
MY_GUILD = discord.Object(id=0) # replace with your guild id
class MyClient(discord.Client):
def __init__(self):
super().__init__(intents=discord.Intents.default())
self.tree = app_commands.CommandTree(self)
async def setup_hook(self):
self.tree.copy_global_to(guild=MY_GUILD)
await self.tree.sync(guild=MY_GUILD)
client = MyClient()
@client.event
async def METHOD_NAME():
print(f'Logged in as {client.user} (ID: {client.user.id})')
print('------')
# A transformer is a class that specifies how a parameter in your code
# should behave both when used on Discord and when you receive it from Discord.
# There are a few built-in transformers; this example will show these along with
# creating your own for maximum flexibility.
# The first built-in transformer is app_commands.Range
# It works on `str`, `int`, and `float` options and tells you
# the maximum and minimum values (or length in the case of `str`) allowed
@client.tree.command()
@app_commands.describe(first='The first number to add', second='The second number to add')
async def add(
interaction: discord.Interaction,
# This makes it so the first parameter can only be between 0 to 100.
first: app_commands.Range[int, 0, 100],
# This makes it so the second parameter must be over 0, with no maximum limit.
second: app_commands.Range[int, 0, None],
):
"""Adds two numbers together"""
await interaction.response.send_message(f'{first} + {second} = {first + second}', ephemeral=True)
# Other transformers include regular type hints that are supported by Discord
# Examples of these include int, str, float, bool, User, Member, Role, and any channel type.
# Since there are a lot of these, for brevity only a channel example will be included.
# This command shows how to only show text and voice channels to a user using the Union type hint
# combined with the VoiceChannel and TextChannel types.
@client.tree.command(name='channel-info')
@app_commands.describe(channel='The channel to get info of')
async def channel_info(interaction: discord.Interaction, channel: Union[discord.VoiceChannel, discord.TextChannel]):
"""Shows basic channel info for a text or voice channel."""
embed = discord.Embed(title='Channel Info')
embed.add_field(name='Name', value=channel.name, inline=True)
embed.add_field(name='ID', value=channel.id, inline=True)
embed.add_field(
name='Type',
value='Voice' if isinstance(channel, discord.VoiceChannel) else 'Text',
inline=True,
)
embed.set_footer(text='Created').timestamp = channel.created_at
await interaction.response.send_message(embed=embed)
# In order to support choices, the library has a few ways of doing this.
# The first one is using a typing.Literal for basic choices.
# On Discord, this will show up as two choices, Buy and Sell.
# In the code, you will receive either 'Buy' or 'Sell' as a string.
@client.tree.command()
@app_commands.describe(action='The action to do in the shop', item='The target item')
async def shop(interaction: discord.Interaction, action: Literal['Buy', 'Sell'], item: str):
"""Interact with the shop"""
await interaction.response.send_message(f'Action: {action}\nItem: {item}')
# The second way to do choices is via an Enum from the standard library
# On Discord, this will show up as four choices: apple, banana, cherry, and dragonfruit
# In the code, you will receive the appropriate enum value.
class Fruits(Enum):
apple = 0
banana = 1
cherry = 2
dragonfruit = 3
@client.tree.command()
@app_commands.describe(fruit='The fruit to choose')
async def fruit(interaction: discord.Interaction, fruit: Fruits):
"""Choose a fruit!"""
await interaction.response.send_message(repr(fruit))
# You can also make your own transformer by inheriting from app_commands.Transformer
class Point(NamedTuple):
x: int
y: int
# The default transformer takes in a string option and you can transform
# it into any value you'd like.
#
# Transformers also support various other settings such as overriding
# properties like `choices`, `max_value`, `min_value`, `type`, or `channel_types`.
# However, this is outside of the scope of this example so check the documentation
# for more information.
class PointTransformer(app_commands.Transformer):
async def transform(self, interaction: discord.Interaction, value: str) -> Point:
(x, _, y) = value.partition(',')
return Point(x=int(x.strip()), y=int(y.strip()))
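# For example, an option value of "3, 4" passed to the command below would be
# transformed into Point(x=3, y=4) before the callback runs.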
@client.tree.command()
async def graph(
interaction: discord.Interaction,
# In order to use the transformer, you should use Transform to tell the
# library to use it.
point: app_commands.Transform[Point, PointTransformer],
):
await interaction.response.send_message(str(point))
# For more basic transformers for your own types without too much repetition,
# a concept known as "inline transformers" is supported. This allows you to use
# a classmethod to have a string based transformer. It's only useful
# if you only care about transforming a string to a class and nothing else.
class Point3D(NamedTuple):
x: int
y: int
z: int
# This is the same as the above transformer except inline
@classmethod
async def transform(cls, interaction: discord.Interaction, value: str):
x, y, z = value.split(',')
return cls(x=int(x.strip()), y=int(y.strip()), z=int(z.strip()))
@client.tree.command()
async def graph3d(interaction: discord.Interaction, point: Point3D):
await interaction.response.send_message(str(point))
client.run('token')
|
1,103 | wenner src list |
import numpy as np
import matplotlib.pyplot as plt
from . import receivers
from . import sources
from .survey import Survey
# Import geometric_factor to make it available through
# SimPEG.resistivity.utils.geometric_factor (to ensure backward compatibility)
from ..utils import geometric_factor # noqa: F401
def METHOD_NAME(n_electrodes, a_spacing, in2D=False, plotIt=False):
"""
Source list for a Wenner Array
Parameters
----------
n_electrodes : int
Number of electrodes in the Wenner array
a_spacing : float
Wenner array spacing parameter *a*
in2D : bool, default=``False``
If ``True``, create 2D sources
plotIt : bool, default=``False``
If ``True``, plot the Wenner geometry
Returns
-------
list of SimPEG.electromagnetics.static.resistivity.sources.Dipole
        List of sources and their associated receivers for the Wenner survey
"""
elocs = np.arange(0, a_spacing * n_electrodes, a_spacing)
elocs -= (n_electrodes * a_spacing - a_spacing) / 2
space = 1
WENNER = np.zeros((0,), dtype=int)
for _ in range(n_electrodes):
for jj in range(n_electrodes):
test = np.r_[jj, jj + space, jj + space * 2, jj + space * 3]
if np.any(test >= n_electrodes):
break
WENNER = np.r_[WENNER, test]
space += 1
WENNER = WENNER.reshape((-1, 4))
if plotIt:
for i, s in enumerate("rbkg"):
plt.plot(elocs[WENNER[:, i]], s + ".")
plt.show()
# Create sources and receivers
i = 0
if in2D:
def getLoc(ii, abmn):
return np.r_[elocs[WENNER[ii, abmn]], 0]
else:
def getLoc(ii, abmn):
return np.r_[elocs[WENNER[ii, abmn]], 0, 0]
source_list = []
for i in range(WENNER.shape[0]):
rx = receivers.Dipole(
getLoc(i, 1).reshape([1, -1]), getLoc(i, 2).reshape([1, -1])
)
src = sources.Dipole([rx], getLoc(i, 0), getLoc(i, 3))
source_list += [src]
return source_list
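# Hedged usage sketch (the builder's name is masked as METHOD_NAME in this listing;
# electrode count and spacing below are arbitrary):
#
#   source_list = METHOD_NAME(n_electrodes=8, a_spacing=10.0, in2D=True)
#   survey = Survey(source_list)
#
# Each source is a sources.Dipole on the outer (A, B) electrodes with a single
# receivers.Dipole on the inner (M, N) electrodes of a Wenner quadrupole.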
def _mini_pole_pole(survey, verbose=False):
"""Function to miniaturize a survey for use in DCSimulation.
Miniaturizes the survey into the minimum number of unique pole-pole electrode
combinations for AM, AN, BM, BN pairs (also taking into account reciprocity),
for use in a DCSimulation only.
"""
A = survey.locations_a
B = survey.locations_b
M = survey.locations_m
N = survey.locations_n
elecs, inverse = np.unique(np.r_[A, B, M, N], axis=0, return_inverse=True)
inv_A, inv_B, inv_M, inv_N = inverse.reshape(4, -1)
dipole_tx = inv_A != inv_B
dipole_rx = inv_M != inv_N
AM = np.sort(np.c_[inv_A, inv_M])
AN = np.sort(np.c_[inv_A[dipole_rx], inv_N[dipole_rx]])
BM = np.sort(np.c_[inv_B[dipole_tx], inv_M[dipole_tx]])
BN = np.sort(np.c_[inv_B[dipole_tx & dipole_rx], inv_N[dipole_tx & dipole_rx]])
unique_pole_poles, pole_pole_inv = np.unique(
np.r_[AM, AN, BM, BN], axis=0, return_inverse=True
)
inv_AM, pole_pole_inv = pole_pole_inv[: len(AM)], pole_pole_inv[len(AM) :]
inv_AN, pole_pole_inv = pole_pole_inv[: len(AN)], pole_pole_inv[len(AN) :]
inv_BM, inv_BN = pole_pole_inv[: len(BM)], pole_pole_inv[len(BM) :]
if verbose:
print(f"There are {unique_pole_poles.shape[0]} unique pole-pole combinations.")
unique_sources = []
last_src = None
i_d = 0
while i_d < len(unique_pole_poles):
if last_src != unique_pole_poles[i_d, 0]:
last_src = unique_pole_poles[i_d, 0]
rxs = []
else:
while (
i_d < len(unique_pole_poles) and last_src == unique_pole_poles[i_d, 0]
):
rxs.append(unique_pole_poles[i_d, 1])
i_d += 1
rxs = np.array(rxs, dtype=int)
rxs = receivers.Pole(elecs[rxs])
unique_sources.append(sources.Pole([rxs], elecs[last_src]))
dipoles = [dipole_rx, dipole_tx]
invs = [inv_AM, inv_AN, inv_BM, inv_BN]
mini_survey = Survey(unique_sources)
return dipoles, invs, mini_survey
|
1,104 | tasks common settings |
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List
from argilla.server.daos.backend.mappings.stopwords import english
from argilla.server.settings import settings
MULTILINGUAL_STOP_ANALYZER_REF = "multilingual_stop_analyzer"
class mappings:
@staticmethod
def keyword_field(
enable_text_search: bool = False,
):
"""Mappings config for keyword field"""
mapping = {
"type": "keyword",
}
if enable_text_search:
text_field = mappings.text_field(with_wordcloud=False)
text_field_fields = text_field.pop("fields", {})
mapping["fields"] = {"text": text_field, **text_field_fields}
return mapping
@staticmethod
def path_match_keyword_template(
path: str,
enable_text_search_in_keywords: bool = False,
):
"""Dynamic template mappings config for keyword field based on path match"""
return {
"path_match": path,
"match_mapping_type": "string",
"mapping": mappings.keyword_field(
enable_text_search=enable_text_search_in_keywords,
),
}
@staticmethod
def text_field(with_wordcloud: bool = True):
"""Mappings config for textual field"""
default_analyzer = settings.default_es_search_analyzer
exact_analyzer = settings.exact_es_search_analyzer
mappings = {
"type": "text",
"analyzer": default_analyzer,
"fields": {
"exact": {
"type": "text",
"analyzer": exact_analyzer,
},
},
}
if with_wordcloud:
mappings["fields"]["wordcloud"] = {
"type": "text",
"fielddata": True,
"fielddata_frequency_filter": {
"min": 0.001,
"max": 0.1,
"min_segment_size": 500,
},
"analyzer": MULTILINGUAL_STOP_ANALYZER_REF,
}
return mappings
@staticmethod
def source(includes: List[str] = None, excludes: List[str] = None):
"""Source configuration with included and excluded fields"""
source = {}
if includes:
source["includes"] = includes
if excludes:
source["excludes"] = excludes
return source
@staticmethod
def nested_field():
"""Nested field mapping basic configuration"""
return {"type": "nested", "include_in_root": True}
@staticmethod
def decimal_field():
return {"type": "float"}
@staticmethod
def protected_non_searchable_field():
return {"type": "object", "enabled": False}
@classmethod
def dynamic_field(cls):
return {"dynamic": True, "type": "object"}
def configure_multilingual_stop_analyzer(settings: Dict[str, Any], supported_langs: List[str] = None):
lang2elastic_stop = {
"en": english.STOPWORDS,
"es": "_spanish_",
"fr": "_french_",
"de": "_german_",
}
supported_langs = supported_langs or [lang for lang in lang2elastic_stop]
def get_value_with_defaults(data: dict, key: str, default):
prop = data.get(key)
if prop is None:
data[key] = default
return data[key]
analysis = get_value_with_defaults(settings, "analysis", {})
filter = get_value_with_defaults(analysis, "filter", {})
analyzer = get_value_with_defaults(analysis, "analyzer", {})
filters = []
for lang in supported_langs:
stopwords = lang2elastic_stop.get(lang)
if stopwords:
filter[lang] = {
"type": "stop",
"stopwords": stopwords,
}
filters.append(lang)
analyzer[MULTILINGUAL_STOP_ANALYZER_REF] = {
"tokenizer": "lowercase",
"filter": filters,
}
return settings
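# Illustrative call (values assumed, not taken from a real deployment): passing an
# empty settings dict configures the analyzer in place, e.g.
#
#   es_settings = configure_multilingual_stop_analyzer({}, supported_langs=["en", "es"])
#   # -> {"analysis": {"filter": {"en": {...}, "es": {...}},
#   #                  "analyzer": {"multilingual_stop_analyzer": {
#   #                      "tokenizer": "lowercase", "filter": ["en", "es"]}}}}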
def extended_analyzer():
"""Extended analyzer (used only in `word` field). Deprecated"""
return {
"type": "custom",
"tokenizer": "whitespace",
"filter": ["lowercase", "asciifolding"],
}
def METHOD_NAME():
"""Common index settings"""
es_settings = {
"number_of_shards": settings.es_records_index_shards,
"number_of_replicas": settings.es_records_index_replicas,
}
configure_multilingual_stop_analyzer(settings=es_settings)
return es_settings
def dynamic_metrics_text():
return {
"metrics.*": mappings.path_match_keyword_template(
path="metrics.*",
enable_text_search_in_keywords=False,
)
}
def dynamic_metadata_text():
return {
"metadata.*": mappings.path_match_keyword_template(
path="metadata.*",
enable_text_search_in_keywords=True,
)
}
def dynamic_annotations_text(path: str):
path = f"{path}.*"
return {path: mappings.path_match_keyword_template(path=path, enable_text_search_in_keywords=True)}
def tasks_common_mappings():
"""Commons index mappings"""
return {
# TODO(@frascuchon): verify min es version that support meta fields
# "_meta": {"version.min": "0.10"},
"dynamic": "strict",
"properties": {
"id": mappings.keyword_field(),
"text": mappings.text_field(),
# TODO(@frascuchon): Enable prediction and annotation
# so we can build extra metrics based on these fields
"prediction": {"type": "object", "enabled": False},
"annotation": {"type": "object", "enabled": False},
"predictions": mappings.dynamic_field(),
"annotations": mappings.dynamic_field(),
"status": mappings.keyword_field(),
"event_timestamp": {"type": "date_nanos"},
"last_updated": {"type": "date_nanos"},
"annotated_by": mappings.keyword_field(enable_text_search=True),
"predicted_by": mappings.keyword_field(enable_text_search=True),
"metrics": mappings.dynamic_field(),
"metadata": mappings.dynamic_field(),
"vectors": mappings.dynamic_field(),
},
"dynamic_templates": [
dynamic_metadata_text(),
dynamic_metrics_text(),
dynamic_annotations_text(path="predictions"),
dynamic_annotations_text(path="annotations"),
],
}
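# A hedged sketch of how these helpers are typically combined when an index is
# created (the client call and index name below are illustrative, not taken from
# this module):
#
#   body = {"settings": METHOD_NAME(), "mappings": tasks_common_mappings()}
#   es_client.indices.create(index="some-records-index", body=body)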
|
1,105 | test load running |
from datetime import timedelta
from typing import List, Dict, Tuple, Any
from pytest import fixture, mark
from resotocore.db.runningtaskdb import RunningTaskData, RunningTaskDb, RunningTaskStepInfo
from resotocore.ids import TaskId, SubscriberId, TaskDescriptorId
from resotocore.message_bus import ActionDone
from resotocore.task.model import Subscriber
from resotocore.task.task_description import RunningTask, Workflow
from resotocore.util import utc, utc_str
now = utc()
@fixture
def instances() -> List[RunningTaskData]:
messages = [ActionDone(str(a), TaskId("test"), "bla", SubscriberId("sf")) for a in range(0, 10)]
state_data = {"test": 1}
return [
RunningTaskData(
TaskId(f"task_{a}"),
TaskDescriptorId("task_123"),
"task_123",
Workflow.__name__,
messages,
"start",
state_data,
[RunningTaskStepInfo(f"step_{a}", False, now, now) for a in range(0, 3)],
task_started_at=now,
task_duration=timedelta(seconds=10),
done=a > 5,
has_info=a > 6,
has_error=a > 7,
)
for a in range(0, 10)
]
@mark.asyncio
async def METHOD_NAME(running_task_db: RunningTaskDb, instances: List[RunningTaskData]) -> None:
await running_task_db.update_many(instances)
not_done = list(filter(lambda x: not x.done, instances))
    running = [sub.id async for sub in running_task_db.all_running()]
    assert sorted(x.id for x in not_done) == sorted(running)
@mark.asyncio
async def test_last(running_task_db: RunningTaskDb, instances: List[RunningTaskData]) -> None:
await running_task_db.update_many(instances)
running_tasks = list(filter(lambda x: x.done, instances))
running_tasks.sort(key=lambda x: x.task_started_at, reverse=True)
done_task = next(iter(running_tasks), None)
assert done_task
last_done = await running_task_db.last(descriptor_id=done_task.task_descriptor_id)
assert last_done
assert done_task.id == last_done.id
assert await running_task_db.last()
@mark.asyncio
async def test_load(running_task_db: RunningTaskDb, instances: List[RunningTaskData]) -> None:
await running_task_db.update_many(instances)
loaded = [sub async for sub in running_task_db.all()]
    assert sorted(x.id for x in instances) == sorted(x.id for x in loaded)
@mark.asyncio
async def test_filtered(running_task_db: RunningTaskDb, instances: List[RunningTaskData]) -> None:
await running_task_db.update_many(instances)
async def filtered_list(**kwargs: Any) -> List[TaskId]:
async with await running_task_db.filtered(**kwargs) as crsr:
return [elem.id async for elem in crsr]
assert len(await filtered_list()) == len(instances)
assert len(await filtered_list(limit=1)) == 1
assert len(await filtered_list(descriptor_id="task_123")) == len(instances)
assert await filtered_list(task_id=TaskId("task_1")) == [TaskId("task_1")]
assert len(await filtered_list(started_after=now + timedelta(minutes=1))) == 0
assert len(await filtered_list(started_after=now - timedelta(minutes=1))) == 10
assert len(await filtered_list(started_before=now + timedelta(minutes=1))) == 10
assert len(await filtered_list(started_before=now - timedelta(minutes=1))) == 0
assert len(await filtered_list(with_info=True)) == 3
assert len(await filtered_list(with_info=False)) == 7
assert len(await filtered_list(with_error=True)) == 2
assert len(await filtered_list(with_error=False)) == 8
@mark.asyncio
async def test_aggregated(running_task_db: RunningTaskDb, instances: List[RunningTaskData]) -> None:
await running_task_db.update_many(instances)
res = await running_task_db.aggregated_history()
assert res == {
"task_123": {"count": 10, "last_run": utc_str(now), "runs_with_errors": 2, "average_duration": "10s"}
}
@mark.asyncio
async def test_update(running_task_db: RunningTaskDb, instances: List[RunningTaskData]) -> None:
# multiple updates should work as expected
await running_task_db.update_many(instances)
await running_task_db.update_many(instances)
await running_task_db.update_many(instances)
loaded = [sub async for sub in running_task_db.all()]
    assert sorted(x.id for x in instances) == sorted(x.id for x in loaded)
@mark.asyncio
async def test_delete(running_task_db: RunningTaskDb, instances: List[RunningTaskData]) -> None:
await running_task_db.update_many(instances)
remaining = list(instances)
for _ in instances:
sub = remaining.pop()
await running_task_db.delete_value(sub)
loaded = [sub async for sub in running_task_db.all()]
    assert sorted(x.id for x in remaining) == sorted(x.id for x in loaded)
assert len([sub async for sub in running_task_db.all()]) == 0
@mark.asyncio
async def test_update_state(
running_task_db: RunningTaskDb,
workflow_instance: Tuple[RunningTask, Subscriber, Subscriber, Dict[str, List[Subscriber]]],
) -> None:
wi, _, _, _ = workflow_instance
task_id = TaskId("test")
subscriber_id = SubscriberId("sf")
first = ActionDone("start_collect", task_id, "bla", subscriber_id)
second = ActionDone("collect", task_id, "bla", subscriber_id)
third = ActionDone("collect_done", task_id, "bla", subscriber_id)
async def assert_state(current: str, message_count: int) -> RunningTaskData:
state: RunningTaskData = await running_task_db.get(wi.id) # type: ignore
assert state.current_state_name == current
assert len(state.received_messages) == message_count
return state
await running_task_db.insert(wi)
await assert_state(wi.current_state.name, 6)
wi.machine.set_state("start")
await running_task_db.update_state(wi, first)
await assert_state("start", 7)
wi.machine.set_state("collect")
await running_task_db.update_state(wi, second)
await assert_state("collect", 8)
wi.machine.set_state("done")
await running_task_db.update_state(wi, third)
last = await assert_state("done", 9)
assert last.received_messages[-3:] == [first, second, third]
|
1,106 | test connect tls ok |
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import copy
from typing import Callable # noqa: F401
import mock
import pytest
from datadog_checks.base.stubs.aggregator import AggregatorStub # noqa: F401
from datadog_checks.base.stubs.datadog_agent import DatadogAgentStub # noqa: F401
from datadog_checks.rethinkdb import RethinkDBCheck
from datadog_checks.rethinkdb.types import Instance # noqa: F401
from .assertions import assert_metrics, assert_service_checks
from .cluster import temporarily_disconnect_server
from .common import (
HEROES_TABLE_SERVERS,
MALFORMED_VERSION_STRING_PARAMS,
RAW_VERSION,
SERVER_PORTS,
TLS_CLIENT_CERT,
TLS_SERVER,
)
from .types import ServerName # noqa: F401
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
class TestCheck:
METRICS_COLLECTION_MOCK_TARGET = 'datadog_checks.rethinkdb.check.RethinkDBCheck.collect_metrics'
def test_default(self, dd_run_check, aggregator, instance):
# type: (Callable, AggregatorStub, Instance) -> None
check = RethinkDBCheck('rethinkdb', {}, [instance])
dd_run_check(check)
assert_metrics(aggregator)
assert_service_checks(aggregator, instance)
def test_connect_proxy_ok(self, dd_run_check, aggregator, instance):
# type: (Callable, AggregatorStub, Instance) -> None
instance = instance.copy()
instance['port'] = SERVER_PORTS['proxy']
check = RethinkDBCheck('rethinkdb', {}, [instance])
dd_run_check(check)
assert_metrics(aggregator, is_proxy=True)
assert_service_checks(aggregator, instance)
def METHOD_NAME(self, dd_run_check, aggregator, instance):
# type: (Callable, AggregatorStub, Instance) -> None
instance = instance.copy()
instance['port'] = SERVER_PORTS[TLS_SERVER]
instance['tls_ca_cert'] = TLS_CLIENT_CERT
check = RethinkDBCheck('rethinkdb', {}, [instance])
dd_run_check(check)
assert_metrics(aggregator)
assert_service_checks(aggregator, instance)
def test_no_credentials_ok(self, dd_run_check, aggregator, instance):
# type: (Callable, AggregatorStub, Instance) -> None
instance = instance.copy()
# RethinkDB will default to 'admin' w/o password.
# Should work assuming admin account in our test cluster doesn't have a password.
instance.pop('username')
instance.pop('password')
check = RethinkDBCheck('rethinkdb', {}, [instance])
dd_run_check(check)
assert_metrics(aggregator)
assert_service_checks(aggregator, instance)
@pytest.mark.parametrize('server_with_data', list(HEROES_TABLE_SERVERS))
def test_disconnected_data_server_ok(self, dd_run_check, aggregator, instance, server_with_data):
# type: (Callable, AggregatorStub, Instance, ServerName) -> None
# Simulate the scenario where one of the servers in the cluster is down, but not the one we're
# connecting to.
check = RethinkDBCheck('rethinkdb', {}, [instance])
with temporarily_disconnect_server(server_with_data):
dd_run_check(check)
assert_metrics(aggregator, disconnected_servers={server_with_data})
assert_service_checks(aggregator, instance, disconnected_servers={server_with_data})
def test_connection_failure(self, dd_run_check, aggregator, instance):
# type: (Callable, AggregatorStub, Instance) -> None
instance = copy.deepcopy(instance)
instance['host'] = 'doesnotexist'
check = RethinkDBCheck('rethinkdb', {}, [instance])
with pytest.raises(Exception, match='Could not connect'):
dd_run_check(check)
assert_service_checks(aggregator, instance, connect_status=RethinkDBCheck.CRITICAL)
def test_metric_collection_failure(self, dd_run_check, aggregator, instance):
# type: (Callable, AggregatorStub, Instance) -> None
with mock.patch(self.METRICS_COLLECTION_MOCK_TARGET, side_effect=Exception('Horrible failure')):
check = RethinkDBCheck('rethinkdb', {}, [instance])
with pytest.raises(Exception, match='Horrible failure'):
dd_run_check(check)
assert_service_checks(aggregator, instance, connect_status=RethinkDBCheck.CRITICAL)
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
class TestVersionMetadata:
VERSION_MOCK_TARGET = 'datadog_checks.rethinkdb.queries_impl.get_version_metadata'
@pytest.mark.skipif(not RAW_VERSION, reason='Requires RAW_VERSION to be set')
def test_default(self, instance, dd_run_check, datadog_agent):
# type: (Instance, Callable, DatadogAgentStub) -> None
raw_version = RAW_VERSION
version, _, build = raw_version.partition('~')
major, minor, patch = version.split('.')
metadata = {
'version.scheme': 'semver',
'version.major': major,
'version.minor': minor,
'version.patch': patch,
'version.raw': raw_version,
}
check_id = 'test'
check = RethinkDBCheck('rethinkdb', {}, [instance])
check.check_id = check_id
dd_run_check(check)
datadog_agent.assert_metadata(check_id, metadata)
@pytest.mark.integration
@pytest.mark.parametrize('malformed_version_string', MALFORMED_VERSION_STRING_PARAMS)
def test_malformed(self, instance, dd_run_check, datadog_agent, malformed_version_string):
# type: (Instance, Callable, DatadogAgentStub, str) -> None
with mock.patch(self.VERSION_MOCK_TARGET, return_value=[(malformed_version_string,)]):
check_id = 'test'
check = RethinkDBCheck('rethinkdb', {}, [instance])
check.check_id = check_id
dd_run_check(check)
datadog_agent.assert_metadata(check_id, {})
@pytest.mark.integration
def test_failure(self, instance, dd_run_check, datadog_agent):
# type: (Instance, Callable, DatadogAgentStub) -> None
with mock.patch(self.VERSION_MOCK_TARGET, side_effect=ValueError('Oops!')):
check_id = 'test'
check = RethinkDBCheck('rethinkdb', {}, [instance])
check.check_id = check_id
dd_run_check(check)
datadog_agent.assert_metadata(check_id, {})
|
1,107 | instance cache metric wildcards |
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_proxy(field, value):
return get_default_field_value(field, value)
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_skip_proxy(field, value):
return False
def shared_timeout(field, value):
return 10
def instance_allow_redirects(field, value):
return True
def instance_auth_token(field, value):
return get_default_field_value(field, value)
def instance_auth_type(field, value):
return 'basic'
def instance_aws_host(field, value):
return get_default_field_value(field, value)
def instance_aws_region(field, value):
return get_default_field_value(field, value)
def instance_aws_service(field, value):
return get_default_field_value(field, value)
def METHOD_NAME(field, value):
return True
def instance_cache_shared_labels(field, value):
return True
def instance_collect_counters_with_distributions(field, value):
return False
def instance_collect_histogram_buckets(field, value):
return True
def instance_connect_timeout(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_enable_health_service_check(field, value):
return True
def instance_exclude_labels(field, value):
return get_default_field_value(field, value)
def instance_exclude_metrics(field, value):
return get_default_field_value(field, value)
def instance_exclude_metrics_by_labels(field, value):
return get_default_field_value(field, value)
def instance_extra_headers(field, value):
return get_default_field_value(field, value)
def instance_extra_metrics(field, value):
return get_default_field_value(field, value)
def instance_headers(field, value):
return get_default_field_value(field, value)
def instance_histogram_buckets_as_distributions(field, value):
return False
def instance_hostname_format(field, value):
return get_default_field_value(field, value)
def instance_hostname_label(field, value):
return get_default_field_value(field, value)
def instance_ignore_tags(field, value):
return get_default_field_value(field, value)
def instance_include_labels(field, value):
return get_default_field_value(field, value)
def instance_kerberos_auth(field, value):
return 'disabled'
def instance_kerberos_cache(field, value):
return get_default_field_value(field, value)
def instance_kerberos_delegate(field, value):
return False
def instance_kerberos_force_initiate(field, value):
return False
def instance_kerberos_hostname(field, value):
return get_default_field_value(field, value)
def instance_kerberos_keytab(field, value):
return get_default_field_value(field, value)
def instance_kerberos_principal(field, value):
return get_default_field_value(field, value)
def instance_log_requests(field, value):
return False
def instance_metric_patterns(field, value):
return get_default_field_value(field, value)
def instance_metrics(field, value):
return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
return 15
def instance_namespace(field, value):
return get_default_field_value(field, value)
def instance_non_cumulative_histogram_buckets(field, value):
return False
def instance_ntlm_domain(field, value):
return get_default_field_value(field, value)
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_persist_connections(field, value):
return False
def instance_proxy(field, value):
return get_default_field_value(field, value)
def instance_raw_line_filters(field, value):
return get_default_field_value(field, value)
def instance_raw_metric_prefix(field, value):
return get_default_field_value(field, value)
def instance_read_timeout(field, value):
return get_default_field_value(field, value)
def instance_rename_labels(field, value):
return get_default_field_value(field, value)
def instance_request_size(field, value):
return 16
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_share_labels(field, value):
return get_default_field_value(field, value)
def instance_skip_proxy(field, value):
return False
def instance_tag_by_endpoint(field, value):
return True
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_telemetry(field, value):
return False
def instance_timeout(field, value):
return 10
def instance_tls_ca_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_ignore_warning(field, value):
return False
def instance_tls_private_key(field, value):
return get_default_field_value(field, value)
def instance_tls_protocols_allowed(field, value):
return get_default_field_value(field, value)
def instance_tls_use_host_header(field, value):
return False
def instance_tls_verify(field, value):
return True
def instance_use_latest_spec(field, value):
return False
def instance_use_legacy_auth_encoding(field, value):
return True
def instance_use_process_start_time(field, value):
return False
def instance_username(field, value):
return get_default_field_value(field, value)
|
1,108 | list videos |
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, SylvainCecchetto
# GNU General Public License v2.0+ (see LICENSE.txt or https://www.gnu.org/licenses/gpl-2.0.txt)
# This file is part of Catch-up TV & More
from __future__ import unicode_literals
from builtins import str
import re
from codequick import Listitem, Resolver, Route
import urlquick
from resources.lib import resolver_proxy, web_utils
from resources.lib.menu_utils import item_post_treatment
# TODO
# Add Replays/Serie TV (requires an account)
# Live
URL_ROOT = 'https://www.nessma.tv'
URL_LIVE = URL_ROOT + '/ar/live'
URL_REPLAY = URL_ROOT + '/ar/replays'
URL_VIDEOS = URL_ROOT + '/ar/videos'
GENERIC_HEADERS = {'User-Agent': web_utils.get_random_ua()}
@Route.register
def list_categories(plugin, item_id, **kwargs):
"""
Build categories listing
"""
item = Listitem()
    item.label = 'الفيديوهات'  # "Videos"
item.set_callback(METHOD_NAME, item_id=item_id, page='1')
item_post_treatment(item)
yield item
item = Listitem()
    item.label = 'مشاهدة الحلقات'  # "Watch episodes"
item.set_callback(list_programs, item_id=item_id)
item_post_treatment(item)
yield item
@Route.register
def list_programs(plugin, item_id, **kwargs):
"""
Build progams listing
- Le JT
- ...
"""
resp = urlquick.get(URL_REPLAY, headers=GENERIC_HEADERS, max_age=-1)
root = resp.parse()
for program_datas in root.iterfind(".//div[@class='col-sm-3']"):
if program_datas.find('.//img').get('alt') is not None:
program_title = program_datas.find('.//img').get('alt')
program_image = program_datas.find('.//img').get('src')
program_url = program_datas.find('.//a').get('href')
item = Listitem()
item.label = program_title
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(list_videos_replays, item_id=item_id, program_url=program_url, page='1')
item_post_treatment(item)
yield item
@Route.register
def list_videos_replays(plugin, item_id, program_url, page, **kwargs):
params = {'page': page}
resp = urlquick.get(program_url, params=params, headers=GENERIC_HEADERS, max_age=-1)
root = resp.parse()
if root.find(".//div[@class='row replaynessma-cats row-eq-height ']") is not None:
root2 = resp.parse("div", attrs={"class": "row replaynessma-cats row-eq-height "})
for video_datas in root2.iterfind(".//article"):
video_title = video_datas.find('.//h3/a').text
video_image = video_datas.find('.//img').get('src')
video_url = video_datas.find('.//a').get('href')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url, item_id=item_id, video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
else:
for video_datas in root.iterfind(".//div[@class='col-sm-3']"):
video_title = video_datas.find('.//img').get('alt')
video_image = video_datas.find('.//img').get('src')
video_url = video_datas.find('.//a').get('href')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url, item_id=item_id, video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
yield Listitem.next_page(item_id=item_id, program_url=program_url, page=str(int(page) + 1))
@Route.register
def METHOD_NAME(plugin, item_id, page, **kwargs):
params = {'page': page}
resp = urlquick.get(URL_VIDEOS, params=params, headers=GENERIC_HEADERS, max_age=-1)
root = resp.parse()
for video_datas in root.iterfind(".//div[@class='col-sm-4']"):
if video_datas.find('.//img') is not None:
video_title = video_datas.find('.//img').get('alt')
video_image = video_datas.find('.//img').get('src')
video_url = video_datas.find('.//a').get('href')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url, item_id=item_id, video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
yield Listitem.next_page(item_id=item_id, page=str(int(page) + 1))
@Resolver.register
def get_video_url(plugin, item_id, video_url, download_mode=False, **kwargs):
resp = urlquick.get(video_url, headers=GENERIC_HEADERS, max_age=-1)
video_id = re.compile(r'youtube\.com\/embed\/(.*.)\?').findall(resp.text)[0]
return resolver_proxy.get_stream_youtube(plugin, video_id, download_mode)
@Resolver.register
def get_live_url(plugin, item_id, **kwargs):
resp = urlquick.get(URL_LIVE, headers=GENERIC_HEADERS, max_age=-1)
live_id = re.compile(r'dailymotion.com/embed/video/(.*?)[\?\"]').findall(resp.text)[0]
return resolver_proxy.get_stream_dailymotion(plugin, live_id, False)
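# Worked example of the extraction in get_video_url above (the embed markup is a
# hypothetical snippet, not fetched from the site):
#
#   sample = '<iframe src="https://www.youtube.com/embed/abc123?rel=0"></iframe>'
#   re.compile(r'youtube\.com\/embed\/(.*.)\?').findall(sample)  # -> ['abc123']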
|
1,109 | delay response |
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
import asyncio
import logging
import time
from collections.abc import Coroutine
import pytest
import simcore_service_webserver
from aiohttp import web
from pytest_simcore.helpers.utils_assert import assert_status
from servicelib.aiohttp.application import create_safe_application
from simcore_service_webserver._constants import APP_SETTINGS_KEY
from simcore_service_webserver.application_settings import setup_settings
from simcore_service_webserver.diagnostics._healthcheck import (
HEALTH_LATENCY_PROBE,
HealthCheckError,
assert_healthy_app,
)
from simcore_service_webserver.diagnostics.plugin import setup_diagnostics
from simcore_service_webserver.diagnostics.settings import DiagnosticsSettings
from simcore_service_webserver.rest.plugin import setup_rest
from simcore_service_webserver.security.plugin import setup_security
from tenacity import before_log, retry, stop_after_attempt, wait_fixed
from yarl import URL
logger = logging.getLogger(__name__)
def health_check_path(api_version_prefix) -> URL:
return URL(f"/{api_version_prefix}/health")
async def health_check_emulator(
client,
health_check_path,
*,
min_num_checks=2,
start_period: int = 0,
timeout: int = 30,
interval: int = 30,
retries: int = 3,
):
# Follows docker's health check protocol
# SEE https://docs.docker.com/engine/reference/builder/#healthcheck
    # A coroutine can only be awaited once, so build a fresh request for every check.
    def checkpoint() -> Coroutine:
        return client.get(health_check_path)
check_count = 0
@retry(
wait=wait_fixed(interval),
stop=stop_after_attempt(retries),
before=before_log(logger, logging.WARNING),
)
async def _check_entrypoint():
nonlocal check_count
check_count += 1
        resp = await asyncio.wait_for(checkpoint(), timeout=timeout)
assert resp.status == web.HTTPOk.status_code
await asyncio.sleep(start_period)
while check_count < min_num_checks:
await _check_entrypoint()
await asyncio.sleep(interval)
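# Hypothetical use of the emulator above (arguments are illustrative):
#
#   await health_check_emulator(client, health_check_path("v0"), min_num_checks=3)
#
# polls GET /v0/health at least three times, honouring start_period, interval,
# timeout and retries the way a docker HEALTHCHECK would.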
SLOW_HANDLER_DELAY_SECS = 2.0 # secs
@pytest.fixture
def mock_environment(mock_env_devel_environment: dict[str, str], monkeypatch):
monkeypatch.setenv("AIODEBUG_SLOW_DURATION_SECS", f"{SLOW_HANDLER_DELAY_SECS / 10}")
monkeypatch.setenv("DIAGNOSTICS_MAX_TASK_DELAY", f"{SLOW_HANDLER_DELAY_SECS}")
monkeypatch.setenv("DIAGNOSTICS_MAX_AVG_LATENCY", f"{2.0}")
monkeypatch.setenv("DIAGNOSTICS_START_SENSING_DELAY", f"{0}")
monkeypatch.setenv("SC_HEALTHCHECK_TIMEOUT", "2m")
@pytest.fixture
def client(
event_loop: asyncio.AbstractEventLoop,
unused_tcp_port_factory,
aiohttp_client,
api_version_prefix,
mock_environment: None,
):
routes = web.RouteTableDef()
@routes.get("/error")
async def unexpected_error(request: web.Request):
msg = "boom shall produce 500"
raise Exception(msg) # pylint: disable=broad-exception-raised
@routes.get(r"/fail")
async def expected_failure(request: web.Request):
raise web.HTTPServiceUnavailable
@routes.get(r"/slow")
async def blocking_slow(request: web.Request):
time.sleep(SLOW_HANDLER_DELAY_SECS * 1.1)
return web.json_response({"data": True, "error": None})
@routes.get(r"/cancel")
async def cancelled_task(request: web.Request):
task: asyncio.Task = request.app.loop.create_task(asyncio.sleep(10))
task.cancel() # raise CancelledError
@routes.get(r"/timeout/{secs}")
async def time_out(request: web.Request):
secs = float(request.match_info.get("secs", 0))
await asyncio.wait_for(
asyncio.sleep(10 * secs), timeout=secs
) # raises TimeOutError
@routes.get(r"/delay/{secs}")
async def METHOD_NAME(request: web.Request):
secs = float(request.match_info.get("secs", 0))
await asyncio.sleep(secs) # non-blocking slow
return web.json_response({"data": True, "error": None})
# -----
main = {"port": unused_tcp_port_factory(), "host": "localhost"}
cfg = {
"main": main,
"rest": {"enabled": True, "version": api_version_prefix},
"diagnostics": {"enabled": True},
}
app = create_safe_application(cfg)
# activates some sub-modules
assert setup_settings(app)
setup_security(app)
setup_rest(app)
setup_diagnostics(app)
settings: DiagnosticsSettings = app[APP_SETTINGS_KEY].WEBSERVER_DIAGNOSTICS
assert settings.DIAGNOSTICS_MAX_AVG_LATENCY == 2.0
app.router.add_routes(routes)
return event_loop.run_until_complete(
aiohttp_client(app, server_kwargs={key: main[key] for key in ("host", "port")})
)
def test_diagnostics_setup(client):
app = client.app
assert len(app.middlewares) == 3
assert "monitor" in app.middlewares[0].__middleware_name__
assert "error" in app.middlewares[1].__middleware_name__
assert "envelope" in app.middlewares[2].__middleware_name__
async def test_healthy_app(client, api_version_prefix):
resp = await client.get(f"/{api_version_prefix}/health")
data, error = await assert_status(resp, web.HTTPOk)
assert data
assert not error
assert data["name"] == "simcore_service_webserver"
assert data["version"] == simcore_service_webserver._meta.__version__
async def test_unhealthy_app_with_slow_callbacks(client, api_version_prefix):
resp = await client.get(f"/{api_version_prefix}/health")
await assert_status(resp, web.HTTPOk)
resp = await client.get("/slow") # emulates a very slow handle!
await assert_status(resp, web.HTTPOk)
resp = await client.get(f"/{api_version_prefix}/health")
await assert_status(resp, web.HTTPServiceUnavailable)
async def test_diagnose_on_unexpected_error(client):
resp = await client.get("/error")
assert resp.status == web.HTTPInternalServerError.status_code
assert_healthy_app(client.app)
async def test_diagnose_on_failure(client):
resp = await client.get("/fail")
assert resp.status == web.HTTPServiceUnavailable.status_code
assert_healthy_app(client.app)
async def test_diagnose_on_response_delays(client):
settings: DiagnosticsSettings = client.app[APP_SETTINGS_KEY].WEBSERVER_DIAGNOSTICS
tmax = settings.DIAGNOSTICS_MAX_AVG_LATENCY
coros = [client.get(f"/delay/{1.1*tmax}") for _ in range(10)]
resps = await asyncio.gather(*coros)
for resp in resps:
await assert_status(resp, web.HTTPOk)
# monitoring
latency_observed = client.app[HEALTH_LATENCY_PROBE].value()
assert latency_observed > tmax
# diagnostics
with pytest.raises(HealthCheckError):
assert_healthy_app(client.app)
def test_read_prometheus_counter():
# TODO move to test_prometheus_utils.py in servicelib
from prometheus_client import Counter
counter = Counter(
"my_fullname_counter", "description", labelnames=("name", "surname")
)
def get_total():
total_count = 0
for metric in counter.collect():
for sample in metric.samples:
if sample.name.endswith("_total"):
total_count += sample.value
return total_count
counter.labels("pedro", "crespo").inc()
counter.labels("juan", "crespo").inc()
counter.labels("pedro", "valero").inc()
assert get_total() == 3
counter.labels("pedro", "crespo").inc()
counter.labels("pedro", "crespo").inc()
assert get_total() == 5
|
1,110 | tokenizer |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a number of text preprocessing utilities (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
import six
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
from .categorical_vocabulary import CategoricalVocabulary # pylint: disable=g-bad-import-order
try:
# pylint: disable=g-import-not-at-top
import cPickle as pickle
except ImportError:
# pylint: disable=g-import-not-at-top
import pickle
TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+",
re.UNICODE)
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def METHOD_NAME(iterator):
"""Tokenizer generator.
Args:
iterator: Input iterator with strings.
Yields:
array of tokens per each value in the input.
"""
for value in iterator:
yield TOKENIZER_RE.findall(value)
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
class ByteProcessor(object):
"""Maps documents into sequence of ids for bytes.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def __init__(self, max_document_length):
self.max_document_length = max_document_length
def fit(self, x):
"""Does nothing. No fitting required."""
pass
def fit_transform(self, x):
"""Calls transform."""
return self.transform(x)
# pylint: disable=no-self-use
def reverse(self, x):
"""Reverses output of transform back to text.
Args:
x: iterator or matrix of integers. Document representation in bytes.
Yields:
Iterators of utf-8 strings.
"""
for data in x:
document = np.trim_zeros(data.astype(np.int8), trim='b').tostring()
try:
yield document.decode('utf-8')
except UnicodeDecodeError:
yield ''
def transform(self, x):
"""Transforms input documents into sequence of ids.
Args:
x: iterator or list of input documents.
Documents can be bytes or unicode strings, which will be encoded as
utf-8 to map to bytes. Note, in Python2 str and bytes is the same type.
Yields:
iterator of byte ids.
"""
if six.PY3:
# For Python3 defined buffer as memoryview.
buffer_or_memoryview = memoryview
else:
buffer_or_memoryview = buffer # pylint: disable=undefined-variable
for document in x:
if isinstance(document, six.text_type):
document = document.encode('utf-8')
document_mv = buffer_or_memoryview(document)
buff = np.frombuffer(document_mv[:self.max_document_length],
dtype=np.uint8)
yield np.pad(buff, (0, self.max_document_length - len(buff)), 'constant')
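# For example (illustrative): list(ByteProcessor(8).transform(['ab'])) yields a
# single uint8 array equal to [97, 98, 0, 0, 0, 0, 0, 0] -- 'ab' encoded as UTF-8
# bytes and right-padded with zeros up to max_document_length.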
class VocabularyProcessor(object):
"""Maps documents to sequences of word ids.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def __init__(self,
max_document_length,
min_frequency=0,
vocabulary=None,
tokenizer_fn=None):
"""Initializes a VocabularyProcessor instance.
Args:
max_document_length: Maximum length of documents.
        If documents are longer, they will be trimmed; if shorter, padded.
min_frequency: Minimum frequency of words in the vocabulary.
vocabulary: CategoricalVocabulary object.
Attributes:
vocabulary_: CategoricalVocabulary object.
"""
self.max_document_length = max_document_length
self.min_frequency = min_frequency
if vocabulary:
self.vocabulary_ = vocabulary
else:
self.vocabulary_ = CategoricalVocabulary()
if tokenizer_fn:
self._tokenizer = tokenizer_fn
else:
self._tokenizer = METHOD_NAME
def fit(self, raw_documents, unused_y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Args:
raw_documents: An iterable which yield either str or unicode.
unused_y: to match fit format signature of estimators.
Returns:
self
"""
for tokens in self._tokenizer(raw_documents):
for token in tokens:
self.vocabulary_.add(token)
if self.min_frequency > 0:
self.vocabulary_.trim(self.min_frequency)
self.vocabulary_.freeze()
return self
def fit_transform(self, raw_documents, unused_y=None):
"""Learn the vocabulary dictionary and return indexies of words.
Args:
raw_documents: An iterable which yield either str or unicode.
unused_y: to match fit_transform signature of estimators.
Returns:
x: iterable, [n_samples, max_document_length]. Word-id matrix.
"""
self.fit(raw_documents)
return self.transform(raw_documents)
def transform(self, raw_documents):
"""Transform documents to word-id matrix.
Convert words to ids with vocabulary fitted with fit or the one
provided in the constructor.
Args:
raw_documents: An iterable which yields either str or unicode.
Yields:
x: iterable, [n_samples, max_document_length]. Word-id matrix.
"""
for tokens in self._tokenizer(raw_documents):
word_ids = np.zeros(self.max_document_length, np.int64)
for idx, token in enumerate(tokens):
if idx >= self.max_document_length:
break
word_ids[idx] = self.vocabulary_.get(token)
yield word_ids
def reverse(self, documents):
"""Reverses output of vocabulary mapping to words.
Args:
documents: iterable, list of class ids.
Yields:
Iterator over documents mapped back to words.
"""
for item in documents:
output = []
for class_id in item:
output.append(self.vocabulary_.reverse(class_id))
yield ' '.join(output)
def save(self, filename):
"""Saves vocabulary processor into given file.
Args:
filename: Path to output file.
"""
with gfile.Open(filename, 'wb') as f:
f.write(pickle.dumps(self))
@classmethod
def restore(cls, filename):
"""Restores vocabulary processor from given file.
Args:
filename: Path to file to load from.
Returns:
VocabularyProcessor object.
"""
with gfile.Open(filename, 'rb') as f:
return pickle.loads(f.read())
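# A minimal usage sketch (assumption, not part of the original module). An
# explicit tokenizer_fn is passed so the example does not depend on the default
# tokenizer defined earlier in this module.
if __name__ == '__main__':
  docs = ['the quick brown fox', 'the lazy dog']
  vp = VocabularyProcessor(max_document_length=5,
                           tokenizer_fn=lambda it: (d.split() for d in it))
  ids = list(vp.fit_transform(docs))   # two int64 rows of length 5, zero-padded
  words = list(vp.reverse(ids))        # ids mapped back to space-joined tokens
  print(ids, words)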
|
1,111 |
build model
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, List, Optional
import torch
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults
from nemo.utils import logging
try:
from megatron.core import parallel_state
from megatron.core.enums import ModelType
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelType = ApexGuardDefaults()
HAVE_MEGATRON_CORE = False
try:
from apex.transformer.tensor_parallel.layers import set_defaults_if_not_set_tensor_model_parallel_attributes
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# Apex's `build_model` refactored to call Megatron-Core classes
def METHOD_NAME(
model_provider_func: Callable[[Any, Dict[str, Any]], torch.nn.Module],
wrap_with_ddp: bool = True,
virtual_pipeline_model_parallel_size: Optional[int] = None,
model_type: ModelType = ModelType.encoder_or_decoder,
on_cpu: bool = False,
*args: Any,
**kwargs: Any,
) -> List[torch.nn.Module]:
"""Build the model satisfying pipeline model parallel requirements.
This function adds `pre_process` and `post_process` to `**kwargs` and passes `*args` and `**kwargs` to
`model_provider_func`.
Args:
model_provider_func: A function which takes `*args` and `**kwargs` and returns a `nn.Module`.
wrap_with_ddp: If :obj:`True`, wrap the instantiated model
with `torch.nn.parallel.distributed.DistributedDataParallel`, a.k.a. `DDP`.
virtual_pipeline_model_parallel_size: Specify when using the interleaved schedule for pipeline model parallelism.
model_type: ModelType enum selecting an encoder-or-decoder or encoder-and-decoder architecture.
*args: arguments for model provider func
**kwargs: Keyword arguments for model provider func
Returns:
a list of `nn.Module`(s). If `virtual_pipeline_model_parallel_size` is not None,
the list has multiple models, otherwise one.
"""
if model_type is None:
model_type = ModelType.encoder_or_decoder
if (
parallel_state.get_pipeline_model_parallel_world_size() > 1
and virtual_pipeline_model_parallel_size is not None
):
model = []
parallel_state.set_virtual_pipeline_model_parallel_world_size(virtual_pipeline_model_parallel_size)
for i in range(virtual_pipeline_model_parallel_size):
parallel_state.set_virtual_pipeline_model_parallel_rank(i)
model.append(
model_provider_func(
*args,
**kwargs,
pre_process=parallel_state.is_pipeline_first_stage(),
post_process=parallel_state.is_pipeline_last_stage(),
)
)
else:
if model_type == ModelType.encoder_or_decoder:
model = model_provider_func(
*args,
**kwargs,
pre_process=parallel_state.is_pipeline_first_stage(),
post_process=parallel_state.is_pipeline_last_stage(),
)
elif model_type == ModelType.encoder_and_decoder:
pre_process = parallel_state.is_pipeline_first_stage()
post_process = parallel_state.is_pipeline_last_stage()
# `add_encoder` & `add_decoder` logic.
add_encoder, add_decoder = True, True
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
split_rank = parallel_state.get_pipeline_model_parallel_split_rank()
if split_rank is None:
raise RuntimeError("Split rank needs to be specified for model with both encoder and decoder.")
rank = parallel_state.get_pipeline_model_parallel_rank()
world_size = parallel_state.get_pipeline_model_parallel_world_size()
pre_process = rank == 0 or rank == split_rank
post_process = rank == (split_rank - 1) or rank == (world_size - 1)
add_encoder = parallel_state.is_pipeline_stage_before_split()
add_decoder = parallel_state.is_pipeline_stage_after_split()
model = model_provider_func(
*args,
**kwargs,
pre_process=pre_process,
post_process=post_process,
add_encoder=add_encoder,
add_decoder=add_decoder,
)
else:
raise ValueError(f"Unrecognized ModelType '{model_type}'")
if not isinstance(model, list):
model = [model]
for model_module in model:
model_module.model_type = model_type
# Set tensor model parallel attributes if not set.
# Only parameters that are already tensor model parallel have these
# attributes set for them. We should make sure the default attributes
# are set for all params so the optimizer can use them.
for model_module in model:
for param in model_module.parameters():
set_defaults_if_not_set_tensor_model_parallel_attributes(param)
# Print number of parameters.
if parallel_state.model_parallel_is_initialized() and parallel_state.get_data_parallel_rank() == 0:
msg = " > number of parameters on (tensor, pipeline) model parallel rank ({}, {}): {}".format(
parallel_state.get_tensor_model_parallel_rank(),
parallel_state.get_pipeline_model_parallel_rank(),
_calc_number_of_params(model),
)
logging.info(msg)
# GPU allocation.
if not on_cpu:
for model_module in model:
model_module.cuda(torch.cuda.current_device())
if wrap_with_ddp:
i = torch.cuda.current_device()
model = [
torch.nn.parallel.distributed.DistributedDataParallel(
model_module, device_ids=[i], output_device=i, process_group=parallel_state.get_data_parallel_group(),
)
for model_module in model
]
return model
def _calc_number_of_params(model: List[torch.nn.Module]) -> int:
assert isinstance(model, list)
return sum([sum([p.nelement() for p in model_module.parameters()]) for model_module in model])
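# A minimal sketch (assumption, not part of the original file) of a provider
# function with the signature the builder above expects. parallel_state must be
# initialized before the builder can actually be called, so the call itself is
# only indicated in a comment.
def _example_provider(pre_process=True, post_process=True, **unused_kwargs):
    return torch.nn.Linear(16, 16)
# models = METHOD_NAME(_example_provider, wrap_with_ddp=False, on_cpu=True)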
|
1,112 |
remove rows
|
######################################################################################################################
# Copyright (C) 2017-2022 Spine project consortium
# This file is part of Spine Toolbox.
# Spine Toolbox is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details. You should have received a copy of the GNU Lesser General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
######################################################################################################################
"""
Contains a table model with an empty last row.
"""
from PySide6.QtCore import Qt, Slot, QModelIndex
from .minimal_table_model import MinimalTableModel
class EmptyRowModel(MinimalTableModel):
"""A table model with a last empty row."""
def __init__(self, parent=None, header=None):
"""Init class."""
super().__init__(parent, header=header)
self.default_row = {} # A row of default values to put in any newly inserted row
self.force_default = False  # If True, cells holding default values are not editable
self._fetched = False
self.dataChanged.connect(self._handle_data_changed)
self.rowsInserted.connect(self._handle_rows_inserted)
def canFetchMore(self, _parent):
return not self._fetched
def fetchMore(self, parent):
self.insertRows(self.rowCount(), 1, parent)
self._fetched = True
def flags(self, index):
"""Return default flags except if forcing defaults."""
if self.force_default:
try:
name = self.header[index.column()]
if name in self.default_row:
return super().flags(index) & ~Qt.ItemIsEditable
except IndexError:
pass
return super().flags(index)
def set_default_row(self, **kwargs):
"""Set default row data."""
self.default_row = kwargs
def clear(self):
self._fetched = False
super().clear()
def reset_model(self, main_data=None):
self._fetched = False
super().reset_model(main_data)
@Slot(QModelIndex, QModelIndex, list)
def _handle_data_changed(self, top_left, bottom_right, roles=None):
"""Insert a new last empty row in case the previous one has been filled
with any data other than the defaults."""
if roles is None:
roles = list()
if roles and Qt.ItemDataRole.EditRole not in roles:
return
last_row = self.rowCount() - 1
for column in range(self.columnCount()):
try:
field = self.header[column]
except IndexError:
field = None
data = self._main_data[last_row][column]
default = self.default_row.get(field)
if (data or default) and data != default:
self.insertRows(self.rowCount(), 1)
break
def METHOD_NAME(self, row, count, parent=QModelIndex()):
"""Don't remove the last empty row."""
if row + count == self.rowCount():
count -= 1
return super().METHOD_NAME(row, count, parent)
@Slot(QModelIndex, int, int)
def _handle_rows_inserted(self, parent, first, last):
"""Handle rowsInserted signal."""
self.set_rows_to_default(first, last)
def set_rows_to_default(self, first, last=None):
"""Set default data in newly inserted rows."""
if last is None:
last = first
if first >= self.rowCount() or last < 0:
return
default_row = []
for column in range(self.columnCount()):
try:
field = self.header[column]
except IndexError:
field = None
default = self.default_row.get(field)
default_row.append(default)
for row in range(first, last + 1):
self._main_data[row] = default_row.copy()
top_left = self.index(first, 0)
bottom_right = self.index(last, self.columnCount() - 1)
self.dataChanged.emit(top_left, bottom_right)
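# A small usage sketch (assumption, not part of the original module; requires a
# running Qt application and the MinimalTableModel base class):
# model = EmptyRowModel(header=["name", "value"])
# model.fetchMore(QModelIndex())            # creates the initial empty row
# model.set_default_row(name="new item")    # defaults applied to inserted rows
# model.METHOD_NAME(0, model.rowCount())    # the trailing empty row is preserved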
|
1,113 |
push
|
import html.parser as base
import sys
from html.entities import entitydefs
__all__ = ["HTMLParser"]
omit_start = ["body", "tbody", "head", "html"]
single = [
"area",
"base",
"basefont",
"br",
"col",
"frame",
"hr",
"img",
"input",
"isindex",
"link",
"meta",
"param",
]
single = frozenset(single)
heading = ["h1", "h2", "h3", "h4", "h5", "h6"]
fontstyle = ["tt", "i", "b", "u", "s", "strike", "big", "small"]
phrase = [
"em",
"strong",
"dfn",
"code",
"samp",
"kbd",
"var",
"cite",
"abbr",
"acronym",
]
special = [
"a",
"img",
"applet",
"object",
"font",
"basefont",
"br",
"script",
"map",
"q",
"sub",
"sup",
"span",
"bdo",
"iframe",
]
formctrl = ["input", "select", "textarea", "label", "button"]
lists = ["ul", "ol", " dir", "menu"]
head_misc = ["script", "style", "meta", "link", "object"]
pre_exclusion = [
"img",
"object",
"applet",
"big",
"small",
"sub",
"sup",
"font",
"basefont",
]
block = (
[
"p",
"pre",
"dl",
"div",
"center",
"noscript",
"noframes",
"blockquote",
"form",
"isindex",
"hr",
"table",
"fieldset",
"address",
]
+ heading
+ lists
)
inline = fontstyle + phrase + special + formctrl
flow = block + inline
html_content = ["head", "body"]
head_content = ["title", "isindex", "base"]
def setify(d):
return dict([(key, frozenset(val)) for key, val in d.items()])
def omit(allowed, tags):
result = {}
for k, v in allowed.items():
for t in tags:
if t in v:
v = v.union(allowed[t])
result[k] = v
return result
allowed = {
"a": inline,
"abbr": inline,
"acronym": inline,
"address": inline + ["p"],
"applet": flow + ["param"],
"b": inline,
"bdo": inline,
"big": inline,
"blockquote": flow,
"body": flow + ["ins", "del"],
"button": flow,
"caption": inline,
"center": flow,
"cite": inline,
"code": inline,
"colgroup": ["col"],
"dd": flow,
"del": flow,
"dfn": inline,
"dir": ["li"],
"div": flow,
"dl": ["dt", "dd"],
"dt": inline,
"em": inline,
"fieldset": flow + ["legend"],
"font": inline,
"form": flow,
"frameset": ["frameset", "frame", "noframes"],
"h1": inline,
"h2": inline,
"h3": inline,
"h4": inline,
"h5": inline,
"h6": inline,
"head": head_content + head_misc,
"html": html_content,
"i": inline,
"iframe": flow,
"ins": flow,
"kbd": inline,
"label": inline,
"legend": inline,
"li": flow,
"map": block + ["area"],
"menu": ["li"],
"noframes": flow,
"noscript": flow,
"object": flow + ["param"],
"ol": ["li"],
"optgroup": ["option"],
"option": [],
"p": inline,
"pre": inline,
"q": inline,
"s": inline,
"samp": inline,
"script": [],
"select": ["optgroup", "option"],
"small": inline,
"span": inline,
"strike": inline,
"strong": inline,
"style": [],
"sub": inline,
"sup": inline,
"table": ["caption", "col", "colgroup", "thead", "tfoot", "tbody"],
"tbody": ["tr"],
"td": flow,
"textarea": [],
"tfoot": ["tr"],
"th": flow,
"thead": ["tr"],
"title": [],
"tr": ["th", "td"],
"tt": inline,
"u": inline,
"ul": ["li"],
"var": inline,
}
allowed = setify(allowed)
allowed = omit(allowed, omit_start)
excluded = {
"a": ["a"],
"button": formctrl + ["a", "form", "isindex", "fieldset", "iframe"],
"dir": block,
"form": ["form"],
"label": ["label"],
"menu": block,
"pre": pre_exclusion,
}
excluded = setify(excluded)
# Don't show mobile TOC menu JS code from <script> tags on the CLI man page
masked = ["script"]
class HTMLParser(base.HTMLParser):
def __init__(self, entities=None):
base.HTMLParser.__init__(self)
self.tag_stack = []
self.excluded = frozenset()
self.excluded_stack = []
self.data = []
self.data_stack = []
self.decls = []
if entities:
self.entities = entities
else:
self.entities = {}
def top(self):
if self.tag_stack == []:
return None
else:
return self.tag_stack[-1][0]
def pop(self):
self.excluded = self.excluded_stack.pop()
data = self.data
self.data = self.data_stack.pop()
(tag, attrs) = self.tag_stack.pop()
if tag not in masked:
self.append((tag, attrs, data))
return tag
def METHOD_NAME(self, tag, attrs):
self.tag_stack.append((tag, attrs))
self.excluded_stack.append(self.excluded)
if tag in excluded:
self.excluded = self.excluded.union(excluded[tag])
self.data_stack.append(self.data)
self.data = []
def append(self, item):
self.data.append(item)
def is_allowed(self, tag):
return tag not in self.excluded and tag in allowed[self.top()]
def handle_starttag(self, tag, attrs):
if self.tag_stack != []:
while not self.is_allowed(tag):
self.pop()
if tag not in single:
self.METHOD_NAME(tag, attrs)
else:
self.append((tag, attrs, None))
def handle_entityref(self, name):
if name in self.entities:
self.handle_data(self.entities[name])
elif name in entitydefs:
self.handle_data(entitydefs[name])
else:
sys.stderr.write("unrecognized entity: %s\n" % name)
def handle_charref(self, name):
sys.stderr.write("unsupported character reference <%s>" % name)
def handle_data(self, data):
self.append(data)
def handle_endtag(self, tag):
while True:
if self.pop() == tag:
break
def handle_decl(self, decl):
self.decls.append(decl)
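# A minimal usage sketch (assumption, not part of the original module): feed a
# fragment, close the parser, flush any still-open elements, and read back the
# nested (tag, attrs, data) tree accumulated on the parser.
if __name__ == "__main__":
    p = HTMLParser()
    p.feed("<p>hello <b>world</b></p>")
    p.close()
    while p.tag_stack:
        p.pop()   # flush unclosed elements into p.data
    print(p.data)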
|
1,114 |
update
|
import os
import shutil
import unittest
import numpy as np
from tensorflow import keras
from neural_compressor import mix_precision
from neural_compressor.config import MixedPrecisionConfig
from neural_compressor.data import DataLoader, Datasets
def build_sequential_model():
# Create Keras model
model = keras.Sequential(
[
keras.layers.InputLayer(input_shape=(28, 28), name="input"),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10, activation="softmax", name="output"),
]
)
# Print model architecture
model.summary()
# Compile model with optimizer
opt = keras.optimizers.Adam(learning_rate=0.01)
model.compile(optimizer=opt, loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.save("./models/saved_model")
return
# Define a customized metric class
from neural_compressor.metric import BaseMetric
class MyMetric(BaseMetric):
def __init__(self, *args):
self.pred_list = []
self.label_list = []
self.samples = 0
def METHOD_NAME(self, predict, label):
self.pred_list.extend(np.argmax(predict, axis=1))
self.label_list.extend(label)
self.samples += len(label)
def reset(self):
self.pred_list = []
self.label_list = []
self.samples = 0
def result(self):
correct_num = np.sum(np.array(self.pred_list) == np.array(self.label_list))
return correct_num / self.samples
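# A tiny illustrative sketch (assumption, not part of the original tests) of the
# metric protocol that mix_precision.fit() drives internally:
# m = MyMetric()
# m.METHOD_NAME(np.array([[0.1, 0.9], [0.8, 0.2]]), [1, 1])  # one of two correct
# print(m.result())  # 0.5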
class MyMetric_keras(MyMetric):
def __init__(self, *args):
super(MyMetric_keras, self).__init__(*args)
class TestMixedPrecisionWithKerasModel(unittest.TestCase):
@classmethod
def setUpClass(self):
os.environ["FORCE_FP16"] = "1"
os.environ["FORCE_BF16"] = "1"
build_sequential_model()
@classmethod
def tearDownClass(self):
del os.environ["FORCE_FP16"]
del os.environ["FORCE_BF16"]
shutil.rmtree("./models", ignore_errors=True)
shutil.rmtree("./nc_workspace", ignore_errors=True)
def test_mixed_precision_with_keras_model(self):
# use dummy dataset for UT test
dataset = Datasets("tensorflow")["dummy"](shape=(10, 28, 28), low=0.0, high=1.0, label=True)
dataloader = DataLoader(framework="tensorflow", dataset=dataset)
config = MixedPrecisionConfig()
q_model = mix_precision.fit(
model="./models/saved_model", conf=config, eval_dataloader=dataloader, eval_metric=MyMetric()
)
# Optional, run quantized model
import tensorflow as tf
with tf.compat.v1.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.import_graph_def(q_model.graph_def, name="")
out = sess.run(["Identity:0"], feed_dict={"input:0": dataset.dataset})
print("Inference is done.")
found_cast = False
for i in q_model.graph_def.node:
if i.op == "Cast":
found_cast = True
break
self.assertEqual(found_cast, True)
def test_mixed_precision_with_keras_adaptor(self):
# use dummy dataset for UT test
dataset = Datasets("tensorflow")["dummy"](shape=(10, 28, 28), low=0.0, high=1.0, label=True)
dataloader = DataLoader(framework="tensorflow", dataset=dataset)
# add backend='itex' to run on keras adaptor
config = MixedPrecisionConfig(backend="itex")
bf16_model = mix_precision.fit(
model="./models/saved_model", config=config, eval_dataloader=dataloader, eval_metric=MyMetric_keras()
)
bf16_policy = keras.mixed_precision.Policy("mixed_bfloat16")
# bf16_model.model is an obj of tf.keras.Model
model_policy = bf16_model.model.dtype_policy
conv2d_layer_policy = bf16_model.model.get_layer("conv2d").dtype_policy
self.assertEqual(model_policy.compute_dtype, bf16_policy.compute_dtype)
self.assertEqual(conv2d_layer_policy.compute_dtype, bf16_policy.compute_dtype)
if __name__ == "__main__":
unittest.main()
|
1,115 |
get pg lossless profiles
|
import logging
import pytest
import re
from tests.common.helpers.assertions import pytest_assert, pytest_require
from tests.common.utilities import wait_until
from tests.common.helpers.dut_utils import verify_orchagent_running_or_assert
from tests.generic_config_updater.gu_utils import apply_patch, expect_op_success
from tests.generic_config_updater.gu_utils import generate_tmpfile, delete_tmpfile
from tests.generic_config_updater.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload
pytestmark = [
pytest.mark.topology('any'),
]
logger = logging.getLogger(__name__)
READ_APPL_DB_TIMEOUT = 480
READ_APPL_DB_INTERVAL = 20
@pytest.fixture(scope="module")
def ensure_dut_readiness(duthost):
"""
Setup/teardown fixture for dynamic threshold config update test
Args:
duthost: DUT host object
"""
verify_orchagent_running_or_assert(duthost)
create_checkpoint(duthost)
yield
try:
verify_orchagent_running_or_assert(duthost)
logger.info("Rolled back to original checkpoint")
rollback_or_reload(duthost)
finally:
delete_checkpoint(duthost)
def ensure_application_of_updated_config(duthost, value, pg_lossless_profiles):
"""
Ensures application of the JSON patch config update by verifying dynamic threshold value presence in DB
Args:
duthost: DUT host object
value: expected value of dynamic threshold
pg_lossless_profiles: all pg_lossless buffer profiles stored on the device
"""
def _confirm_value_in_appl_db_and_asic_db():
for pg_lossless_profile in pg_lossless_profiles:
# Retrieve dynamic_th from APPL_DB
dynamic_th_in_appl_db = duthost.shell("sonic-db-cli APPL_DB hget BUFFER_PROFILE_"
"TABLE:{} dynamic_th".format(pg_lossless_profile))["stdout"]
if dynamic_th_in_appl_db != value:
return False
# Retrieve dynamic_th from ASIC_DB
ingress_lossless_pool_oid = duthost.shell("sonic-db-cli COUNTERS_DB hget COUNTERS_BUFFER_POOL_NAME_MAP "
"ingress_lossless_pool")["stdout"]
buffer_pool_keys = duthost.shell("redis-cli -n 1 KEYS ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE:"
"oid*")["stdout_lines"]
for buffer_pool in buffer_pool_keys:
pool_oid = duthost.shell("sonic-db-cli ASIC_DB hget {} SAI_BUFFER_PROFILE_ATTR_"
"POOL_ID".format(buffer_pool))["stdout"]
if pool_oid == ingress_lossless_pool_oid:
xoff_val = duthost.shell("sonic-db-cli ASIC_DB hget {} SAI_BUFFER_PROFILE_ATTR_"
"XOFF_TH".format(buffer_pool))["stdout"]
dynamic_th_in_asic_db = duthost.shell("sonic-db-cli ASIC_DB hget {} SAI_BUFFER_PROFILE_"
"ATTR_SHARED_DYNAMIC_TH".format(buffer_pool))["stdout"]
# Dynamic threshold values are a mismatch for pg_lossless profiles
if dynamic_th_in_asic_db != value and len(xoff_val) > 0:
return False
return True
pytest_assert(
wait_until(READ_APPL_DB_TIMEOUT, READ_APPL_DB_INTERVAL, 0, _confirm_value_in_appl_db_and_asic_db),
"ASIC_DB or APPL_DB does not properly reflect new dynamic threshold expected value: {}".format(value)
)
def METHOD_NAME(duthost):
"""
Retrieves all pg_lossless buffer profiles that are present on the device. Ex. pg_lossless_100000_40m_profile
Args:
duthost: DUT host object
"""
pg_lossless_profiles_str = duthost.shell("redis-cli -n 0 KEYS *BUFFER_PROFILE_TABLE:pg_lossless*")["stdout_lines"]
pg_lossless_profiles_lst = []
for pg_lossless_profile_str in pg_lossless_profiles_str:
# Regex search for pg_lossless profiles
match = re.search(r"pg_lossless(.*)", pg_lossless_profile_str)
if match:
pg_lossless_profile = match.group()
else:
continue
pg_lossless_profiles_lst.append(pg_lossless_profile)
return pg_lossless_profiles_lst if len(pg_lossless_profiles_lst) > 0 else None
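# Illustrative shape of one entry in the JSON patch built by the test below
# (the profile name is an example; actual names depend on port speed and cable length):
# {"op": "replace",
#  "path": "/BUFFER_PROFILE/pg_lossless_100000_40m_profile/dynamic_th",
#  "value": "2"}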
@pytest.mark.parametrize("operation", ["replace"])
def test_dynamic_th_config_updates(duthost, ensure_dut_readiness, operation):
pg_lossless_profiles = METHOD_NAME(duthost)
pytest_require(pg_lossless_profiles, "DUT has no pg_lossless buffer profiles")
new_dynamic_th = "2"
json_patch = []
for pg_lossless_profile in pg_lossless_profiles:
individual_patch = {
"op": "{}".format(operation),
"path": "/BUFFER_PROFILE/{}/dynamic_th".format(pg_lossless_profile),
"value": new_dynamic_th
}
json_patch.append(individual_patch)
tmpfile = generate_tmpfile(duthost)
logger.info("tmpfile {} created for json patch of updating dynamic threshold and operation: {}"
.format(tmpfile, operation))
logger.info("value to be added to json patch: {}".format(new_dynamic_th))
try:
output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile)
expect_op_success(duthost, output)
ensure_application_of_updated_config(duthost, new_dynamic_th, pg_lossless_profiles)
logger.info("Config successfully updated and verified.")
finally:
delete_tmpfile(duthost, tmpfile)
|
1,116 |
get script
|
# -*- coding: UTF-8 -*-
#----------------------
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
from time import time
try:
from thread import start_new_thread, allocate_lock
except ImportError:
from _thread import start_new_thread, allocate_lock
from time import sleep, time
import threading
try:
import cPickle as pickle
except:
import pickle
from MythTV import OrdDict
try:
import Queue
except ImportError:
import queue as Queue
import lxml
import lxml.html
import os
import sys
import stat
from builtins import input
BASEURL = 'https://www.mythtv.org/wiki'
def getScripts():
return Script.getAll()
def getPage(**kwargs):
url = "{0}?{1}".format(BASEURL,
'&'.join(['{0}={1}'.format(k,v) for k,v in list(kwargs.items())]))
return lxml.html.parse(urlopen(url)).getroot()
def getWhatLinksHere(page):
root = getPage(title='Special:WhatLinksHere',
limit='500', target=page)
links = []
for link in root.xpath("//ul[@id='mw-whatlinkshere-list']/li"):
links.append('_'.join(link.find('a').text.split(' ')))
return links
class Script( object ):
_cache = None
_queue = Queue.Queue()
_pool = []
_running = True
_valid = False
_xp_info = lxml.etree.XPath("//span[@id='script-info']/text()")
_xp_names = lxml.etree.XPath("//div[@id='bodyContent']/div/div[@style='background: #EFEFEF; border: 1px dashed black; padding: 5px 5px 5px 5px;']/p/b/text()")
_xp_code = lxml.etree.XPath("//div[@id='bodyContent']/div/div[@style='background: #EFEFEF; border: 1px dashed black; padding: 5px 5px 5px 5px;']/pre/text()")
_xp_cat = lxml.etree.XPath("//div[@id='mw-normal-catlinks']/ul/li/a/text()")
@classmethod
def getAll(cls, refresh=False):
cls._running = True
scripts = []
try:
for link in getWhatLinksHere('Template:Script_info'):
scripts.append(cls(link))
cls._wait()
except KeyboardInterrupt:
cls._running = False
return []
cls._dumpCache()
scripts = [s for s in scripts if s.isValid()]
#scripts.sort() ### does not work in python3
scripts.sort(key=lambda s: s.url)
return scripts
@classmethod
def processQueue(cls):
while cls._running:
try:
script = cls._queue.get(False, 0.1)
except Queue.Empty:
sleep(0.1)
continue
script.processPage()
cls._queue.task_done()
@classmethod
def _wait(cls):
cls._queue.join()
cls._running = False
cls._pool = []
def __cmp__(self, other):
if self.info.name < other.info.name:
return -1
elif self.info.name > other.info.name:
return 1
else:
return 0
def __repr__(self):
return '<Script {0} at {1}>'.format(self.url, hex(id(self)))
def __init__(self, url, refresh=False, pool=4):
self.url = url
if self._cache is None:
self._loadCache(refresh)
if (url in self._cache) and not refresh:
self._fromCache(url)
return
self._queue.put(self)
if len(self._pool) == 0:
while pool:
pool -= 1
t = threading.Thread(target=self.processQueue)
t.start()
self._pool.append(t)
def isValid(self): return self._valid
def processPage(self):
try:
etree = getPage(title=self.url)
self.getInfo(etree)
self.METHOD_NAME(etree)
self.getCategory(etree)
self._toCache(self.url)
self._valid = True
except:
pass
def getInfo(self, etree):
text = self._xp_info(etree)[0].strip().split('\n')
for i in reversed(list(range(len(text)))):
if '=' not in text[i]:
text[i-1] += text.pop(i)
self.info = OrdDict([a.split('=') for a in text])
if self.info.webpage == 'none':
self.info.webpage = self.url
def METHOD_NAME(self, etree):
if 'http://' in self.info.file:
fp = urlopen(self.info.file)
code = fp.read()
fp.close()
name = self.info.file.rsplit('/',1)[1].split('?',1)[0]
self.code = {name: code}
if self.info.name == 'unnamed':
self.info.name = name
else:
names = self._xp_names(etree)
code = self._xp_code(etree)
name = ''
size = 0
for i in list(range(len(code))):
names[i] = str(names[i])
if size < len(code[i]):
size = len(code[i])
name = names[i]
code[i] = code[i].lstrip()
code[i] = code[i].replace('\xa0',' ')
self.code = dict(list(zip(names,code)))
if self.info.name == 'unnamed':
self.info.name = name
def getCategory(self, etree):
self.category = [str(c) for c in self._xp_cat(etree)]
def saveScript(self, name, path):
fd = open(path,'w')
fd.write(self.code[name])
fd.close()
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def _toCache(self, title):
self._cache[title] = OrdDict(())
self._cache[title].info = self.info
self._cache[title].code = self.code
self._cache[title].category = self.category
self._cache[title].time = time()
def _fromCache(self, title):
if time()-self._cache[title].time > 1800:
# cache has expired
return False
self.info = self._cache[title].info
self.code = self._cache[title].code
self.category = self._cache[title].category
self._valid = True
return True
@classmethod
def _loadCache(cls, refresh):
if refresh:
cls._cache = {}
return
if (sys.version_info[0] == 2):
path = '/tmp/mythwikiscripts.pickle'
fmode = 'r'
else:
path = '/tmp/mythwikiscripts.pickle3'
fmode = 'rb'
if os.access(path, os.F_OK):
try:
fd = open(path, fmode)
cls._cache = pickle.load(fd)
fd.close()
except:
os.remove(path)
cls._cache = {}
else:
cls._cache = {}
@classmethod
def _dumpCache(cls):
### XXX TODO: align pickle protocol versions
try:
if (sys.version_info[0] == 2):
path = '/tmp/mythwikiscripts.pickle'
fd = open(path,'w')
cls._cache = pickle.dump(cls._cache,fd)
else:
path = '/tmp/mythwikiscripts.pickle3'
fd = open(path,'wb')
cls._cache = pickle.dump(cls._cache,fd, protocol=1)
fd.close()
except:
os.remove(path)
raise
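# A brief usage sketch (assumption, not part of the original module; requires
# network access to the MythTV wiki):
# scripts = getScripts()
# for script in scripts:
#     print(script.info.name, script.category)
# scripts[0].saveScript(list(scripts[0].code)[0], '/tmp/example_script')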
|
1,117 |
set up
|
# -*- coding: utf-8
# pylint: disable=line-too-long
"""
Unit tests for the split_fasta function.
"""
import unittest as ut
import os
import anvio
from anvio.errors import FilesNPathsError
from anvio.utils import split_fasta
from anvio.fastalib import ReadFasta
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2019, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "Ryan Moore"
__email__ = "[email protected]"
class SplitFastaTestCase(ut.TestCase):
def METHOD_NAME(self):
self.this_dir = os.path.dirname(os.path.realpath(__file__))
self.test_files = os.path.join(self.this_dir, "test_files")
self.not_fasta = os.path.join(self.test_files, "not_a_fasta.txt")
self.empty_fasta = os.path.join(self.test_files, "empty.fasta")
self.single_seq_fasta = os.path.join(self.test_files, "one_sequence.fasta")
self.five_seq_fasta = os.path.join(self.test_files, "five_sequences.fasta")
def test_non_existent_file_raises_error(self):
self.assertRaises(FilesNPathsError, split_fasta, "arstoien.fasta")
def test_non_fasta_file_raises_error(self):
self.assertRaises(FilesNPathsError, split_fasta, self.not_fasta)
def test_empty_fasta_file_raises_error(self):
self.assertRaises(FilesNPathsError, split_fasta, self.empty_fasta)
def test_single_fasta_gives_one_split(self):
out_files = split_fasta(self.single_seq_fasta)
expected_out_file = os.path.join(self.test_files, f'{self.single_seq_fasta}.0')
self.assertEqual(out_files, [expected_out_file])
self.assertTrue(os.path.exists(expected_out_file))
fasta = ReadFasta(expected_out_file)
self.assertEqual(fasta.ids, ['seq1 apple'])
self.assertEqual(fasta.sequences, ['AA'])
fasta.close()
os.remove(expected_out_file)
def test_fasta_splitting(self):
parts = 2
expected_out_files = [os.path.join(self.test_files, f'{self.five_seq_fasta}.{i}') for i in range(parts)]
out_files = split_fasta(self.five_seq_fasta, parts=parts)
self.assertEqual(out_files, expected_out_files)
fasta = ReadFasta(out_files[0])
self.assertEqual(fasta.ids, ['seq1 apple', 'seq2 banana'])
self.assertEqual(fasta.sequences, ['AA', 'ACAC'])
fasta.close()
fasta = ReadFasta(out_files[1])
self.assertEqual(fasta.ids, ['seq3 cat', 'seq4 dog', 'seq5 extra'])
self.assertEqual(fasta.sequences, ['ACTACT', 'ACTGACTG', 'ACTGAACTGA'])
fasta.close()
for f in out_files:
os.remove(f)
def test_more_parts_than_sequences(self):
parts = 10
num_sequences = 5
expected_out_files = [os.path.join(self.test_files, f'{self.five_seq_fasta}.{i}') for i in range(num_sequences)]
out_files = split_fasta(self.five_seq_fasta, parts=parts)
self.assertEqual(out_files, expected_out_files)
for f in out_files:
os.remove(f)
def test_custom_prefix(self):
parts = 1
file_name_prefix = 'silly'
out_files = split_fasta(self.five_seq_fasta, parts=parts, file_name_prefix=file_name_prefix, output_dir=self.this_dir)
expected_out_files = [os.path.join(self.this_dir, 'silly.0')]
self.assertEqual(out_files, expected_out_files)
for f in out_files:
os.remove(f)
def test_shuffle_mode(self):
parts = 2
out_files = split_fasta(self.five_seq_fasta, parts=parts, shuffle=True)
fasta = ReadFasta(out_files[0])
self.assertEqual(fasta.ids, ['seq1 apple', 'seq3 cat', 'seq5 extra'])
self.assertEqual(fasta.sequences, ['AA', 'ACTACT', 'ACTGAACTGA'])
fasta.close()
fasta = ReadFasta(out_files[1])
self.assertEqual(fasta.ids, ['seq2 banana', 'seq4 dog'])
self.assertEqual(fasta.sequences, ['ACAC', 'ACTGACTG'])
fasta.close()
for f in out_files:
os.remove(f)
|
1,118 |
test create person with departments
|
# -*- coding: UTF-8 -*-
from tests.base import ApiDBTestCase
from zou.app.models.department import Department
from zou.app.models.person import Person
from zou.app.utils import fields
from operator import itemgetter
class PersonTestCase(ApiDBTestCase):
def setUp(self):
super(PersonTestCase, self).setUp()
self.generate_fixture_person(
first_name="Ema",
last_name="Doe",
desktop_login="ema.doe",
email="[email protected]",
)
self.generate_fixture_person(
first_name="Jérémy",
last_name="Utêfœuit",
desktop_login="jeremy.utf8",
email="[email protected]",
)
self.generate_fixture_person()
def test_repr(self):
self.assertEqual(str(self.person), "<Person John Doe>")
self.person.first_name = "Léon"
self.assertEqual(str(self.person), "<Person Léon Doe>")
def test_get_persons(self):
persons = self.get("data/persons")
self.assertEqual(len(persons), 4)
self.assertEqual(persons[0]["type"], "Person")
def test_present(self):
person = self.get_first("data/persons")
person_model = Person.get(person["id"])
person_dict = person_model.present_minimal()
self.assertEqual(person_dict["departments"], [])
person_dict = person_model.present_minimal(relations=True)
self.assertEqual(person_dict["departments"], [])
def test_get_person(self):
person = self.get_first("data/persons")
person_again = self.get("data/persons/%s" % person["id"])
self.assertEqual(person, person_again)
person_with_relations = self.get(
"data/persons/%s?relations=true" % person["id"]
)
self.assertTrue("departments" in person_with_relations)
self.get_404("data/persons/%s" % fields.gen_uuid())
def test_create_person(self):
data = {
"first_name": "John2",
"last_name": "Doe",
"email": "[email protected]",
}
self.person = self.post("data/persons/new", data)
self.assertIsNotNone(self.person["id"])
persons = self.get("data/persons")
self.assertEqual(len(persons), 5)
def test_create_too_much_person(self):
from zou.app import config
config.USER_LIMIT = 4
data = {
"first_name": "John3",
"last_name": "Doe",
"email": "[email protected]",
}
resp = self.post("data/persons/new", data, 400)
self.assertEqual(resp["limit"], 4)
config.USER_LIMIT = 100
def test_create_person_with_no_data(self):
data = {}
self.person = self.post("data/persons/new", data, 400)
def test_create_person_with_wrong_data(self):
data = {
"name": "John Doe",
"first_name": "John",
"last_name": "Doe",
}
self.person = self.post("data/persons/new", data, 400)
def METHOD_NAME(self):
self.generate_fixture_department()
departments = [
str(department.id) for department in Department.query.all()
]
data = {
"first_name": "John2",
"last_name": "Doe",
"email": "[email protected]",
"departments": departments,
}
person = self.post("data/persons/new", data)
self.assertIsNotNone(person["id"])
self.assertEqual(
set(person["departments"]),
set(departments),
)
created_person = Person.get(person["id"])
self.assertEqual(
set(
str(department.id) for department in created_person.departments
),
set(departments),
)
def test_update_person(self):
person = self.get_first("data/persons")
data = {
"first_name": "Johnny",
}
self.put("data/persons/%s" % person["id"], data)
person_again = self.get("data/persons/%s" % person["id"])
self.assertEqual(data["first_name"], person_again["first_name"])
self.put_404("data/persons/%s" % fields.gen_uuid(), data)
def test_update_person_with_departments(self):
self.generate_fixture_department()
person = self.get_first("data/persons")
departments = [
str(department.id) for department in Department.query.all()
]
data = {
"first_name": "Johnny",
"departments": departments,
}
self.put("data/persons/%s" % person["id"], data)
person_again = Person.get(person["id"])
self.assertEqual(
set(str(department.id) for department in person_again.departments),
set(departments),
)
def test_set_active_when_too_much_person(self):
from zou.app import config
config.USER_LIMIT = 3
persons = self.get("data/persons")
person = [
person for person in persons if person["id"] != self.user["id"]
][0]
data = {"active": False}
self.put("data/persons/%s" % person["id"], data, 200)
data = {"active": True}
self.put("data/persons/%s" % person["id"], data, 400)
config.USER_LIMIT = 100
data = {"active": True}
self.put("data/persons/%s" % person["id"], data)
def test_delete_person(self):
persons = self.get("data/persons")
self.assertEqual(len(persons), 4)
persons = sorted(persons, key=itemgetter("email"))
person = persons[1]
self.delete("data/persons/%s" % person["id"])
persons = self.get("data/persons")
self.assertEqual(len(persons), 3)
self.delete_404("data/persons/%s" % fields.gen_uuid())
persons = self.get("data/persons")
self.assertEqual(len(persons), 3)
def test_force_delete(self):
self.generate_fixture_task_status_todo()
self.generate_shot_suite()
self.generate_assigned_task()
self.generate_fixture_comment()
self.person_id = str(self.person.id)
self.get("data/persons/%s" % self.person_id)
self.delete("data/persons/%s" % self.person_id, 400)
self.delete("data/persons/%s?force=true" % self.person_id)
self.get("data/persons/%s" % self.person_id, 404)
|
1,119 |
teller1
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
from functools import partial
import random
import numpy as np
class TestTransposeOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
x86_places = [
Place(TargetType.X86, PrecisionType.FP32, DataLayoutType.NCHW)
]
self.enable_testing_on_place(places=x86_places)
arm_places = [
Place(TargetType.ARM, PrecisionType.FP32, DataLayoutType.NCHW)
]
self.enable_testing_on_place(places=arm_places)
# OpenCL has large accuracy diffs
opencl_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_places)
metal_places = [
Place(TargetType.Metal, PrecisionType.FP32,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.Metal, PrecisionType.FP16,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.ARM, PrecisionType.FP32),
Place(TargetType.Host, PrecisionType.FP32)
]
# self.enable_testing_on_place(places=metal_places)
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.FP16,
DataLayoutType.NCHW,
thread=[1, 4])
self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)
self.enable_devices_on_nnadapter(device_names=[
"kunlunxin_xtcl", "nvidia_tensorrt", "intel_openvino"
])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
N = draw(st.integers(min_value=1, max_value=4))
C = draw(st.integers(min_value=1, max_value=17))
H = draw(st.integers(min_value=1, max_value=17))
W = draw(st.integers(min_value=1, max_value=17))
in_shape = draw(st.sampled_from([[N, C, H, W], []]))
# transpose only supports float32,
# so we only feed float32 inputs
in_dtype = draw(st.sampled_from([np.float32]))
use_mkldnn_data = False
target = self.get_target()
if (target == "X86"):
use_mkldnn_data = True
axis_int32_data = draw(
st.lists(
st.integers(
min_value=0, max_value=3), min_size=3, max_size=4))
if (len(axis_int32_data) == 3):
assume(
sorted(axis_int32_data) == [0, 1, 2] and
axis_int32_data != [0, 1, 2])
in_shape = draw(st.sampled_from([[C, H, W]]))
elif (len(axis_int32_data) == 4):
assume(
sorted(axis_int32_data) == [0, 1, 2, 3] and
axis_int32_data != [0, 1, 2, 3])
if in_shape == []:
axis_int32_data = []
def generate_X_data():
return np.random.normal(0.0, 5.0, in_shape).astype(in_dtype)
if (target == "Metal" and len(axis_int32_data) == 4):
for i in range(4):
for j in range(4):
if i != j:
assume(in_shape[axis_int32_data.index(i)] *
(in_shape[axis_int32_data.index(j)] + 3
) / 4 <= 2048)
transpose_op = OpConfig(
type="transpose",
inputs={"X": ["X_data"]},
outputs={"Out": ["output_data"]},
attrs={
"axis": axis_int32_data,
"data_format": "AnyLayout",
"use_mkldnn": use_mkldnn_data,
})
program_config = ProgramConfig(
ops=[transpose_op],
weights={},
inputs={
"X_data": TensorConfig(data_gen=partial(generate_X_data)),
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
atol, rtol = 1e-5, 1e-5
target_str = self.get_target()
if target_str == "Metal":
atol, rtol = 5e-4, 5e-4
return self.get_predictor_configs(), ["transpose"], (atol, rtol)
def add_ignore_pass_case(self):
def METHOD_NAME(program_config, predictor_config):
x_shape = list(program_config.inputs["X_data"].shape)
if predictor_config.target() == TargetType.Metal:
return True
self.add_ignore_check_case(
METHOD_NAME, IgnoreReasons.ACCURACY_ERROR,
"The op output has diff on metal. We need to fix it as soon as possible."
)
def _teller2(program_config, predictor_config):
if "nvidia_tensorrt" in self.get_nnadapter_device_name():
in_shape = program_config.inputs["X_data"].shape
axis = program_config.ops[0].attrs["axis"]
if len(in_shape) == 1 or axis[0] != 0:
return True
self.add_ignore_check_case(
_teller2, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Lite does not support 'in_shape_size == 1' or 'axis[0] != 0' on nvidia_tensorrt."
)
def _teller3(program_config, predictor_config):
target_type = predictor_config.target()
in_x_shape = list(program_config.inputs["X_data"].shape)
if target_type not in [
TargetType.ARM, TargetType.Host, TargetType.X86
]:
if len(in_x_shape) == 0:
return True
self.add_ignore_check_case(
_teller3, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Only test 0D-tensor on CPU(ARM/Host/X86/Metal/OpenCL) now.")
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main(argv=[''])
|
1,120 |
format metadata
|
import os
import shlex
import subprocess
from pathlib import Path
import click
import requests
import yaml
import prefect_server
from prefect_server import config
HASURA_DIR = Path(prefect_server.__file__).parents[2] / "services" / "hasura"
@click.group()
def hasura():
"""
Commands for working with Hasura
"""
def apply_hasura_metadata(endpoint=None, metadata_path=None, verbose=True):
if endpoint is None:
endpoint = f"http://{config.hasura.host}:{config.hasura.port}"
if metadata_path is None:
metadata_path = HASURA_DIR / "migrations" / "metadata.yaml"
endpoint = os.path.join(endpoint, "v1", "query")
with open(metadata_path, "r") as f:
metadata = yaml.load(f, Loader=yaml.SafeLoader)
response = requests.post(
endpoint,
json={"type": "replace_metadata", "args": {"metadata": metadata}},
)
try:
response.raise_for_status()
if verbose:
click.secho(f"Applied Hasura metadata from {metadata_path}")
except Exception as exc:
# echo the error
click.secho(
f"\nError applying Hasura metadata from {metadata_path}",
fg="red",
bold=True,
)
raise click.ClickException(f"Failed to apply Hasura metadata! Error: {exc}")
@hasura.command()
@click.option(
"--endpoint",
"-e",
help="The GraphQL Engine URL",
default=f"http://{config.hasura.host}:{config.hasura.port}",
)
@click.option(
"--metadata-path",
"-m",
help="The metadata path",
default=HASURA_DIR / "migrations" / "metadata.yaml",
show_default=False,
)
def apply_metadata(endpoint, metadata_path):
"""
Applies Hasura metadata. Overwrites any existing schema.
"""
response = None
try:
click.secho("\nApplying Hasura metadata...")
apply_hasura_metadata(endpoint=endpoint, metadata_path=metadata_path)
click.secho("\nFinished!", fg="green")
except Exception as exc:
click.secho("\nCould not apply metadata!", bg="red", bold=True)
if response is not None:
raise click.ClickException(response.content)
raise click.ClickException(exc)
@hasura.command()
@click.option(
"--endpoint",
"-e",
help="The GraphQL Engine URL",
default=f"http://{config.hasura.host}:{config.hasura.port}",
)
@click.option(
"--metadata-path",
"-m",
help="The metadata path",
default=HASURA_DIR / "migrations" / "metadata.yaml",
show_default=False,
)
def export_metadata(endpoint, metadata_path):
"""
Exports Hasura metadata. Overwrites any existing schema.
"""
response = None
try:
click.secho("\nExporting Hasura metadata...")
endpoint = os.path.join(endpoint, "v1", "query")
response = requests.post(
endpoint,
json={"type": "export_metadata", "args": {}},
)
response.raise_for_status()
with open(metadata_path, "w") as f:
yaml.dump(response.json(), f, default_flow_style=False)
click.secho("\nFinished!", fg="green")
except Exception as exc:
click.secho("\nCould not export metadata!", bg="red", bold=True)
if response is not None:
raise click.ClickException(response.content)
raise click.ClickException(exc)
@hasura.command()
@click.option(
"--endpoint",
"-e",
help="The GraphQL Engine URL",
default=f"http://{config.hasura.host}:{config.hasura.port}",
)
def drop_inconsistent_metadata(endpoint):
"""
Drops inconsistent metadata from Hasura, including any tables or columns that are referenced but not found in the database.
"""
response = None
try:
click.secho("\nDropping inconsistent Hasura metadata...")
endpoint = os.path.join(endpoint, "v1", "query")
response = requests.post(
endpoint,
json={"type": "drop_inconsistent_metadata", "args": {}},
)
response.raise_for_status()
click.secho("\nFinished!", fg="green")
except Exception as exc:
click.secho("\nCould not drop inconsistent metadata!", bg="red", bold=True)
if response is not None:
raise click.ClickException(response.content)
raise click.ClickException(exc)
@hasura.command()
@click.option(
"--endpoint",
"-e",
help="The GraphQL Engine URL",
default=f"http://{config.hasura.host}:{config.hasura.port}",
)
def clear_metadata(endpoint):
"""
Clear Hasura metadata. Overwrites any existing schema.
"""
response = None
try:
click.secho("\nClearing Hasura metadata...")
endpoint = os.path.join(endpoint, "v1", "query")
response = requests.post(
endpoint,
json={"type": "clear_metadata", "args": {}},
)
response.raise_for_status()
click.secho("\nFinished!", fg="green")
except Exception as exc:
click.secho("\nCould not clear metadata!", bg="red", bold=True)
if response is not None:
raise click.ClickException(response.content)
raise click.ClickException(exc)
@hasura.command()
@click.option(
"--endpoint",
"-e",
help="The GraphQL Engine URL",
default=f"http://{config.hasura.host}:{config.hasura.port}",
)
def reload_metadata(endpoint):
"""
Reloads Hasura metadata. Helpful if the Postgres schema has changed.
"""
response = None
try:
click.secho("\nReloading Hasura metadata...")
endpoint = os.path.join(endpoint, "v1", "query")
response = requests.post(
endpoint,
json={"type": "reload_metadata", "args": {}},
)
response.raise_for_status()
click.secho("\nFinished!", fg="green")
except Exception as exc:
click.secho("\nCould not reload metadata!", bg="red", bold=True)
if response is not None:
raise click.ClickException(response.content)
raise click.ClickException(exc)
@hasura.command()
@click.option(
"--endpoint",
"-e",
help="The GraphQL Engine URL",
default=f"http://{config.hasura.host}:{config.hasura.port}",
)
def console(endpoint):
"""
Opens the Hasura console
Note: requires installing the Hasura CLI. See https://docs.hasura.io/graphql/manual/hasura-cli/install-hasura-cli.html
"""
try:
cmd = shlex.split(f"hasura console --endpoint {endpoint} --skip-update-check")
subprocess.check_output(cmd, cwd=HASURA_DIR)
except Exception as exc:
click.secho("\nCould not open console!", bg="red", bold=True)
raise click.ClickException(exc)
@hasura.command()
@click.option(
"--metadata-path",
"-m",
help="The metadata path",
default=HASURA_DIR / "migrations" / "metadata.yaml",
show_default=False,
)
def METHOD_NAME(metadata_path):
"""
Sorts and reformats the Hasura metadata
"""
with open(metadata_path, "r") as f:
data = yaml.safe_load(f)
with open(metadata_path, "w") as g:
yaml.dump(
prefect_server.utilities.tests.yaml_sorter(data),
g,
default_flow_style=False,
)
click.secho("Metadata file sorted.", fg="green")
|
1,121 |
colorize
|
#!/usr/bin/env python3
# Copyright (c) 2017 Kurt Jacobson
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from copy import copy
from logging import Formatter
PREFIX = '\033['
SUFFIX = '\033[0m'
COLORS = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
'bgred': 41,
'bggrey': 100
}
MAPPING = {
'VERBOSE': 'bggrey',
'DEBUG': 'blue',
'INFO': 'cyan',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'bgred',
}
# Returns `text` wrapped in ANSI escape codes to produce `color`
def METHOD_NAME(text, color=None):
seq = COLORS.get(color, 37) # default to white
return '{0}{1}m{2}{3}'.format(PREFIX, seq, text, SUFFIX)
# Matches only the first `color<text>` occurrence
# ^(.*?)<([^)]+)>
# Matches all `color<text>` occurrences, both take the same number of steps
# ([^<\s]+)<([^>]+)>
# (\w+)<([^>]+)>
RE = re.compile(r'(\w+)<([^>]+)>')
class ColoredFormatter(Formatter):
def __init__(self, pattern):
Formatter.__init__(self, pattern)
# Override the Formatter's format method to add ASCII colors
# to the levelname and any marked words in the log message.
def format(self, record):
colored_record = copy(record)
# Add colors to levelname
levelname = colored_record.levelname
color = MAPPING.get(levelname, 'white')
colored_record.levelname = METHOD_NAME(levelname, color)
# Add colors to tagged message text
msg = colored_record.getMessage()
plain_msg, color_msg = self.color_words(msg)
record.msg = plain_msg
colored_record.msg = color_msg
return Formatter.format(self, colored_record)
# Replace `color<message>` in the log message with ANSI codes to colorize
# the word or phrase in the terminal log handler. Also return a cleaned
# version of the message with the tags removed for use by the file handler.
def color_words(self, raw_msg):
plain_msg = color_msg = raw_msg
if '<' in raw_msg: # If no tag don't try to match
iterator = RE.finditer(raw_msg)
if iterator:
for match in iterator:
group = match.group()
color = match.group(1)
word = match.group(2)
# Could be optimized, but will rarely have more than one match
color_msg = color_msg.replace(group, METHOD_NAME(word, color))
plain_msg = plain_msg.replace(group, word)
return plain_msg, color_msg
# ----------------------- E X A M P L E -----------------------
if __name__ == '__main__':
import logging
# Create logger
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# Add console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
cf = ColoredFormatter("[%(name)s][%(levelname)s] %(message)s (%(filename)s:%(lineno)d)")
ch.setFormatter(cf)
log.addHandler(ch)
# Add file handler
fh = logging.FileHandler('demo.log')
fh.setLevel(logging.DEBUG)
ff = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(ff)
log.addHandler(fh)
# Log some stuff
log.debug("Logging demo has green<STARTED>")
log.info("Logging to yellow<demo.log> in the script dir")
log.warning("This is my last warning, red<take heed>")
log.error("This is an error")
log.critical("He's dead, Jim")
# Example exception logging
try:
print(False + "True")
except Exception as e:
log.debug('That did not work!', exc_info=e)
|
1,122 |
list for each reverse
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# SPDX-License-Identifier: LGPL-2.1-or-later
"""
Linked Lists
------------
The ``drgn.helpers.linux.list`` module provides helpers for working with the
doubly-linked list implementations (``struct list_head`` and ``struct
hlist_head``) in :linux:`include/linux/list.h`.
"""
from typing import Iterator, Union
from drgn import NULL, Object, Type, container_of
from drgn.helpers import ValidationError
__all__ = (
"hlist_empty",
"hlist_for_each",
"hlist_for_each_entry",
"list_count_nodes",
"list_empty",
"list_first_entry",
"list_first_entry_or_null",
"list_for_each",
"list_for_each_entry",
"list_for_each_entry_reverse",
"list_for_each_reverse",
"list_is_singular",
"list_last_entry",
"list_next_entry",
"list_prev_entry",
"validate_list",
"validate_list_for_each",
"validate_list_for_each_entry",
)
def list_empty(head: Object) -> bool:
"""
Return whether a list is empty.
:param head: ``struct list_head *``
"""
head = head.read_()
return head.next == head
def list_is_singular(head: Object) -> bool:
"""
Return whether a list has only one element.
:param head: ``struct list_head *``
"""
head = head.read_()
next = head.next
return next != head and next == head.prev
def list_count_nodes(head: Object) -> int:
"""
Return the number of nodes in a list.
:param head: ``struct list_head *``
"""
return sum(1 for _ in list_for_each(head))
def list_first_entry(head: Object, type: Union[str, Type], member: str) -> Object:
"""
Return the first entry in a list.
The list is assumed to be non-empty.
See also :func:`list_first_entry_or_null()`.
:param head: ``struct list_head *``
:param type: Entry type.
:param member: Name of list node member in entry type.
:return: ``type *``
"""
return container_of(head.next, type, member)
def list_first_entry_or_null(
head: Object, type: Union[str, Type], member: str
) -> Object:
"""
Return the first entry in a list or ``NULL`` if the list is empty.
See also :func:`list_first_entry()`.
:param head: ``struct list_head *``
:param type: Entry type.
:param member: Name of list node member in entry type.
:return: ``type *``
"""
head = head.read_()
pos = head.next.read_()
if pos == head:
return NULL(head.prog_, head.prog_.pointer_type(head.prog_.type(type)))
else:
return container_of(pos, type, member)
def list_last_entry(head: Object, type: Union[str, Type], member: str) -> Object:
"""
Return the last entry in a list.
The list is assumed to be non-empty.
:param head: ``struct list_head *``
:param type: Entry type.
:param member: Name of list node member in entry type.
:return: ``type *``
"""
return container_of(head.prev, type, member)
def list_next_entry(pos: Object, member: str) -> Object:
"""
Return the next entry in a list.
:param pos: ``type*``
:param member: Name of list node member in entry type.
:return: ``type *``
"""
return container_of(getattr(pos, member).next, pos.type_.type, member)
def list_prev_entry(pos: Object, member: str) -> Object:
"""
Return the previous entry in a list.
:param pos: ``type*``
:param member: Name of list node member in entry type.
:return: ``type *``
"""
return container_of(getattr(pos, member).prev, pos.type_.type, member)
def list_for_each(head: Object) -> Iterator[Object]:
"""
Iterate over all of the nodes in a list.
:param head: ``struct list_head *``
:return: Iterator of ``struct list_head *`` objects.
"""
head = head.read_()
pos = head.next.read_()
while pos != head:
yield pos
pos = pos.next.read_()
def METHOD_NAME(head: Object) -> Iterator[Object]:
"""
Iterate over all of the nodes in a list in reverse order.
:param head: ``struct list_head *``
:return: Iterator of ``struct list_head *`` objects.
"""
head = head.read_()
pos = head.prev.read_()
while pos != head:
yield pos
pos = pos.prev.read_()
def list_for_each_entry(
type: Union[str, Type], head: Object, member: str
) -> Iterator[Object]:
"""
Iterate over all of the entries in a list.
:param type: Entry type.
:param head: ``struct list_head *``
:param member: Name of list node member in entry type.
:return: Iterator of ``type *`` objects.
"""
type = head.prog_.type(type)
for pos in list_for_each(head):
yield container_of(pos, type, member)
def list_for_each_entry_reverse(
type: Union[str, Type], head: Object, member: str
) -> Iterator[Object]:
"""
Iterate over all of the entries in a list in reverse order.
:param type: Entry type.
:param head: ``struct list_head *``
:param member: Name of list node member in entry type.
:return: Iterator of ``type *`` objects.
"""
type = head.prog_.type(type)
for pos in METHOD_NAME(head):
yield container_of(pos, type, member)
def validate_list(head: Object) -> None:
"""
Validate that the ``next`` and ``prev`` pointers in a list are consistent.
:param head: ``struct list_head *``
:raises ValidationError: if the list is invalid
"""
for _ in validate_list_for_each(head):
pass
def validate_list_for_each(head: Object) -> Iterator[Object]:
"""
Like :func:`list_for_each()`, but validates the list like
:func:`validate_list()` while iterating.
:param head: ``struct list_head *``
:raises ValidationError: if the list is invalid
"""
head = head.read_()
pos = head.next.read_()
while pos != head:
yield pos
next = pos.next.read_()
next_prev = next.prev.read_()
if next_prev != pos:
raise ValidationError(
f"{pos.format_(dereference=False, symbolize=False)}"
f" next {next.format_(dereference=False, symbolize=False, type_name=False)}"
f" has prev {next_prev.format_(dereference=False, symbolize=False, type_name=False)}"
)
pos = next
def validate_list_for_each_entry(
type: Union[str, Type], head: Object, member: str
) -> Iterator[Object]:
"""
Like :func:`list_for_each_entry()`, but validates the list like
:func:`validate_list()` while iterating.
:param type: Entry type.
:param head: ``struct list_head *``
:param member: Name of list node member in entry type.
:raises ValidationError: if the list is invalid
"""
type = head.prog_.type(type)
for pos in validate_list_for_each(head):
yield container_of(pos, type, member)
def hlist_empty(head: Object) -> bool:
"""
Return whether a hash list is empty.
:param head: ``struct hlist_head *``
"""
return not head.first
def hlist_for_each(head: Object) -> Iterator[Object]:
"""
Iterate over all of the nodes in a hash list.
:param head: ``struct hlist_head *``
:return: Iterator of ``struct hlist_node *`` objects.
"""
pos = head.first.read_()
while pos:
yield pos
pos = pos.next.read_()
def hlist_for_each_entry(
type: Union[str, Type], head: Object, member: str
) -> Iterator[Object]:
"""
Iterate over all of the entries in a hash list.
:param type: Entry type.
:param head: ``struct hlist_head *``
:param member: Name of list node member in entry type.
:return: Iterator of ``type *`` objects.
"""
type = head.prog_.type(type)
for pos in hlist_for_each(head):
yield container_of(pos, type, member)
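# ----------------------- E X A M P L E -----------------------
# A minimal usage sketch, not part of the module itself. It assumes the file
# is run via ``drgn script.py`` against a live or post-mortem kernel, so that
# ``prog`` is predefined, and that the kernel exposes the global ``modules``
# list (a ``struct list_head`` linking ``struct module`` entries via ``list``).
if __name__ == "__main__":
    modules_head = prog["modules"].address_of_()  # noqa: F821 - ``prog`` comes from drgn
    # Walk the module list forward and print each module name.
    for mod in list_for_each_entry("struct module", modules_head, "list"):
        print(mod.name.string_().decode())
    # Count the nodes, then check next/prev consistency of the same list.
    print(list_count_nodes(modules_head), "modules")
    validate_list(modules_head)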
|
1,123 |
retrieve authorization token
|
from tapiriik.settings import WEB_ROOT, TRAINASONE_SERVER_URL, TRAINASONE_CLIENT_SECRET, TRAINASONE_CLIENT_ID
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.service_record import ServiceRecord
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit, Waypoint, WaypointType, Location, Lap
from tapiriik.services.api import APIException, UserException, UserExceptionType, APIExcludeActivity
from tapiriik.services.fit import FITIO
from tapiriik.services.tcx import TCXIO
from django.core.urlresolvers import reverse
from datetime import datetime, timedelta
from urllib.parse import urlencode
import calendar
import dateutil.parser
import requests
import os
import logging
import pytz
import re
import time
import json
logger = logging.getLogger(__name__)
class TrainAsONEService(ServiceBase):
# XXX need to normalise API paths - some URLs contain an additional /api prefix when going direct to the main server
ID = "trainasone"
DisplayName = "TrainAsONE"
DisplayAbbreviation = "TAO"
AuthenticationType = ServiceAuthenticationType.OAuth
AuthenticationNoFrame = True # iframe too small
LastUpload = None
SupportsHR = SupportsCadence = SupportsTemp = SupportsPower = True
SupportsActivityDeletion = False
SupportedActivities = ActivityType.List() # All
def UserUploadedActivityURL(self, uploadId):
raise NotImplementedError
# XXX need to include user id
# return TRAINASONE_SERVER_URL + "/activities/view?targetUserId=%s&activityId=%s" % uploadId
def WebInit(self):
params = {'scope':'SYNCHRONIZE_ACTIVITIES',
'client_id':TRAINASONE_CLIENT_ID,
'response_type':'code',
'redirect_uri':WEB_ROOT + reverse("oauth_return", kwargs={"service": "trainasone"})}
self.UserAuthorizationURL = TRAINASONE_SERVER_URL + "/oauth/authorise?" + urlencode(params)
def _apiHeaders(self, authorization):
return {"Authorization": "Bearer " + authorization["OAuthToken"]}
def METHOD_NAME(self, req, level):
code = req.GET.get("code")
params = {"grant_type": "authorization_code", "code": code, "client_id": TRAINASONE_CLIENT_ID, "client_secret": TRAINASONE_CLIENT_SECRET, "redirect_uri": WEB_ROOT + reverse("oauth_return", kwargs={"service": "trainasone"})}
response = requests.post(TRAINASONE_SERVER_URL + "/oauth/token", data=params)
if response.status_code != 200:
raise APIException("Invalid code")
data = response.json()
authorizationData = {"OAuthToken": data["access_token"]}
id_resp = requests.get(TRAINASONE_SERVER_URL + "/api/sync/user", headers=self._apiHeaders(authorizationData))
return (id_resp.json()["id"], authorizationData)
def RevokeAuthorization(self, serviceRecord):
resp = requests.post(TRAINASONE_SERVER_URL + "/api/oauth/revoke", data={"token": serviceRecord.Authorization["OAuthToken"]}, headers=self._apiHeaders(serviceRecord.Authorization))
if resp.status_code != 204 and resp.status_code != 200:
raise APIException("Unable to deauthorize TAO auth token, status " + str(resp.status_code) + " resp " + resp.text)
pass
def DownloadActivityList(self, serviceRecord, exhaustive=False):
allItems = []
if exhaustive:
pageUri = TRAINASONE_SERVER_URL + "/api/sync/activities?pageSize=200"
else:
pageUri = TRAINASONE_SERVER_URL + "/api/sync/activities"
while True:
response = requests.get(pageUri, headers=self._apiHeaders(serviceRecord.Authorization))
if response.status_code != 200:
if response.status_code == 401 or response.status_code == 403:
raise APIException("No authorization to retrieve activity list", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
raise APIException("Unable to retrieve activity list " + str(response) + " " + response.text)
data = response.json()
allItems += data["activities"]
if not exhaustive or "next" not in data or data["next"] is None:
break
pageUri = TRAINASONE_SERVER_URL + data["next"]
activities = []
exclusions = []
for act in allItems:
try:
activity = self._populateActivity(act)
except KeyError as e:
exclusions.append(APIExcludeActivity("Missing key in activity data " + str(e), activity_id=act["activityId"], user_exception=UserException(UserExceptionType.Corrupt)))
continue
logger.debug("\tActivity s/t " + str(activity.StartTime))
activity.ServiceData = {"id": act["activityId"]}
activities.append(activity)
return activities, exclusions
def _populateActivity(self, rawRecord):
''' Populate the 1st level of the activity object with all details required for UID from API data '''
activity = UploadedActivity()
activity.StartTime = dateutil.parser.parse(rawRecord["start"])
activity.EndTime = activity.StartTime + timedelta(seconds=rawRecord["duration"])
activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=rawRecord["distance"])
activity.GPS = rawRecord["hasGps"]
activity.Stationary = not rawRecord["hasGps"]
activity.CalculateUID()
return activity
def DownloadActivity(self, serviceRecord, activity):
activity_id = activity.ServiceData["id"]
# Switch URL to /api/sync/activity/fit/ once FITIO.Parse() available
resp = requests.get(TRAINASONE_SERVER_URL + "/api/sync/activity/tcx/" + activity_id, headers=self._apiHeaders(serviceRecord.Authorization))
try:
TCXIO.Parse(resp.content, activity)
except ValueError as e:
raise APIExcludeActivity("TCX parse error " + str(e), user_exception=UserException(UserExceptionType.Corrupt))
return activity
def UploadActivity(self, serviceRecord, activity):
# Upload the workout as a .FIT file
uploaddata = FITIO.Dump(activity)
headers = self._apiHeaders(serviceRecord.Authorization)
headers['Content-Type'] = 'application/octet-stream'
resp = requests.post(TRAINASONE_SERVER_URL + "/api/sync/activity/fit", data=uploaddata, headers=headers)
if resp.status_code != 200:
raise APIException(
"Error uploading activity - " + str(resp.status_code),
block=False)
responseJson = resp.json()
if not responseJson["id"]:
raise APIException(
"Error uploading activity - " + resp.Message,
block=False)
activityId = responseJson["id"]
return activityId
def DeleteCachedData(self, serviceRecord):
pass # No cached data...
|
1,124 |
test clean mediapackages harvest jobs pending and failed
|
"""Tests for clean_mediapackages command."""
from io import StringIO
from unittest import mock
from django.core.management import call_command
from django.test import TestCase
from marsha.core.management.commands import clean_mediapackages
class CleanMediapackagesTest(TestCase):
"""Test clean_mediapackages command."""
@mock.patch.object(clean_mediapackages, "list_indexed_medialive_channels")
@mock.patch.object(clean_mediapackages, "list_mediapackage_channels")
def test_clean_mediapackages_no_mediapackage(
self, mock_mediapackage_channels, mock_medialive_indexed_channels
):
"""Command should do nothing when there is no mediapackage to process."""
out = StringIO()
mock_mediapackage_channels.return_value = []
mock_medialive_indexed_channels.return_value = {}
call_command("clean_mediapackages", stdout=out)
self.assertEqual("", out.getvalue())
out.close()
@mock.patch.object(clean_mediapackages, "list_indexed_medialive_channels")
@mock.patch.object(clean_mediapackages, "list_mediapackage_channels")
def test_clean_mediapackages_related_medialive(
self, mock_mediapackage_channels, mock_medialive_indexed_channels
):
"""Command should do nothing when there is related medialives."""
out = StringIO()
mock_mediapackage_channels.return_value = [{"Id": "MP1"}, {"Id": "MP2"}]
mock_medialive_indexed_channels.return_value = {
"MP1": {"Id": "ML1", "Name": "MP1"},
"MP2": {"Id": "ML2", "Name": "MP2"},
}
call_command("clean_mediapackages", stdout=out)
self.assertIn("Processing mediapackage channel MP1", out.getvalue())
self.assertIn("Processing mediapackage channel MP2", out.getvalue())
self.assertNotIn("Mediapackage channel MP1 deleted", out.getvalue())
self.assertNotIn("Mediapackage channel MP2 deleted", out.getvalue())
out.close()
@mock.patch.object(clean_mediapackages, "list_mediapackage_channel_harvest_jobs")
@mock.patch.object(clean_mediapackages, "list_indexed_medialive_channels")
@mock.patch.object(clean_mediapackages, "list_mediapackage_channels")
def test_clean_mediapackages_harvest_job_pending(
self,
mock_mediapackage_channels,
mock_medialive_indexed_channels,
mock_harvest_jobs,
):
"""Command should do nothing when there is a pending harvest job."""
out = StringIO()
mock_mediapackage_channels.return_value = [{"Id": "MP1"}]
mock_medialive_indexed_channels.return_value = {}
mock_harvest_jobs.return_value = [{"Status": "PENDING"}]
call_command("clean_mediapackages", stdout=out)
self.assertIn("Processing mediapackage channel MP1", out.getvalue())
self.assertNotIn("Mediapackage channel MP1 deleted", out.getvalue())
out.close()
@mock.patch.object(clean_mediapackages, "delete_mediapackage_channel")
@mock.patch.object(clean_mediapackages, "list_mediapackage_channel_harvest_jobs")
@mock.patch.object(clean_mediapackages, "list_mediapackage_channels")
@mock.patch.object(clean_mediapackages, "list_indexed_medialive_channels")
def test_clean_mediapackages_harvest_job_failed(
self,
mock_medialive_indexed_channels,
mock_mediapackage_channels,
mock_harvest_jobs,
mock_delete_mediapackage,
):
"""Command should delete channel when only a failed harvest job exists."""
out = StringIO()
mock_mediapackage_channels.return_value = [{"Id": "MP1"}]
mock_medialive_indexed_channels.return_value = {}
mock_harvest_jobs.return_value = [{"Status": "FAILED"}]
mock_delete_mediapackage.return_value = ["EP1", "EP2"]
call_command("clean_mediapackages", stdout=out)
self.assertIn("Processing mediapackage channel MP1", out.getvalue())
self.assertIn("Mediapackage channel endpoint EP1 deleted", out.getvalue())
self.assertIn("Mediapackage channel endpoint EP2 deleted", out.getvalue())
self.assertIn("Mediapackage channel MP1 deleted", out.getvalue())
out.close()
@mock.patch.object(clean_mediapackages, "list_mediapackage_channel_harvest_jobs")
@mock.patch.object(clean_mediapackages, "list_indexed_medialive_channels")
@mock.patch.object(clean_mediapackages, "list_mediapackage_channels")
def test_clean_mediapackages_harvest_jobs_failed_and_pending(
self,
mock_mediapackage_channels,
mock_medialive_indexed_channels,
mock_harvest_jobs,
):
"""Command should do nothing when failed and pending harvest job exists."""
out = StringIO()
mock_mediapackage_channels.return_value = [{"Id": "MP1"}]
mock_medialive_indexed_channels.return_value = {}
mock_harvest_jobs.return_value = [
{"Status": "FAILED"},
{"Status": "PENDING"},
]
call_command("clean_mediapackages", stdout=out)
self.assertIn("Processing mediapackage channel MP1", out.getvalue())
self.assertNotIn("Mediapackage channel MP1 deleted", out.getvalue())
out.close()
@mock.patch.object(clean_mediapackages, "list_mediapackage_channel_harvest_jobs")
@mock.patch.object(clean_mediapackages, "list_indexed_medialive_channels")
@mock.patch.object(clean_mediapackages, "list_mediapackage_channels")
def METHOD_NAME(
self,
mock_mediapackage_channels,
mock_medialive_indexed_channels,
mock_harvest_jobs,
):
"""Command should do nothing when pending and failed harvest job exists."""
out = StringIO()
mock_mediapackage_channels.return_value = [{"Id": "MP1"}]
mock_medialive_indexed_channels.return_value = {}
mock_harvest_jobs.return_value = [
{"Status": "PENDING"},
{"Status": "FAILED"},
]
call_command("clean_mediapackages", stdout=out)
self.assertIn("Processing mediapackage channel MP1", out.getvalue())
self.assertNotIn("Mediapackage channel MP1 deleted", out.getvalue())
out.close()
@mock.patch.object(clean_mediapackages, "delete_mediapackage_channel")
@mock.patch.object(clean_mediapackages, "list_mediapackage_channel_harvest_jobs")
@mock.patch.object(clean_mediapackages, "list_indexed_medialive_channels")
@mock.patch.object(clean_mediapackages, "list_mediapackage_channels")
def test_clean_mediapackages_no_harvest_job(
self,
mock_mediapackage_channels,
mock_medialive_indexed_channels,
mock_harvest_jobs,
mock_delete_mediapackage,
):
"""Command should delete channel when no harvest job exists."""
out = StringIO()
mock_mediapackage_channels.return_value = [{"Id": "MP1"}]
mock_medialive_indexed_channels.return_value = {}
mock_harvest_jobs.return_value = []
mock_delete_mediapackage.return_value = ["EP1", "EP2"]
call_command("clean_mediapackages", stdout=out)
self.assertIn("Processing mediapackage channel MP1", out.getvalue())
self.assertIn("Mediapackage channel endpoint EP1 deleted", out.getvalue())
self.assertIn("Mediapackage channel endpoint EP2 deleted", out.getvalue())
self.assertIn("Mediapackage channel MP1 deleted", out.getvalue())
out.close()
|
1,125 |
timing
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2021 deepset GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Sequence, Dict, Tuple
import numpy as np
from scipy.special import expit
from abc import abstractmethod
from copy import deepcopy
from functools import wraps
from time import perf_counter
from pipelines.schema import Document, Answer, Span
from pipelines.nodes.base import BaseComponent
class BaseReader(BaseComponent):
return_no_answers: bool
outgoing_edges = 1
query_count = 0
query_time = 0
@abstractmethod
def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None):
pass
@abstractmethod
def predict_batch(self, query_doc_list: List[dict], top_k: Optional[int] = None, batch_size: Optional[int] = None):
pass
@staticmethod
def _calc_no_answer(
no_ans_gaps: Sequence[float], best_score_answer: float, use_confidence_scores: bool = True
) -> Tuple[Answer, float]:
# "no answer" scores and positive answers scores are difficult to compare, because
# + a positive answer score is related to one specific document
# - a "no answer" score is related to all input documents
# Thus we compute the "no answer" score relative to the best possible answer and adjust it by
# the most significant difference between scores.
# Most significant difference: a model switching from predicting an answer to "no answer" (or vice versa).
# No_ans_gap is a list of this most significant difference per document
no_ans_gap_array = np.array(no_ans_gaps)
max_no_ans_gap = np.max(no_ans_gap_array)
# all passages "no answer" as top score
if np.sum(no_ans_gap_array < 0) == len(no_ans_gap_array):
no_ans_score = (
best_score_answer - max_no_ans_gap
) # max_no_ans_gap is negative, so it increases best pos score
else: # case: at least one passage predicts an answer (positive no_ans_gap)
no_ans_score = best_score_answer - max_no_ans_gap
no_ans_prediction = Answer(
answer="",
type="extractive",
score=float(expit(np.asarray(no_ans_score) / 8))
if use_confidence_scores
else no_ans_score, # just a pseudo prob for now or old score,
context=None,
offsets_in_context=[Span(start=0, end=0)],
offsets_in_document=[Span(start=0, end=0)],
document_id=None,
meta=None,
)
return no_ans_prediction, max_no_ans_gap
@staticmethod
def add_doc_meta_data_to_answer(documents: List[Document], answer):
# Add corresponding document_name and more meta data, if the answer contains the document_id
if answer.meta is None:
answer.meta = {}
# get meta from doc
meta_from_doc = {}
for doc in documents:
if doc.id == answer.document_id:
meta_from_doc = deepcopy(doc.meta)
break
# append to "own" meta
answer.meta.update(meta_from_doc)
return answer
def run(
self, query: str, documents: List[Document], top_k: Optional[int] = None, add_isolated_node_eval: bool = False
): # type: ignore
self.query_count += 1
if documents:
predict = self.METHOD_NAME(self.predict, "query_time")
results = predict(query=query, documents=documents, top_k=top_k)
else:
results = {"answers": []}
# Add corresponding document_name and more meta data, if an answer contains the document_id
results["answers"] = [
BaseReader.add_doc_meta_data_to_answer(documents=documents, answer=answer) for answer in results["answers"]
]
return results, "output_1"
def run_batch(self, query_doc_list: List[Dict], top_k: Optional[int] = None):
"""A unoptimized implementation of running Reader queries in batch"""
self.query_count += len(query_doc_list)
results = []
if query_doc_list:
for qd in query_doc_list:
q = qd["queries"]
docs = qd["docs"]
predict = self.METHOD_NAME(self.predict, "query_time")
result = predict(query=q, documents=docs, top_k=top_k)
results.append(result)
else:
results = [{"answers": [], "query": ""}]
return {"results": results}, "output_1"
def METHOD_NAME(self, fn, attr_name):
"""Wrapper method used to time functions."""
@wraps(fn)
def wrapper(*args, **kwargs):
if attr_name not in self.__dict__:
self.__dict__[attr_name] = 0
tic = perf_counter()
ret = fn(*args, **kwargs)
toc = perf_counter()
self.__dict__[attr_name] += toc - tic
return ret
return wrapper
def print_time(self):
print("Reader (Speed)")
print("---------------")
if not self.query_count:
print("No querying performed via Retriever.run()")
else:
print(f"Queries Performed: {self.query_count}")
print(f"Query time: {self.query_time}s")
print(f"{self.query_time / self.query_count} seconds per query")
|
1,126 |
test clean names strip underscores
|
import pandas as pd
import pytest
from janitor.errors import JanitorError
@pytest.mark.functions
def test_clean_names_method_chain(dataframe):
"""Tests clean_names default args in a method chain."""
df = dataframe.clean_names()
expected_columns = [
"a",
"bell_chart",
"decorated_elephant",
"animals@#$%^",
"cities",
]
assert set(df.columns) == set(expected_columns)
@pytest.mark.functions
def test_clean_names_special_characters(dataframe):
"""Tests clean_names `remove_special` parameter."""
df = dataframe.clean_names(remove_special=True)
expected_columns = [
"a",
"bell_chart",
"decorated_elephant",
"animals",
"cities",
]
assert set(df.columns) == set(expected_columns)
@pytest.mark.functions
def test_clean_names_uppercase(dataframe):
"""Tests clean_names `case_type` parameter = upper."""
df = dataframe.clean_names(case_type="upper", remove_special=True)
expected_columns = [
"A",
"BELL_CHART",
"DECORATED_ELEPHANT",
"ANIMALS",
"CITIES",
]
assert set(df.columns) == set(expected_columns)
@pytest.mark.functions
def test_clean_names_original_columns(dataframe):
"""Tests clean_names `preserve_original_columns` parameter."""
df = dataframe.clean_names(preserve_original_columns=True)
expected_columns = [
"a",
"Bell__Chart",
"decorated-elephant",
"animals@#$%^",
"cities",
]
assert set(df.original_columns) == set(expected_columns)
@pytest.mark.functions
def test_multiindex_clean_names(multiindex_dataframe):
"""Tests clean_names default args on a multi-index dataframe."""
df = multiindex_dataframe.clean_names()
levels = [
["a", "bell_chart", "decorated_elephant"],
["b", "normal_distribution", "r_i_p_rhino"],
]
codes = [[0, 1, 2], [0, 1, 2]]
expected_columns = pd.MultiIndex(levels=levels, codes=codes)
assert set(df.columns) == set(expected_columns)
@pytest.mark.functions
@pytest.mark.parametrize(
"strip_underscores", ["both", True, "right", "r", "left", "l"]
)
def METHOD_NAME(
multiindex_dataframe,
strip_underscores,
):
"""Tests clean_names `strip_underscores` param on a multi-index
dataframe.
"""
if strip_underscores in ["right", "r"]:
df = multiindex_dataframe.rename(columns=lambda x: x + "_")
elif strip_underscores in ["left", "l"]:
df = multiindex_dataframe.rename(columns=lambda x: "_" + x)
elif strip_underscores in ["both", None]:
df = multiindex_dataframe.rename(columns=lambda x: "_" + x + "_")
else:
df = multiindex_dataframe
df = df.clean_names(strip_underscores=strip_underscores)
levels = [
["a", "bell_chart", "decorated_elephant"],
["b", "normal_distribution", "r_i_p_rhino"],
]
codes = [[1, 0, 2], [1, 0, 2]]
expected_columns = pd.MultiIndex(levels=levels, codes=codes)
assert set(df.columns) == set(expected_columns)
@pytest.mark.functions
def test_clean_names_strip_accents():
"""Tests clean_names `strip_accents` parameter."""
df = pd.DataFrame({"João": [1, 2], "Лука́ся": [1, 2], "Käfer": [1, 2]})
df = df.clean_names(strip_accents=True)
expected_columns = ["joao", "лукася", "kafer"]
assert set(df.columns) == set(expected_columns)
@pytest.mark.functions
def test_incorrect_strip_underscores(multiindex_dataframe):
"""Checks error is thrown when `strip_underscores` is given an
invalid argument.
"""
with pytest.raises(JanitorError):
multiindex_dataframe.clean_names(strip_underscores="hello")
@pytest.mark.functions
def test_clean_names_preserve_case_true(multiindex_dataframe):
"""Tests clean_names `case_type` parameter = preserve."""
df = multiindex_dataframe.clean_names(case_type="preserve")
levels = [
["a", "Bell_Chart", "decorated_elephant"],
["b", "Normal_Distribution", "r_i_p_rhino"],
]
codes = [[1, 0, 2], [1, 0, 2]]
expected_columns = pd.MultiIndex(levels=levels, codes=codes)
assert set(df.columns) == set(expected_columns)
@pytest.mark.functions
def test_clean_names_camelcase_to_snake(dataframe):
"""Tests clean_names `case_type` parameter = snake."""
df = (
dataframe.select_columns(["a"])
.rename_column("a", "AColumnName")
.clean_names(case_type="snake")
)
assert list(df.columns) == ["a_column_name"]
@pytest.mark.functions
def test_clean_names_camelcase_to_snake_multi(dataframe):
"""Tests clean_names `case_type` parameter = snake on a multi-index
dataframe.
"""
df = (
dataframe.select_columns(["a", "Bell__Chart", "decorated-elephant"])
.rename_column("a", "snakesOnAPlane")
.rename_column("Bell__Chart", "SnakesOnAPlane2")
.rename_column("decorated-elephant", "snakes_on_a_plane3")
.clean_names(
case_type="snake", strip_underscores=True, remove_special=True
)
)
assert list(df.columns) == [
"snakes_on_a_plane",
"snakes_on_a_plane2",
"snakes_on_a_plane3",
]
@pytest.mark.functions
def test_clean_names_enforce_string(dataframe):
"""Tests clean_names `enforce_string` parameter."""
df = dataframe.rename(columns={"a": 1}).clean_names(enforce_string=True)
for c in df.columns:
assert isinstance(c, str)
@pytest.mark.functions
def test_clean_names_truncate_limit(dataframe):
"""Tests clean_names `truncate_limit` parameter."""
df = dataframe.clean_names(truncate_limit=7)
expected_columns = ["a", "bell_ch", "decorat", "animals", "cities"]
assert set(df.columns) == set(expected_columns)
@pytest.mark.functions
def test_charac():
"""Ensure non standard characters and spaces have been cleaned up."""
df = pd.DataFrame(
{
r"Current accountbalance(in % of GDP)": range(5),
}
)
df = df.clean_names(strip_underscores=True, case_type="lower")
assert "current_accountbalance_in_%_of_gdp" in df.columns
|
1,127 |
test next
|
import requests_mock
from django.test import SimpleTestCase
from corehq.apps.formplayer_api.smsforms.api import FormplayerInterface
from corehq.apps.formplayer_api.utils import get_formplayer_url
SESSION_ID = "SESSION_ID"
DOMAIN = "smsforms_domain"
USER_ID = "USER_ID"
QUESTION_RESPONSE = {
"event": {
"datatype": "select",
"choices": ["red", "green", "blue"],
"caption": "What's your favorite color?",
"type": "question",
"answer": None,
"required": 0,
"ix": "1",
"help": None
}
}
class FormplayerInterfaceTests(SimpleTestCase):
interface = FormplayerInterface(SESSION_ID, DOMAIN, USER_ID)
def test_get_raw_instance(self):
action = 'get-instance'
with MockFormplayerRequest(action, {}) as mocker:
self.interface.get_raw_instance()
mocker.assert_exactly_one_request()
request = mocker.get_last_request()
expected_request_data = {
'action': action,
'session-id': SESSION_ID,
'session_id': SESSION_ID,
'domain': DOMAIN,
'oneQuestionPerScreen': True,
'nav_mode': 'prompt',
}
self.validate_request(request, expected_request_data)
def test_answer_question(self):
action = 'answer'
with MockFormplayerRequest(action, QUESTION_RESPONSE) as mocker:
self.interface.answer_question("answer1")
mocker.assert_exactly_one_request()
request = mocker.get_last_request()
expected_request_data = {
'action': action,
'answer': 'answer1',
'session-id': SESSION_ID,
'session_id': SESSION_ID,
'domain': DOMAIN,
'oneQuestionPerScreen': True,
'nav_mode': 'prompt',
}
self.validate_request(request, expected_request_data)
def test_current_question(self):
action = 'current'
with MockFormplayerRequest(action, QUESTION_RESPONSE) as mocker:
self.interface.current_question()
mocker.assert_exactly_one_request()
request = mocker.get_last_request()
expected_request_data = {
'action': action,
'session-id': SESSION_ID,
'session_id': SESSION_ID,
'domain': DOMAIN,
'oneQuestionPerScreen': True,
'nav_mode': 'prompt',
}
self.validate_request(request, expected_request_data)
def METHOD_NAME(self):
action = "next"
with MockFormplayerRequest(action, QUESTION_RESPONSE) as mocker:
self.interface.next()
mocker.assert_exactly_one_request()
request = mocker.get_last_request()
expected_request_data = {
'action': action,
'session-id': SESSION_ID,
'session_id': SESSION_ID,
'domain': DOMAIN,
'oneQuestionPerScreen': True,
'nav_mode': 'prompt',
}
self.validate_request(request, expected_request_data)
def validate_request(self, request, expected_request_data):
self.assertEqual(request.json(), expected_request_data)
headers = request.headers
self.assertEqual(headers["X-FORMPLAYER-SESSION"], USER_ID)
self.assertNotEqual(headers["X-MAC-DIGEST"], "")
class MockFormplayerRequest:
def __init__(self, action, mock_response):
self.actions = []
self.add_action(action, mock_response)
self.mocker = requests_mock.Mocker()
# don't mock any requests other than the ones specified via 'actions'
self.mocker.register_uri(requests_mock.ANY, requests_mock.ANY, real_http=True)
def add_action(self, action, mock_response):
self.actions.append((action, mock_response))
def __enter__(self):
self.mocker.__enter__()
for action, response in self.actions:
self.mocker.post(f"{get_formplayer_url()}/{action}", json=response)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.mocker.__exit__(exc_type, exc_val, exc_tb)
def assert_exactly_one_request(self):
assert len(self.mocker.request_history) == 1
def get_last_request(self):
return self.mocker.request_history[-1]
|
1,128 |
test get user groups unknown dc
|
import pytest
import salt.utils.win_functions as win_functions
from tests.support.mock import MagicMock, patch
HAS_WIN32 = False
HAS_PYWIN = False
try:
import win32net
HAS_WIN32 = True
class WinError(win32net.error):
winerror = 0
except ImportError:
HAS_WIN32 = False
try:
import pywintypes
HAS_PYWIN = True
class PyWinError(pywintypes.error):
pywinerror = 0
except ImportError:
HAS_PYWIN = False
# Test cases for salt.utils.win_functions.
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
def test_escape_argument_simple():
"""
Test to make sure we encode simple arguments correctly
"""
encoded = win_functions.escape_argument("simple")
assert encoded == "simple"
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
def test_escape_argument_with_space():
"""
Test to make sure we encode arguments containing spaces correctly
"""
encoded = win_functions.escape_argument("with space")
assert encoded == '^"with space^"'
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
def test_escape_argument_simple_path():
"""
Test to make sure we encode simple path arguments correctly
"""
encoded = win_functions.escape_argument("C:\\some\\path")
assert encoded == "C:\\some\\path"
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
def test_escape_argument_path_with_space():
"""
Test to make sure we encode path arguments containing spaces correctly
"""
encoded = win_functions.escape_argument("C:\\Some Path\\With Spaces")
assert encoded == '^"C:\\Some Path\\With Spaces^"'
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
def test_broadcast_setting_change():
"""
Test to rehash the Environment variables
"""
assert win_functions.broadcast_setting_change()
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
def test_get_user_groups():
groups = ["Administrators", "Users"]
with patch("win32net.NetUserGetLocalGroups", return_value=groups):
assert win_functions.get_user_groups("Administrator") == groups
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
def test_get_user_groups_sid():
groups = ["Administrators", "Users"]
expected = ["S-1-5-32-544", "S-1-5-32-545"]
with patch("win32net.NetUserGetLocalGroups", return_value=groups):
assert win_functions.get_user_groups("Administrator", sid=True) == expected
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
def test_get_user_groups_system():
groups = ["SYSTEM"]
with patch("win32net.NetUserGetLocalGroups", return_value=groups):
assert win_functions.get_user_groups("SYSTEM") == groups
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
@pytest.mark.skipif(not HAS_WIN32, reason="Requires Win32 libraries")
def test_get_user_groups_unavailable_dc():
groups = ["Administrators", "Users"]
win_error = WinError()
win_error.winerror = 1722
effect = [win_error, groups]
with patch("win32net.NetUserGetLocalGroups", side_effect=effect):
assert win_functions.get_user_groups("Administrator") == groups
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
@pytest.mark.skipif(not HAS_WIN32, reason="Requires Win32 libraries")
def METHOD_NAME():
groups = ["Administrators", "Users"]
win_error = WinError()
win_error.winerror = 2453
effect = [win_error, groups]
with patch("win32net.NetUserGetLocalGroups", side_effect=effect):
assert win_functions.get_user_groups("Administrator") == groups
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
@pytest.mark.skipif(not HAS_WIN32, reason="Requires Win32 libraries")
def test_get_user_groups_missing_permission():
groups = ["Administrators", "Users"]
win_error = WinError()
win_error.winerror = 5
effect = [win_error, groups]
with patch("win32net.NetUserGetLocalGroups", side_effect=effect):
assert win_functions.get_user_groups("Administrator") == groups
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
@pytest.mark.skipif(not HAS_WIN32, reason="Requires Win32 libraries")
def test_get_user_groups_error():
win_error = WinError()
win_error.winerror = 1927
mock_error = MagicMock(side_effect=win_error)
with patch("win32net.NetUserGetLocalGroups", side_effect=mock_error):
with pytest.raises(WinError):
win_functions.get_user_groups("Administrator")
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
@pytest.mark.skipif(not HAS_PYWIN, reason="Requires pywintypes libraries")
def test_get_user_groups_local_pywin_error():
win_error = PyWinError()
win_error.winerror = 1355
mock_error = MagicMock(side_effect=win_error)
with patch("win32net.NetUserGetLocalGroups", side_effect=mock_error):
with pytest.raises(PyWinError):
win_functions.get_user_groups("Administrator")
@pytest.mark.skip_unless_on_windows(reason="Test is only applicable to Windows.")
@pytest.mark.skipif(not HAS_PYWIN, reason="Requires pywintypes libraries")
def test_get_user_groups_pywin_error():
win_error = PyWinError()
win_error.winerror = 1355
mock_error = MagicMock(side_effect=win_error)
with patch("win32net.NetUserGetLocalGroups", side_effect=mock_error):
with patch("win32net.NetUserGetGroups", side_effect=mock_error):
with pytest.raises(PyWinError):
win_functions.get_user_groups("Administrator")
|
1,129 |
guess format
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import mimetypes
from .widget_core import CoreWidget
from .domwidget import DOMWidget
from .valuewidget import ValueWidget
from .widget import register
from traitlets import Unicode, CUnicode, Bytes, Bool
from .trait_types import bytes_serialization
from .util import text_type
@register
class _Media(DOMWidget, ValueWidget, CoreWidget):
"""Base class for Image, Audio and Video widgets.
The `value` of this widget accepts a byte string. The byte string is the
raw data that you want the browser to display.
If you pass `"url"` to the `"format"` trait, `value` will be interpreted
as a URL as bytes encoded in UTF-8.
"""
# Define the custom state properties to sync with the front-end
value = Bytes(help="The media data as a byte string.").tag(sync=True, **bytes_serialization)
@classmethod
def _from_file(cls, tag, filename, **kwargs):
"""
Create an :class:`Media` from a local file.
Parameters
----------
filename: str
The location of a file to read into the value from disk.
**kwargs:
The keyword arguments for `Media`
Returns an `Media` with the value set from the filename.
"""
value = cls._load_file_value(filename)
if 'format' not in kwargs:
format = cls.METHOD_NAME(tag, filename)
if format is not None:
kwargs['format'] = format
return cls(value=value, **kwargs)
@classmethod
def from_url(cls, url, **kwargs):
"""
Create an :class:`Media` from a URL.
:code:`Media.from_url(url)` is equivalent to:
.. code-block: python
med = Media(value=url, format='url')
But both unicode and bytes arguments are allowed for ``url``.
Parameters
----------
url: [str, bytes]
The location of a URL to load.
"""
if isinstance(url, text_type):
# If unicode (str in Python 3), it needs to be encoded to bytes
url = url.encode('utf-8')
return cls(value=url, format='url')
def set_value_from_file(self, filename):
"""
Convenience method for reading a file into `value`.
Parameters
----------
filename: str
The location of a file to read into value from disk.
"""
value = self._load_file_value(filename)
self.value = value
@classmethod
def _load_file_value(cls, filename):
if getattr(filename, 'read', None) is not None:
return filename.read()
else:
with open(filename, 'rb') as f:
return f.read()
@classmethod
def METHOD_NAME(cls, tag, filename):
# file objects may have a .name parameter
name = getattr(filename, 'name', None)
name = name or filename
try:
mtype, _ = mimetypes.guess_type(name)
if not mtype.startswith('{}/'.format(tag)):
return None
return mtype[len('{}/'.format(tag)):]
except Exception:
return None
def _get_repr(self, cls):
# Truncate the value in the repr, since it will
# typically be very, very large.
class_name = self.__class__.__name__
# Return value first like a ValueWidget
signature = []
sig_value = repr(self.value)
prefix, rest = sig_value.split("'", 1)
content = rest[:-1]
if len(content) > 100:
sig_value = "{}'{}...'".format(prefix, content[0:100])
signature.append('%s=%s' % ('value', sig_value))
for key in super(cls, self)._repr_keys():
if key == 'value':
continue
value = str(getattr(self, key))
signature.append('%s=%r' % (key, value))
signature = ', '.join(signature)
return '%s(%s)' % (class_name, signature)
@register
class Image(_Media):
"""Displays an image as a widget.
The `value` of this widget accepts a byte string. The byte string is the
raw image data that you want the browser to display. You can explicitly
define the format of the byte string using the `format` trait (which
defaults to "png").
If you pass `"url"` to the `"format"` trait, `value` will be interpreted
as a URL as bytes encoded in UTF-8.
"""
_view_name = Unicode('ImageView').tag(sync=True)
_model_name = Unicode('ImageModel').tag(sync=True)
# Define the custom state properties to sync with the front-end
format = Unicode('png', help="The format of the image.").tag(sync=True)
width = CUnicode(help="Width of the image in pixels. Use layout.width "
"for styling the widget.").tag(sync=True)
height = CUnicode(help="Height of the image in pixels. Use layout.height "
"for styling the widget.").tag(sync=True)
def __init__(self, *args, **kwargs):
super(Image, self).__init__(*args, **kwargs)
@classmethod
def from_file(cls, filename, **kwargs):
return cls._from_file('image', filename, **kwargs)
def __repr__(self):
return self._get_repr(Image)
@register
class Video(_Media):
"""Displays a video as a widget.
The `value` of this widget accepts a byte string. The byte string is the
raw video data that you want the browser to display. You can explicitly
define the format of the byte string using the `format` trait (which
defaults to "mp4").
If you pass `"url"` to the `"format"` trait, `value` will be interpreted
as a URL as bytes encoded in UTF-8.
"""
_view_name = Unicode('VideoView').tag(sync=True)
_model_name = Unicode('VideoModel').tag(sync=True)
# Define the custom state properties to sync with the front-end
format = Unicode('mp4', help="The format of the video.").tag(sync=True)
width = CUnicode(help="Width of the video in pixels.").tag(sync=True)
height = CUnicode(help="Height of the video in pixels.").tag(sync=True)
autoplay = Bool(True, help="When true, the video starts when it's displayed").tag(sync=True)
loop = Bool(True, help="When true, the video will start from the beginning after finishing").tag(sync=True)
controls = Bool(True, help="Specifies that video controls should be displayed (such as a play/pause button etc)").tag(sync=True)
@classmethod
def from_file(cls, filename, **kwargs):
return cls._from_file('video', filename, **kwargs)
def __repr__(self):
return self._get_repr(Video)
@register
class Audio(_Media):
"""Displays a audio as a widget.
The `value` of this widget accepts a byte string. The byte string is the
raw audio data that you want the browser to display. You can explicitly
define the format of the byte string using the `format` trait (which
defaults to "mp3").
If you pass `"url"` to the `"format"` trait, `value` will be interpreted
as a URL as bytes encoded in UTF-8.
"""
_view_name = Unicode('AudioView').tag(sync=True)
_model_name = Unicode('AudioModel').tag(sync=True)
# Define the custom state properties to sync with the front-end
format = Unicode('mp3', help="The format of the audio.").tag(sync=True)
autoplay = Bool(True, help="When true, the audio starts when it's displayed").tag(sync=True)
loop = Bool(True, help="When true, the audio will start from the beginning after finishing").tag(sync=True)
controls = Bool(True, help="Specifies that audio controls should be displayed (such as a play/pause button etc)").tag(sync=True)
@classmethod
def from_file(cls, filename, **kwargs):
return cls._from_file('audio', filename, **kwargs)
def __repr__(self):
return self._get_repr(Audio)
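# ----------------------- E X A M P L E -----------------------
# A small usage sketch, not part of the module. The file path below is a
# placeholder and must point at an existing image for the call to succeed; it
# shows that the format is derived from the file extension via METHOD_NAME
# when no explicit ``format`` is passed, while URLs are stored verbatim.
if __name__ == "__main__":
    img = Image.from_file('photo.jpg')  # placeholder path
    print(img.format)  # 'jpeg', guessed from the '.jpg' extension
    remote = Image.from_url('https://example.com/photo.jpg')
    print(remote.format, remote.value)  # 'url' b'https://example.com/photo.jpg'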
|
1,130 |
get delta try except
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2023 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pytest
from numpy import array, allclose, isclose
import pandapower as pp
from pandapower.test.consistency_checks import consistency_checks
try:
import pandaplan.core.pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
@pytest.fixture
def dcline_net():
net = pp.create_empty_network()
b5 = pp.create_bus(net, 380)
b3 = pp.create_bus(net, 380)
b2 = pp.create_bus(net, 380)
b4 = pp.create_bus(net, 380)
b1 = pp.create_bus(net, 380)
pp.create_ext_grid(net, b1, 1.02, min_p_mw=0., max_p_mw=1e9)
pp.create_line(net, b1, b2, 30, "490-AL1/64-ST1A 380.0")
pp.create_dcline(net, name="dc line", from_bus=b2, to_bus=b3, p_mw=200, loss_percent=1.0,
loss_mw=0.5, vm_from_pu=1.01, vm_to_pu=1.012, max_p_mw=1000,
in_service=True, index=4)
pp.create_line(net, b3, b4, 20, "490-AL1/64-ST1A 380.0")
pp.create_load(net, bus=b4, p_mw=800, controllable=False)
pp.create_line(net, b4, b5, 20, "490-AL1/64-ST1A 380.0")
pp.create_ext_grid(net, b5, 1.02, min_p_mw=0., max_p_mw=1e9)
return net
def METHOD_NAME(net):
for delta in [1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10, 1e-11, 1e-12]:
try:
pp.runopp(net, delta=delta)
return delta
except pp.OPFNotConverged:
continue
return 1e-10
@pytest.mark.xfail(reason="numerical issue with OPF convergence. The failure seems to depend on the"
" python version. Should be reworked.")
def test_dispatch1(dcline_net):
net = dcline_net
pp.create_pwl_cost(net, 0, "ext_grid", [[-1e12, 1e9, 100]])
pp.create_pwl_cost(net, 1, "ext_grid", [[-1e12, 1e9, 80]])
net.bus["max_vm_pu"] = 2
net.bus["min_vm_pu"] = 0 # needs to be constrained more than default
net.line["max_loading_percent"] = 1000 # does not converge if unconstrained
pp.runopp(net, delta=1e-8)
consistency_checks(net)
rel_loss_expect = (net.res_dcline.pl_mw - net.dcline.loss_mw) / \
(net.res_dcline.p_from_mw - net.res_dcline.pl_mw) * 100
assert allclose(rel_loss_expect.values, net.dcline.loss_percent.values, rtol=1e-2)
assert allclose(net.res_ext_grid.p_mw.values, [0.5, 805], atol=0.1)
assert allclose(net.res_ext_grid.q_mvar.values, [-7.78755773243, 0.62830727889], atol=1e-3)
assert allclose(net.res_dcline.p_from_mw.values, [0.500754071], atol=1e-3)
assert allclose(net.res_dcline.q_from_mvar.values, [7.78745600524])
assert allclose(net.res_dcline.p_to_mw.values, array([-5.48553789e-05]))
assert allclose(net.res_dcline.q_to_mvar.values, array([-.62712636707]))
@pytest.mark.xfail(reason="numerical issue with OPF convergence. If vm_pu delta is != 0. at "
"ext_grid -> fail. See build_gen() in line 111 + 112")
def test_dcline_dispatch2(dcline_net):
net = dcline_net
pp.create_poly_cost(net, 0, "ext_grid", cp1_eur_per_mw=80)
pp.create_poly_cost(net, 1, "ext_grid", cp1_eur_per_mw=100)
# pp.create_poly_cost(net, 0, "ext_grid", array([.08, 0]))
# pp.create_poly_cost(net, 1, "ext_grid", array([.1, 0]))
net.bus["max_vm_pu"] = 2
net.bus["min_vm_pu"] = 0 # needs to be constrained more than default
net.line["max_loading_percent"] = 1000 # does not converge if unconstrained
# pp.runopp(net, delta=get_delta_try_except(net))
pp.runopp(net)
consistency_checks(net, rtol=1e-3)
rel_loss_expect = (net.res_dcline.pl_mw - net.dcline.loss_mw) / \
(net.res_dcline.p_from_mw - net.res_dcline.pl_mw) * 100
assert allclose(rel_loss_expect.values, net.dcline.loss_percent.values)
p_eg_expect = array([8.21525358e+02, 5.43498903e-05])
q_eg_expect = array([-7787.55852923e-3, -21048.59213887e-3])
assert allclose(net.res_ext_grid.p_mw.values, p_eg_expect)
assert allclose(net.res_ext_grid.q_mvar.values, q_eg_expect)
p_from_expect = array([813573.88366999e-3])
q_from_expect = array([-26446.0473644e-3])
assert allclose(net.res_dcline.p_from_mw.values, p_from_expect)
assert allclose(net.res_dcline.q_from_mvar.values, q_from_expect)
p_to_expect = array([-805023.64719801e-3])
q_to_expect = array([-21736.31196315e-3])
assert allclose(net.res_dcline.p_to_mw.values, p_to_expect)
assert allclose(net.res_dcline.q_to_mvar.values, q_to_expect)
@pytest.mark.xfail(reason="numerical issue with OPF convergence. If vm_pu delta is != 0. at "
"ext_grid -> fail. See build_gen() in line 111 + 112")
def test_dcline_dispatch3(dcline_net):
net = dcline_net
pp.create_poly_cost(net, 4, "dcline", cp1_eur_per_mw=1.5)
net.bus["max_vm_pu"] = 1.03 # needs to be constrained more than default
net.line["max_loading_percent"] = 1000 # does not converge if unconstrained
# pp.runopp(net, delta=get_delta_try_except(net))
pp.runopp(net)
consistency_checks(net, rtol=1e-1)
# dc line is not dispatched because of the assigned costs
assert isclose(net.res_dcline.at[4, "p_to_mw"], 0, atol=1e-2)
assert all(net.res_ext_grid.p_mw.values > 0)
# costs for ext_grid at the end of the DC line get double the costs of DC line transfer
pp.create_poly_cost(net, 1, "ext_grid", cp1_eur_per_mw=2000)
pp.runopp(net)
# pp.runopp(net, delta=get_delta_try_except(net))
# now the total power is supplied through the DC line
assert (net.res_dcline.at[4, "p_to_mw"]) < 1e3
assert net.res_ext_grid.p_mw.at[1] < 1
assert isclose(net.res_cost, net.res_dcline.at[4, "p_from_mw"] * 1.5)
if __name__ == "__main__":
pytest.main([__file__])
|
1,131 |
mogi
|
"""Forward deformation models."""
############################################################
# Program is part of MintPy #
# Copyright (c) 2013, Zhang Yunjun, Heresh Fattahi #
# Author: Zhang Yunjun, 2018 #
############################################################
# Recommend usage:
# from mintpy.simulation import defo_model as defo
# from mintpy.simulation import simulation as sim
import numpy as np
from matplotlib import pyplot as plt
from mintpy.utils import utils0 as ut0
def METHOD_NAME(geometry, xloc, nu=0.25):
"""Computes surface displacements, strains, and tilts due to a Mogi source.
Parameters: geometry : tuple of 4 float, Mogi source geometry: East, North, Depth, Volume change in SI units.
xloc : 2D np.array in size of (2, num_pixel), surface coordinates in east/x and north/y
nu : float, Poisson's ratio
Returns: u : 2D np.array of displacement in size of (3, num_pixel) for Ux, Uy, Uz
e : 2D np.array of strains in size of (3, num_pixel) for Exx, Exy, Eyy
t : 2D np.array of tilts in size of (2, num_pixel) for dUz/dx, dUz/dy
Notes: depth denotes an unsigned length and should therefore always be given positive. Keep your units consistent!
This is a python translation from mogi.m originally written by Peter Cervelli, May 1998.
"""
xloc = np.array(xloc, np.float32).reshape(2, -1)
# compute displacements
num_pixel = xloc.shape[1]
E = geometry[0] - xloc[0, :]
N = geometry[1] - xloc[1, :]
E2 = np.square(E)
N2 = np.square(N)
d2 = np.square(geometry[2])
R = np.sqrt(d2 + E2 + N2)
C = (nu - 1.) * geometry[3] / np.pi
R3 = C * np.power(R, -3)
displacement = np.zeros((3, num_pixel), np.float32)
displacement[0, :] = np.multiply(E, R3)
displacement[1, :] = np.multiply(N, R3)
displacement[2, :] = np.multiply(-1 * geometry[2], R3)
# compute strains (if necessary)
R5 = C * np.power(R, -5)
strain = np.zeros((3, num_pixel), np.float32)
strain[0, :] = np.multiply(R5, (2. * E2 - N2 - d2))
strain[1, :] = 3. * np.multiply(R5, np.multiply(E, N))
strain[2, :] = np.multiply(R5, (2. * N2 - E2 - d2))
# compute tilts
tilt = np.zeros((2, num_pixel), np.float32)
tilt[0, :] = -3. * np.multiply(R5, E) * geometry[2]
tilt[1, :] = -3. * np.multiply(R5, N) * geometry[2]
return displacement, strain, tilt
def mogi_los(shape, source_geom, resolution=60., scale=1., display=True):
"""Simulate 2D deformation caused by the overpress of a Mogi source underneath
Parameters: shape: 2-tuple of int in (length, width) or 2D np.ndarray in size of (length, width) in np.bool_
source_geom : 4-tuple of float, Mogi source geometry: East, North, Depth, Volomn change in SI unit.
Returns: dis_los: 2D np.ndarray in size of (length, width), deformation in LOS direction in meter
"""
if isinstance(shape, np.ndarray):
mask = np.multiply(np.array(shape != 0), ~np.isnan(shape))
shape = mask.shape
else:
mask = np.ones(shape, np.bool_)
length, width = shape
yy, xx = np.mgrid[0:length:length*1j, 0:width:width*1j]
yy *= resolution
xx *= resolution
xloc = np.vstack((xx.reshape(1, -1), yy.reshape(1, -1)))
dis_map = METHOD_NAME(source_geom, xloc)[0]
dis_e = dis_map[0, :].reshape(length, width)
dis_n = dis_map[1, :].reshape(length, width)
dis_u = dis_map[2, :].reshape(length, width)
dis_los = ut0.enu2los(dis_e, dis_n, dis_u,
inc_angle=34.,
head_angle=-168.)
dis_los[mask == 0.] = np.nan
dis_los *= scale
if display:
fig, ax = plt.subplots(1, 4, figsize=[10, 3], sharey=True)
dmin = np.nanmin(dis_los)
dmax = np.nanmax(dis_los)
for i, fig_title in enumerate(['east','north','vertical']):
ax[i].imshow(dis_map[i, :].reshape(length, width), vmin=dmin, vmax=dmax)
ax[i].set_title(fig_title)
im = ax[3].imshow(dis_los, vmin=dmin, vmax=dmax)
ax[3].set_title('los - SenD')
fig.subplots_adjust(right=0.90)
cax = fig.add_axes([0.92, 0.25, 0.01, 0.5])
cbar = fig.colorbar(im, cax=cax)
cbar.set_label('Displacement [m]')
plt.show()
return dis_los
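# ----------------------- E X A M P L E -----------------------
# A small worked sketch, not part of the module: a point source 2 km deep
# with a 1e6 m^3 volume increase, evaluated at two surface points. The
# numbers are illustrative only; a negative volume change would produce
# subsidence instead of uplift.
if __name__ == "__main__":
    geometry = (0., 0., 2000., 1e6)        # east, north, depth, dV in SI units
    xloc = np.array([[0., 1000.],          # east coordinates of two points [m]
                     [0., 0.]])            # north coordinates of two points [m]
    displacement, strain, tilt = METHOD_NAME(geometry, xloc)
    # Directly above the source the uplift peaks and decays with offset.
    print('Uz above source  : {:.4f} m'.format(displacement[2, 0]))
    print('Uz at 1 km offset: {:.4f} m'.format(displacement[2, 1]))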
|
1,132 |
process scan
|
#!/usr/bin/env python
"""Unprivileged memory RPC client code."""
import abc
from typing import TypeVar, Generic, Iterable
from grr_response_client.unprivileged import communication
from grr_response_client.unprivileged.proto import memory_pb2
class ConnectionWrapper:
"""Wraps a connection, adding protobuf serialization of messages."""
def __init__(self, connection: communication.Connection):
self._connection = connection
def Send(self, request: memory_pb2.Request) -> None:
self._connection.Send(
communication.Message(request.SerializeToString(), b""))
def Recv(self) -> memory_pb2.Response:
raw_response, _ = self._connection.Recv()
response = memory_pb2.Response()
response.ParseFromString(raw_response)
return response
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class OperationError(Exception):
"""Error while executing the operation."""
def __init__(self, message: str, formatted_exception: str):
"""Constructor.
Args:
message: the exception message
formatted_exception: the remote exception formatted using
traceback.format_exc()
"""
super().__init__(message)
self.formatted_exception = formatted_exception
RequestType = TypeVar("RequestType")
ResponseType = TypeVar("ResponseType")
class OperationHandler(abc.ABC, Generic[RequestType, ResponseType]):
"""Base class for RPC handlers."""
def __init__(self, connection: ConnectionWrapper):
self._connection = connection
def Run(self, request: RequestType) -> ResponseType:
self._connection.Send(self.PackRequest(request))
packed_response = self._connection.Recv()
if packed_response.HasField("exception"):
raise OperationError(packed_response.exception.message,
packed_response.exception.formatted_exception)
else:
response = self.UnpackResponse(packed_response)
return response
@abc.abstractmethod
def UnpackResponse(self, response: memory_pb2.Response) -> ResponseType:
"""Extracts an inner Response message from a response message."""
pass
@abc.abstractmethod
def PackRequest(self, request: RequestType) -> memory_pb2.Request:
"""Packs an inner Request message into a request message."""
pass
class UploadSignatureHandler(
OperationHandler[memory_pb2.UploadSignatureRequest,
memory_pb2.UploadSignatureResponse]):
"""Implements the UploadSignature RPC."""
def UnpackResponse(
self,
response: memory_pb2.Response) -> memory_pb2.UploadSignatureResponse:
return response.upload_signature_response
def PackRequest(
self, request: memory_pb2.UploadSignatureRequest) -> memory_pb2.Request:
return memory_pb2.Request(upload_signature_request=request)
class ProcessScanHandler(OperationHandler[memory_pb2.ProcessScanRequest,
memory_pb2.ProcessScanResponse]):
"""Implements the ProcessScan RPC."""
def UnpackResponse(
self, response: memory_pb2.Response) -> memory_pb2.ProcessScanResponse:
return response.process_scan_response
def PackRequest(self,
request: memory_pb2.ProcessScanRequest) -> memory_pb2.Request:
return memory_pb2.Request(process_scan_request=request)
class Client:
"""Client for the RPC memory service."""
def __init__(self, connection: communication.Connection):
self._connection = ConnectionWrapper(connection)
def UploadSignature(self, yara_signature: str):
"""Uploads a yara signature to be used for this connection."""
request = memory_pb2.UploadSignatureRequest(yara_signature=yara_signature)
UploadSignatureHandler(self._connection).Run(request)
def METHOD_NAME(self, serialized_file_descriptor: int,
chunks: Iterable[memory_pb2.Chunk],
timeout_seconds: int) -> memory_pb2.ProcessScanResponse:
"""Scans process memory.
Args:
serialized_file_descriptor: Serialized file descriptor for the process
memory. The file descriptor must be accessible by the server process.
chunks: Chunks (offset, size) to scan.
timeout_seconds: Timeout in seconds.
Returns:
A `ProcessScanResponse` proto.
"""
request = memory_pb2.ProcessScanRequest(
serialized_file_descriptor=serialized_file_descriptor,
chunks=chunks,
timeout_seconds=timeout_seconds)
response = ProcessScanHandler(self._connection).Run(request)
return response
def CreateMemoryClient(connection: communication.Connection) -> Client:
"""Creates a memory client."""
return Client(connection)
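# ----------------------- E X A M P L E -----------------------
# A rough usage sketch, not part of the module. It assumes an already
# established ``communication.Connection`` to the unprivileged memory server;
# the YARA rule, the file descriptor value and the chunk fields below are
# placeholders (the ``offset``/``size`` field names are assumed from the
# docstring of the scan RPC above).
def _example_scan(connection: communication.Connection) -> None:
    client = CreateMemoryClient(connection)
    client.UploadSignature('rule demo { strings: $a = "secret" condition: $a }')
    chunks = [memory_pb2.Chunk(offset=0x1000, size=0x10000)]  # assumed field names
    response = client.METHOD_NAME(
        serialized_file_descriptor=42,  # placeholder; must be valid in the server process
        chunks=chunks,
        timeout_seconds=60,
    )
    print(response)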
|
1,133 |
test nested ipaddr
|
import csv
import json
from socket import AF_INET
import pytest
from pr2test.context_manager import make_test_matrix, skip_if_not_supported
from pr2test.marks import require_root
from pyroute2.common import basestring
from pyroute2.ndb.objects import RTNL_Object
from pyroute2.ndb.report import Record, RecordSet
pytestmark = [require_root()]
test_matrix = make_test_matrix(
targets=['local', 'netns'], dbs=['sqlite3/:memory:', 'postgres/pr2test']
)
@pytest.mark.parametrize(
'view,key,item',
(
('interfaces', 'ifname', 'lo'),
('routes', 'dst', '127.0.0.0/8'),
('addresses', 'address', '127.0.0.1/8'),
),
)
@pytest.mark.parametrize('context', test_matrix, indirect=True)
def test_contains(context, view, key, item):
context.ndb.interfaces['lo'].set('state', 'up').commit()
getattr(context.ndb, view).wait(**{key: item, 'timeout': 10})
assert item in getattr(context.ndb, view)
@pytest.mark.parametrize('context', test_matrix, indirect=True)
def test_types(context):
# check for the report type here
assert isinstance(context.ndb.interfaces.summary(), RecordSet)
# repr must be a string
assert isinstance(repr(context.ndb.interfaces.summary()), basestring)
@pytest.mark.parametrize('context', test_matrix, indirect=True)
def test_iter_keys(context):
for name in ('interfaces', 'addresses', 'neighbours', 'routes', 'rules'):
view = getattr(context.ndb, name)
for key in view:
assert isinstance(key, Record)
obj = view.get(key)
if obj is not None:
assert isinstance(obj, RTNL_Object)
@pytest.mark.parametrize('context', test_matrix, indirect=True)
def test_slices(context):
a = list(context.ndb.rules.dump())
ln = len(a) - 1
# simple indices
assert a[0] == context.ndb.rules.dump()[0]
assert a[1] == context.ndb.rules.dump()[1]
assert a[-1] == context.ndb.rules.dump()[-1]
assert context.ndb.rules.dump()[ln] == a[-1]
try:
context.ndb.rules.dump()[len(a)]
except IndexError:
pass
# slices
assert a[0:] == context.ndb.rules.dump()[0:]
assert a[:3] == context.ndb.rules.dump()[:3]
assert a[0:3] == context.ndb.rules.dump()[0:3]
assert a[1:3] == context.ndb.rules.dump()[1:3]
# negative slices
assert a[-3:] == context.ndb.rules.dump()[-3:]
assert a[-3:-1] == context.ndb.rules.dump()[-3:-1]
# mixed
assert a[-ln : ln - 1] == context.ndb.rules.dump()[-ln : ln - 1]
# step
assert a[2:ln:2] == context.ndb.rules.dump()[2:ln:2]
@pytest.mark.parametrize('context', test_matrix, indirect=True)
@skip_if_not_supported
def test_report_chains(context):
ipnet = str(context.ipnets[1].network)
ipaddr = context.new_ipaddr
router = context.new_ipaddr
ifname = context.new_ifname
(
context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up')
.add_ip(address=ipaddr, prefixlen=24)
.commit()
)
(
context.ndb.routes.create(
dst=ipnet,
dst_len=24,
gateway=router,
encap={'type': 'mpls', 'labels': [20, 30]},
).commit()
)
with context.ndb.routes.dump() as dump:
dump.select_records(oif=context.ndb.interfaces[ifname]['index'])
dump.select_records(lambda x: x.encap is not None)
dump.select_fields('encap')
for record in dump:
encap = json.loads(record.encap)
break
assert isinstance(encap, list)
assert encap[0]['label'] == 20
assert encap[0]['bos'] == 0
assert encap[1]['label'] == 30
assert encap[1]['bos'] == 1
@pytest.mark.parametrize('context', test_matrix, indirect=True)
def test_json(context):
data = json.loads(''.join(context.ndb.interfaces.summary().format('json')))
assert isinstance(data, list)
for row in data:
assert isinstance(row, dict)
class MD(csv.Dialect):
quotechar = "'"
doublequote = False
quoting = csv.QUOTE_MINIMAL
delimiter = ","
lineterminator = "\n"
@pytest.mark.parametrize('context', test_matrix, indirect=True)
def test_csv(context):
record_length = 0
for record in context.ndb.routes.dump():
if record_length == 0:
record_length = len(record)
else:
assert len(record) == record_length
reader = csv.reader(context.ndb.routes.dump().format('csv'), dialect=MD())
for record in reader:
assert len(record) == record_length
@pytest.mark.parametrize('context', test_matrix, indirect=True)
def METHOD_NAME(context):
ifname = context.new_ifname
ipaddr1 = context.new_ipaddr
ipaddr2 = context.new_ipaddr
with context.ndb.interfaces.create(
ifname=ifname, kind='dummy', state='up'
) as interface:
interface.add_ip(address=ipaddr1, prefixlen=24)
interface.add_ip(address=ipaddr2, prefixlen=24)
with context.ndb.interfaces[ifname].ipaddr.dump() as dump:
dump.select_records(lambda x: x.family == AF_INET)
assert len(repr(dump).split('\n')) == 2
@pytest.mark.parametrize('context', test_matrix, indirect=True)
def test_nested_ports(context):
ifbr0 = context.new_ifname
ifbr0p0 = context.new_ifname
ifbr0p1 = context.new_ifname
with context.ndb.interfaces as i:
i.create(ifname=ifbr0p0, kind='dummy').commit()
i.create(ifname=ifbr0p1, kind='dummy').commit()
(
i.create(ifname=ifbr0, kind='bridge')
.add_port(ifbr0p0)
.add_port(ifbr0p1)
.commit()
)
records = len(
repr(context.ndb.interfaces[ifbr0].ports.summary()).split('\n')
)
# 1 port
assert records == 2
|
1,134 |
process csv
|
__all__ = ['process_csv', 'process_from_web']
import csv
import logging
import requests
from hashlib import md5
from .processor import TasProcessor
from indra.util import read_unicode_csv
tas_data_url = 'https://bigmech.s3.amazonaws.com/indra-db/tas.csv'
tas_resource_md5 = '554ccba4617aae7b3b06a62893424c7f'
logger = logging.getLogger(__name__)
def _load_data(data_iter):
# Get the headers.
headers = data_iter[0]
# For some reason this heading is oddly formatted and inconsistent with the
# rest, or with the usual key-style for dicts.
data = [{header: val for header, val in zip(headers, line)}
for line in data_iter[1:]]
return data
def process_from_web(affinity_class_limit=2, named_only=False,
standardized_only=False):
"""Return a TasProcessor for the contents of the TAS dump online.
Interactions are classified into the following classes based on affinity:
| 1 -- Kd < 100nM
| 2 -- 100nM < Kd < 1uM
| 3 -- 1uM < Kd < 10uM
| 10 -- Kd > 10uM
By default, only classes 1 and 2 are extracted but the affinity_class_limit
parameter can be used to change the upper limit of extracted classes.
Parameters
----------
affinity_class_limit : Optional[int]
Defines the highest class of binding affinity that is included in the
extractions. Default: 2
named_only : Optional[bool]
If True, only chemicals that have a name assigned in some name space
(including ones that aren't fully standardized per INDRA's ontology,
e.g., CHEMBL1234) are included. If False, chemicals whose name is
assigned based on an ID (e.g., CHEMBL) rather than an actual name are
also included. Default: False
standardized_only : Optional[bool]
If True, only chemicals that are fully standardized per INDRA's
ontology (i.e., they have grounding appearing in one of the
default_ns_order name spaces, and consequently have any
groundings and their name standardized) are extracted.
Default: False
Returns
-------
TasProcessor
A TasProcessor object which has a list of INDRA Statements extracted
from the CSV file representing drug-target inhibitions in its
statements attribute.
"""
logger.info('Downloading TAS data from %s' % tas_data_url)
res = requests.get(tas_data_url)
observed_checksum = md5(res.text.encode('utf-8')).hexdigest()
logger.info('Verifying md5 checksum of data')
if tas_resource_md5 != observed_checksum:
raise RuntimeError('Checksum for downloaded TAS data does not'
' match expected value')
res.raise_for_status()
logger.info('Finished downloading TAS data from %s' % tas_data_url)
data_iter = list(csv.reader(res.text.splitlines(), delimiter=','))
return TasProcessor(_load_data(data_iter),
affinity_class_limit=affinity_class_limit,
named_only=named_only,
standardized_only=standardized_only)
def METHOD_NAME(fname, affinity_class_limit=2, named_only=False,
standardized_only=False):
"""Return a TasProcessor for the contents of a given CSV file..
Interactions are classified into the following classes based on affinity:
| 1 -- Kd < 100nM
| 2 -- 100nM < Kd < 1uM
| 3 -- 1uM < Kd < 10uM
| 10 -- Kd > 10uM
By default, only classes 1 and 2 are extracted but the affinity_class_limit
parameter can be used to change the upper limit of extracted classes.
Parameters
----------
fname : str
The path to a local CSV file containing the TAS data.
affinity_class_limit : Optional[int]
Defines the highest class of binding affinity that is included in the
extractions. Default: 2
named_only : Optional[bool]
If True, only chemicals that have a name assigned in some name space
(including ones that aren't fully standardized per INDRA's ontology,
e.g., CHEMBL1234) are included. If False, chemicals whose name is
assigned based on an ID (e.g., CHEMBL) rather than an actual name are
also included. Default: False
standardized_only : Optional[bool]
If True, only chemicals that are fully standardized per INDRA's
ontology (i.e., they have grounding appearing in one of the
default_ns_order name spaces, and consequently have any
groundings and their name standardized) are extracted.
Default: False
Returns
-------
TasProcessor
A TasProcessor object which has a list of INDRA Statements extracted
from the CSV file representing drug-target inhibitions in its
statements attribute.
"""
data_iter = list(read_unicode_csv(fname))
return TasProcessor(_load_data(data_iter),
affinity_class_limit=affinity_class_limit,
named_only=named_only,
standardized_only=standardized_only)
|
1,135 |
to dict
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-import, broad-except
import json
import os
from collections import OrderedDict
from datetime import datetime, timedelta
import requests
from azure.cli.core._config import GLOBAL_CONFIG_DIR
from azure.cli.core._profile import Profile
from azure.cli.core._session import SESSION
from azure.core.exceptions import HttpResponseError
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.cli.core.azclierror import BadRequestError, AzureInternalError
from knack.log import get_logger
from knack.util import todict
from azext_resourcegraph.vendored_sdks.resourcegraph.models import ResultTruncated
from .vendored_sdks.resourcegraph import ResourceGraphClient
from .vendored_sdks.resourcegraph.models import \
QueryRequest, QueryRequestOptions, QueryResponse, ResultFormat, ErrorResponse, Error
__SUBSCRIPTION_LIMIT = 1000
__MANAGEMENT_GROUP_LIMIT = 10
__logger = get_logger(__name__)
def execute_query(client, graph_query, first, skip, subscriptions, management_groups, allow_partial_scopes, skip_token):
# type: (ResourceGraphClient, str, int, int, list[str], list[str], bool, str) -> object
mgs_list = management_groups
if mgs_list is not None and len(mgs_list) > __MANAGEMENT_GROUP_LIMIT:
mgs_list = mgs_list[:__MANAGEMENT_GROUP_LIMIT]
warning_message = "The query included more management groups than allowed. "\
"Only the first {0} management groups were included for the results. "\
"To use more than {0} management groups, "\
"see the docs for examples: "\
"https://aka.ms/arg-error-toomanysubs".format(__MANAGEMENT_GROUP_LIMIT)
__logger.warning(warning_message)
subs_list = None
if mgs_list is None:
subs_list = subscriptions or _get_cached_subscriptions()
if subs_list is not None and len(subs_list) > __SUBSCRIPTION_LIMIT:
subs_list = subs_list[:__SUBSCRIPTION_LIMIT]
warning_message = "The query included more subscriptions than allowed. "\
"Only the first {0} subscriptions were included for the results. "\
"To use more than {0} subscriptions, "\
"see the docs for examples: "\
"https://aka.ms/arg-error-toomanysubs".format(__SUBSCRIPTION_LIMIT)
__logger.warning(warning_message)
response = None
try:
result_truncated = False
request_options = QueryRequestOptions(
top=first,
skip=skip,
skip_token=skip_token,
result_format=ResultFormat.object_array,
allow_partial_scopes=allow_partial_scopes
)
request = QueryRequest(
query=graph_query,
subscriptions=subs_list,
management_groups=mgs_list,
options=request_options)
response = client.resources(request) # type: QueryResponse
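# A truncated result combined with fewer rows than requested means skip-token paging cannot continue, so warn the caller below.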
if response.result_truncated == ResultTruncated.true:
result_truncated = True
if result_truncated and first is not None and len(response.data) < first:
__logger.warning("Unable to paginate the results of the query. "
"Some resources may be missing from the results. "
"To rewrite the query and enable paging, "
"see the docs for an example: https://aka.ms/arg-results-truncated")
except HttpResponseError as ex:
if ex.model.error.code == 'BadRequest':
raise BadRequestError(json.dumps(METHOD_NAME(ex.model.error), indent=4)) from ex
raise AzureInternalError(json.dumps(METHOD_NAME(ex.model.error), indent=4)) from ex
result_dict = dict()
result_dict['data'] = response.data
result_dict['count'] = response.count
result_dict['total_records'] = response.total_records
result_dict['skip_token'] = response.skip_token
return result_dict
def create_shared_query(client, resource_group_name,
resource_name, description,
graph_query, location='global', tags=None):
from azext_resourcegraph.vendored_sdks.resourcegraph.models import GraphQueryResource
graph_shared_query = GraphQueryResource(description=description,
query=graph_query,
tags=tags,
location=location)
return client.graph_query.create_or_update(resource_group_name=resource_group_name,
resource_name=resource_name,
properties=graph_shared_query)
def _get_cached_subscriptions():
# type: () -> list[str]
cached_subs = Profile().load_cached_subscriptions()
return [sub['id'] for sub in cached_subs]
def METHOD_NAME(obj):
if isinstance(obj, Error):
return METHOD_NAME(todict(obj))
if isinstance(obj, dict):
result = OrderedDict()
# Complex objects should be displayed last
sorted_keys = sorted(obj.keys(), key=lambda k: (isinstance(obj[k], dict), isinstance(obj[k], list), k))
for key in sorted_keys:
if obj[key] is None or obj[key] == [] or obj[key] == {}:
continue
result[key] = METHOD_NAME(obj[key])
return result
if isinstance(obj, list):
return [METHOD_NAME(v) for v in obj]
return obj
|
1,136 |
clean br cpf
|
"""
Clean and validate a DataFrame column containing CPF numbers, Brazilian national identifier.
"""
# pylint: disable=too-many-lines, too-many-arguments, too-many-branches
from typing import Any, Union
from operator import itemgetter
import dask.dataframe as dd
import numpy as np
import pandas as pd
from stdnum.br import cpf
from ..progress_bar import ProgressBar
from .utils import NULL_VALUES, to_dask
def METHOD_NAME(
df: Union[pd.DataFrame, dd.DataFrame],
column: str,
output_format: str = "standard",
inplace: bool = False,
errors: str = "coerce",
progress: bool = True,
) -> pd.DataFrame:
"""
Clean Brazilian national identifier data in a DataFrame column.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be cleaned.
col
The name of the column containing data of CPF type.
output_format
The output format of standardized number string.
If output_format = 'compact', return string without any separators or whitespace.
If output_format = 'standard', return string with proper separators and whitespace.
(default: "standard")
inplace
If True, delete the column containing the data that was cleaned.
Otherwise, keep the original column.
(default: False)
errors
How to handle parsing errors.
- ‘coerce’: invalid parsing will be set to NaN.
- ‘ignore’: invalid parsing will return the input.
- ‘raise’: invalid parsing will raise an exception.
(default: 'coerce')
progress
If True, display a progress bar.
(default: True)
Examples
--------
Clean a column of CPF data.
>>> df = pd.DataFrame({
"cpf": [
'23100299900',
'231.002.999-00', # InvalidChecksum
'390.533.447=0'] # invalid delimiter
})
>>> clean_br_cpf(df, 'cpf')
cpf cpf_clean
0 23100299900 231.002.999-00
1 231.002.999-00 NaN
2 390.533.447=0 NaN
"""
if output_format not in {"compact", "standard"}:
raise ValueError(
f"output_format {output_format} is invalid. " 'It needs to be "compact" or "standard".'
)
# convert to dask
df = to_dask(df)
# To clean, create a new column "clean_code_tup" which contains
# the cleaned values and code indicating how the initial value was
# changed in a tuple. Then split the column of tuples and count the
# amount of different codes to produce the report
df["clean_code_tup"] = df[column].map_partitions(
lambda srs: [_format(x, output_format, errors) for x in srs],
meta=object,
)
df = df.assign(
_temp_=df["clean_code_tup"].map(itemgetter(0)),
)
df = df.rename(columns={"_temp_": f"{column}_clean"})
df = df.drop(columns=["clean_code_tup"])
if inplace:
df[column] = df[f"{column}_clean"]
df = df.drop(columns=f"{column}_clean")
df = df.rename(columns={column: f"{column}_clean"})
with ProgressBar(minimum=1, disable=not progress):
df = df.compute()
return df
def validate_br_cpf(
df: Union[str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame],
column: str = "",
) -> Union[bool, pd.Series, pd.DataFrame]:
"""
Validate if a data cell is CPF in a DataFrame column. For each cell, return True or False.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be validated.
col
The name of the column to be validated.
"""
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(cpf.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if column != "":
return df[column].apply(cpf.is_valid)
else:
return df.applymap(cpf.is_valid)
return cpf.is_valid(df)
def _format(val: Any, output_format: str = "standard", errors: str = "coerce") -> Any:
"""
Reformat a number string with proper separators and whitespace.
Parameters
----------
val
The value of number string.
output_format
If output_format = 'compact', return string without any separators or whitespace.
If output_format = 'standard', return string with proper separators and whitespace.
"""
val = str(val)
if val in NULL_VALUES:
return [np.nan]
if not validate_br_cpf(val):
if errors == "raise":
raise ValueError(f"Unable to parse value {val}")
error_result = val if errors == "ignore" else np.nan
return [error_result]
if output_format == "compact":
result = [cpf.compact(val)]
elif output_format == "standard":
result = [cpf.format(val)]
return result
|
1,137 |
input value
|
# -------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Bernhard Mallinger <[email protected]>
#
# -------------------------------------------------------------------------------
# Copyright (C) 2022 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -------------------------------------------------------------------------------
from logging import getLogger
from typing import Any
from eoxserver.services.ows.wps.util import get_process_by_identifier
from eoxserver.services.ows.wps.interfaces import ProcessInterface
from eoxserver.services.ows.wps.exceptions import OperationNotSupportedError
from eoxserver.services.ows.wps.parameters import BoundingBox, ResponseForm
from eoxserver.services.ows.wps.v10.execute_util import (
pack_outputs as pack_outputs_v10,
parse_params as parse_params_v10,
decode_output_requests as decode_output_requests_v10,
)
from eoxserver.services.ows.wps.v10.encoders import WPS10ExecuteResponseRawEncoder
from ows.wps.v20 import decoders
import ows.wps.v20.types as pyows_types
import ows.common.types as pyows_common_types
class WPS20ExecuteHandler(object):
"""WPS 2.0 DescribeProcess service handler."""
service = "WPS"
versions = ("2.0.0",)
request = "Execute"
methods = ["POST"]
def handle(self, request):
"""Handle HTTP request."""
logger = getLogger(__name__)
execute_request: pyows_types.ExecuteRequest = decoders.XMLExecuteDecoder(
request.body
).decode()
process: ProcessInterface = get_process_by_identifier(
execute_request.process_id
)
input_defs = parse_params_v10(process.inputs)
output_defs = parse_params_v10(process.outputs)
# reuse wps 1.0 encoding
resp_form = ResponseForm()
for output in execute_request.output_definitions:
resp_form.set_output(output)
# these fields are not present in pyows, we set them for compatibility
output.uom = None
output.as_reference = None
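# Assemble the keyword arguments for the process: defaults for optional inputs first, then the requested output settings, then the decoded inputs from the request.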
inputs = {
name: getattr(optional_input, "default", None)
for (name, optional_input) in input_defs.values()
if optional_input.is_optional
}
inputs.update(decode_output_requests_v10(resp_form, output_defs))
inputs.update(
{
input_.identifier: METHOD_NAME(input_)
for input_ in execute_request.inputs
}
)
if execute_request.mode == pyows_types.ExecutionMode.sync:
logger.debug("Execute process %s", execute_request.process_id)
outputs = process.execute(**inputs)
elif execute_request.mode == pyows_types.ExecutionMode.async_:
raise OperationNotSupportedError("Async mode not implemented")
else: # auto
raise OperationNotSupportedError("Auto mode not implemented")
if execute_request.response == pyows_types.ResponseType.raw:
packed_outputs = pack_outputs_v10(
outputs,
response_form=resp_form,
output_defs=output_defs,
)
encoder = WPS10ExecuteResponseRawEncoder(resp_form=resp_form)
response = encoder.encode_response(packed_outputs)
return encoder.serialize(response)
else: # document
raise OperationNotSupportedError("Document mode not implemented")
def METHOD_NAME(input_: pyows_types.Input) -> Any:
if isinstance(input_.data, pyows_types.Data):
data_value = input_.data.value
# TODO: use pattern matching as soon as we can require python 3.10
if isinstance(data_value, pyows_types.LiteralValue):
return data_value.value
elif isinstance(data_value, pyows_common_types.BoundingBox):
return BoundingBox(
bbox=(data_value.bbox[:2], data_value.bbox[2:]),
crs=data_value.crs,
)
else:
# not a common type, process needs to handle it on its own
return data_value
else:
raise OperationNotSupportedError("References as input are not implemented")
|
1,138 |
test simple agg filter
|
from __future__ import annotations
import datetime
import ibis
from ibis.backends.base.sql.compiler import Compiler
from ibis.tests.sql.conftest import to_sql
from ibis.tests.util import assert_decompile_roundtrip
def test_union(union, snapshot):
snapshot.assert_match(to_sql(union), "out.sql")
assert_decompile_roundtrip(union, snapshot, check_equality=False)
def test_union_project_column(union_all, snapshot):
# select a column, get a subquery
expr = union_all[[union_all.key]]
snapshot.assert_match(to_sql(expr), "out.sql")
assert_decompile_roundtrip(expr, snapshot, check_equality=False)
def test_table_intersect(intersect, snapshot):
snapshot.assert_match(to_sql(intersect), "out.sql")
assert_decompile_roundtrip(intersect, snapshot, check_equality=False)
def test_table_difference(difference, snapshot):
snapshot.assert_match(to_sql(difference), "out.sql")
assert_decompile_roundtrip(difference, snapshot, check_equality=False)
def test_intersect_project_column(intersect, snapshot):
# select a column, get a subquery
expr = intersect[[intersect.key]]
snapshot.assert_match(to_sql(expr), "out.sql")
assert_decompile_roundtrip(expr, snapshot, check_equality=False)
def test_difference_project_column(difference, snapshot):
# select a column, get a subquery
expr = difference[[difference.key]]
snapshot.assert_match(to_sql(expr), "out.sql")
assert_decompile_roundtrip(expr, snapshot, check_equality=False)
def test_table_distinct(con, snapshot):
t = con.table("functional_alltypes")
expr = t[t.string_col, t.int_col].distinct()
snapshot.assert_match(to_sql(expr), "out.sql")
assert_decompile_roundtrip(expr, snapshot)
def test_column_distinct(con, snapshot):
t = con.table("functional_alltypes")
expr = t[t.string_col].distinct()
snapshot.assert_match(to_sql(expr), "out.sql")
assert_decompile_roundtrip(expr, snapshot)
def test_count_distinct(con, snapshot):
t = con.table("functional_alltypes")
metric = t.int_col.nunique().name("nunique")
expr = t[t.bigint_col > 0].group_by("string_col").aggregate([metric])
snapshot.assert_match(to_sql(expr), "out.sql")
assert_decompile_roundtrip(expr, snapshot)
def test_multiple_count_distinct(con, snapshot):
# Impala and some other databases will not execute multiple
# count-distincts in a single aggregation query. This error reporting
# will be left to the database itself, for now.
t = con.table("functional_alltypes")
metrics = [
t.int_col.nunique().name("int_card"),
t.smallint_col.nunique().name("smallint_card"),
]
expr = t.group_by("string_col").aggregate(metrics)
snapshot.assert_match(to_sql(expr), "out.sql")
assert_decompile_roundtrip(expr, snapshot)
def test_pushdown_with_or(snapshot):
t = ibis.table(
[
("double_col", "float64"),
("string_col", "string"),
("int_col", "int32"),
("float_col", "float32"),
],
"functional_alltypes",
)
subset = t[(t.double_col > 3.14) & t.string_col.contains("foo")]
expr = subset[(subset.int_col - 1 == 0) | (subset.float_col <= 1.34)]
snapshot.assert_match(to_sql(expr), "out.sql")
def test_having_size(snapshot):
t = ibis.table(
[
("double_col", "double"),
("string_col", "string"),
("int_col", "int32"),
("float_col", "float"),
],
"functional_alltypes",
)
expr = t.group_by(t.string_col).having(t.double_col.max() == 1).size()
snapshot.assert_match(to_sql(expr), "out.sql")
def test_having_from_filter(snapshot):
t = ibis.table([("a", "int64"), ("b", "string")], "t")
filt = t[t.b == "m"]
gb = filt.group_by(filt.b)
having = gb.having(filt.a.max() == 2)
expr = having.aggregate(filt.a.sum().name("sum"))
snapshot.assert_match(to_sql(expr), "out.sql")
# params get different auto incremented counter identifiers
assert_decompile_roundtrip(expr, snapshot, check_equality=False)
def METHOD_NAME(snapshot):
t = ibis.table([("a", "int64"), ("b", "string")], name="my_table")
filt = t[t.a < 100]
expr = filt[filt.a == filt.a.max()]
snapshot.assert_match(to_sql(expr), "out.sql")
def test_agg_and_non_agg_filter(snapshot):
t = ibis.table([("a", "int64"), ("b", "string")], name="my_table")
filt = t[t.a < 100]
expr = filt[filt.a == filt.a.max()]
expr = expr[expr.b == "a"]
snapshot.assert_match(to_sql(expr), "out.sql")
def test_agg_filter(snapshot):
t = ibis.table([("a", "int64"), ("b", "int64")], name="my_table")
t = t.mutate(b2=t.b * 2)
t = t[["a", "b2"]]
filt = t[t.a < 100]
expr = filt[filt.a == filt.a.max().name("blah")]
snapshot.assert_match(to_sql(expr), "out.sql")
def test_agg_filter_with_alias(snapshot):
t = ibis.table([("a", "int64"), ("b", "int64")], name="my_table")
t = t.mutate(b2=t.b * 2)
t = t[["a", "b2"]]
filt = t[t.a < 100]
expr = filt[filt.a.name("A") == filt.a.max().name("blah")]
snapshot.assert_match(to_sql(expr), "out.sql")
def test_table_drop_with_filter(snapshot):
left = ibis.table(
[("a", "int64"), ("b", "string"), ("c", "timestamp")], name="t"
).rename(C="c")
left = left.filter(left.C == datetime.datetime(2018, 1, 1))
left = left.drop("C")
left = left.mutate(the_date=datetime.datetime(2018, 1, 1))
right = ibis.table([("b", "string")], name="s")
joined = left.join(right, left.b == right.b)
joined = joined[left.a]
expr = joined.filter(joined.a < 1.0)
snapshot.assert_match(to_sql(expr), "out.sql")
assert_decompile_roundtrip(expr, snapshot, check_equality=False)
def test_table_drop_consistency():
# GH2829
t = ibis.table([("a", "int64"), ("b", "string"), ("c", "timestamp")], name="t")
expected = t.select(["a", "c"])
result = t.drop("b")
assert expected.schema() == result.schema()
assert set(result.columns) == {"a", "c"}
def test_subquery_where_location(snapshot):
t = ibis.table(
[
("float_col", "float32"),
("timestamp_col", "timestamp"),
("int_col", "int32"),
("string_col", "string"),
],
name="alltypes",
)
param = ibis.param("timestamp").name("my_param")
expr = (
t[["float_col", "timestamp_col", "int_col", "string_col"]][
lambda t: t.timestamp_col < param
]
.group_by("string_col")
.aggregate(foo=lambda t: t.float_col.sum())
.foo.count()
)
out = Compiler.to_sql(expr, params={param: "20140101"})
snapshot.assert_match(out, "out.sql")
# params get different auto incremented counter identifiers
assert_decompile_roundtrip(expr, snapshot, check_equality=False)
def test_column_expr_retains_name(snapshot):
t = ibis.table([("int_col", "int32")], name="int_col_table")
expr = (t.int_col + 4).name("foo")
snapshot.assert_match(to_sql(expr), "out.sql")
assert_decompile_roundtrip(expr, snapshot)
def test_column_expr_default_name(snapshot):
t = ibis.table([("int_col", "int32")], name="int_col_table")
expr = t.int_col + 4
snapshot.assert_match(to_sql(expr), "out.sql")
assert_decompile_roundtrip(expr, snapshot)
def test_union_order_by(snapshot):
t = ibis.table(dict(a="int", b="string"), name="t")
expr = t.order_by("b").union(t.order_by("b"))
snapshot.assert_match(to_sql(expr), "out.sql")
assert_decompile_roundtrip(expr, snapshot, check_equality=False)
|
1,139 |
reload
|
import re
import angr
from PySide6.QtCore import Qt
from PySide6.QtGui import QColor
from PySide6.QtWidgets import QAbstractItemView, QMenu, QTableWidget, QTableWidgetItem
from angrmanagement.ui.dialogs.new_state import NewState
from angrmanagement.utils.namegen import NameGenerator
class QStateTableItem(QTableWidgetItem):
"""
An entry within a QStateTable
"""
def __init__(self, state, *args, **kwargs):
super().__init__(*args, **kwargs)
self.state = state
def widgets(self):
state = self.state
name = state.gui_data.name
base_name = state.gui_data.base_name
is_changed = "No" if state.gui_data.is_original else "Yes"
mode = state.mode
address = "%x" % state.addr if isinstance(state.addr, int) else "Symbolic"
state_options = {o for o, v in state.options._options.items() if v is True}
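# Display only the options that differ from the defaults of the state's mode, prefixed with + or -.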
options_plus = state_options - angr.sim_options.modes[mode]
options_minus = angr.sim_options.modes[mode] - state_options
options = " ".join([" ".join("+" + o for o in options_plus), " ".join("-" + o for o in options_minus)])
widgets = [
QTableWidgetItem(name),
QTableWidgetItem(address),
QTableWidgetItem(is_changed),
QTableWidgetItem(base_name),
QTableWidgetItem(mode),
QTableWidgetItem(options),
]
if state.gui_data.is_base:
color = QColor(0, 0, 0x80)
elif state.gui_data.is_original:
color = QColor(0, 0x80, 0)
else:
color = QColor(0, 0, 0)
for w in widgets:
w.setFlags(w.flags() & ~Qt.ItemIsEditable)
w.setForeground(color)
return widgets
class QStateTable(QTableWidget):
"""
The table which is the subject of the States View
"""
def __init__(self, workspace, instance, parent, selection_callback=None):
super().__init__(parent)
self._selected = selection_callback
header_labels = ["Name", "Address", "Changed?", "Base State", "Mode", "Options"]
self.setColumnCount(len(header_labels))
self.setHorizontalHeaderLabels(header_labels)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.items = []
self.workspace = workspace
self.instance = instance
self.states = instance.states
self.itemDoubleClicked.connect(self._on_state_selected)
self.cellDoubleClicked.connect(self._on_state_selected)
self.states.am_subscribe(self._watch_states)
self.METHOD_NAME()
def closeEvent(self, _):
self.states.am_unsubscribe(self._watch_states)
def current_state_record(self):
selected_index = self.currentRow()
if 0 <= selected_index < len(self.items):
return self.items[selected_index]
else:
return None
def METHOD_NAME(self):
# current_row = self.currentRow()
self.clearContents()
self.items = [QStateTableItem(f) for f in self.states]
items_count = len(self.items)
self.setRowCount(items_count)
for idx, item in enumerate(self.items):
for i, it in enumerate(item.widgets()):
self.setItem(idx, i, it)
# if 0 <= current_row < len(self.items):
# self.setCurrentItem(current_row, 0)
def _on_state_selected(self, *args): # pylint: disable=unused-argument
if self._selected is not None:
self._selected(self.current_state_record())
def contextMenuEvent(self, event):
sr = self.current_state_record()
menu = QMenu("", self)
menu.addAction("New state...", self._action_new_state)
menu.addSeparator()
a = menu.addAction("Duplicate state", self._action_duplicate)
if sr is None:
a.setDisabled(True)
a = menu.addAction("Delete state", self._action_delete)
if sr is None:
a.setDisabled(True)
a = menu.addAction("New simulation manager", self._action_new_simulation_manager)
if sr is None:
a.setDisabled(True)
menu.exec_(event.globalPos())
def _action_new_state(self):
dialog = NewState(self.workspace, self.instance, parent=self)
dialog.exec_()
def _action_duplicate(self):
state = self.states[self.currentRow()]
copy = state.copy()
copy.gui_data.name = self._get_copied_state_name(copy.gui_data.name)
self.states.append(copy)
self.states.am_event(src="duplicate", state=copy)
def _action_delete(self):
tmp = self.states.pop(self.currentRow())
self.states.am_event(src="delete", state=tmp)
def _action_new_simulation_manager(self):
state = self.states[self.currentRow()]
simgr_name = NameGenerator.random_name()
self.workspace.create_simulation_manager(state, simgr_name)
def _watch_states(self, **kwargs): # pylint: disable=unused-argument
self.METHOD_NAME()
def _get_copied_state_name(self, current_name):
"""
Get a non-duplicating name for the copied state.
:param str current_name: The current name of the state.
:return: A new name of the copied state.
:rtype: str
"""
m = re.match(r"^([\s\S]*) copy\s*(\d*)$", current_name)
if m:
# ends with copy
ctr_str = m.group(2)
ctr = int(ctr_str) + 1 if ctr_str else 1
current_name = m.group(1)
name = current_name + " copy %d" % ctr
else:
ctr = 0
name = current_name + " copy"
# Increment the counter until there is no conflict with existing names
all_names = {s.gui_data.name for s in self.states}
while name in all_names:
ctr += 1
name = current_name + " copy %d" % ctr
return name
|
1,140 |
test wchar parm
|
import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
try:
CALLBACK_FUNCTYPE = WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
CALLBACK_FUNCTYPE = CFUNCTYPE
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class BasicWrapTestCase(unittest.TestCase):
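# wrap() is an identity here; the subclasses below wrap arguments in objects exposing _as_parameter_ to exercise that conversion path.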
def wrap(self, param):
return param
@need_symbol('c_wchar')
def METHOD_NAME(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0))
self.assertEqual(result, 139)
self.assertIs(type(result), int)
def test_pointers(self):
f = dll._testfunc_p_p
f.restype = POINTER(c_int)
f.argtypes = [POINTER(c_int)]
# This only works if the value c_int(42) passed to the
# function is still alive while the pointer (the result) is
# used.
v = c_int(42)
self.assertEqual(pointer(v).contents.value, 42)
result = f(self.wrap(pointer(v)))
self.assertEqual(type(result), POINTER(c_int))
self.assertEqual(result.contents.value, 42)
# This one works...
result = f(self.wrap(pointer(v)))
self.assertEqual(result.contents.value, v.value)
p = pointer(c_int(99))
result = f(self.wrap(p))
self.assertEqual(result.contents.value, 99)
def test_shorts(self):
f = dll._testfunc_callback_i_if
args = []
expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def callback(v):
args.append(v)
return v
CallBack = CFUNCTYPE(c_int, c_int)
cb = CallBack(callback)
f(self.wrap(2**18), self.wrap(cb))
self.assertEqual(args, expected)
################################################################
def test_callbacks(self):
f = dll._testfunc_callback_i_if
f.restype = c_int
f.argtypes = None
MyCallback = CFUNCTYPE(c_int, c_int)
def callback(value):
#print "called back with", value
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
# test with prototype
f.argtypes = [c_int, MyCallback]
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
AnotherCallback = CALLBACK_FUNCTYPE(c_int, c_int, c_int, c_int, c_int)
# check that the prototype works: we call f with wrong
# argument types
cb = AnotherCallback(callback)
self.assertRaises(ArgumentError, f, self.wrap(-10), self.wrap(cb))
def test_callbacks_2(self):
# Can also use simple datatypes as argument type specifiers
# for the callback function.
# In this case the call receives an instance of that type
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
f.argtypes = [c_int, MyCallback]
def callback(value):
#print "called back with", value
self.assertEqual(type(value), int)
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
f.restype = c_longlong
MyCallback = CFUNCTYPE(c_longlong, c_longlong)
f.argtypes = [c_longlong, MyCallback]
def callback(value):
self.assertIsInstance(value, (int, long))
return value & 0x7FFFFFFF
cb = MyCallback(callback)
self.assertEqual(13577625587, int(f(self.wrap(1000000000000), self.wrap(cb))))
def test_byval(self):
# without prototype
ptin = POINT(1, 2)
ptout = POINT()
# EXPORT int _testfunc_byval(point in, point *pout)
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 3, 1, 2
self.assertEqual(got, expected)
# with prototype
ptin = POINT(101, 102)
ptout = POINT()
dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
dll._testfunc_byval.restype = c_int
result = dll._testfunc_byval(self.wrap(ptin), byref(ptout))
got = result, ptout.x, ptout.y
expected = 203, 101, 102
self.assertEqual(got, expected)
def test_struct_return_2H(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
dll.ret_2h_func.restype = S2H
dll.ret_2h_func.argtypes = [S2H]
inp = S2H(99, 88)
s2h = dll.ret_2h_func(self.wrap(inp))
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
def test_struct_return_8H(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
dll.ret_8i_func.restype = S8I
dll.ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = dll.ret_8i_func(self.wrap(inp))
self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
def test_recursive_as_param(self):
from ctypes import c_int
class A(object):
pass
a = A()
a._as_parameter_ = a
with self.assertRaises(RuntimeError):
c_int.from_param(a)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamWrapper(object):
def __init__(self, param):
self._as_parameter_ = param
class AsParamWrapperTestCase(BasicWrapTestCase):
wrap = AsParamWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamPropertyWrapper(object):
def __init__(self, param):
self._param = param
def getParameter(self):
return self._param
_as_parameter_ = property(getParameter)
class AsParamPropertyWrapperTestCase(BasicWrapTestCase):
wrap = AsParamPropertyWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
unittest.main()
|
1,141 |
get report rows
|
"""Import Stripe reports into BigQuery."""
import os
import sys
import warnings
from datetime import datetime, timezone
from pathlib import Path
from tempfile import TemporaryFile
from time import sleep
from typing import Any, Dict, List, Optional
import click
import requests
import stripe
import ujson
from dateutil.relativedelta import relativedelta
from google.cloud import bigquery
from requests.auth import HTTPBasicAuth
def METHOD_NAME(
api_key: Optional[str],
after_date: Optional[datetime],
before_date: Optional[datetime],
report_type: str,
columns: List[str],
):
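# With no API key, report rows are read from stdin; otherwise a report run is created via the Stripe API and its CSV result is streamed back line by line.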
if api_key is None:
yield from sys.stdin.buffer
else:
stripe.api_key = api_key
parameters: Dict[str, Any] = {"columns": columns}
if after_date:
parameters["interval_start"] = int(after_date.timestamp())
if before_date:
parameters["interval_end"] = int(before_date.timestamp())
try:
run = stripe.reporting.ReportRun.create(
report_type=report_type,
parameters=parameters,
)
except stripe.error.InvalidRequestError as e:
# Wrap exception to hide unnecessary traceback
raise click.ClickException(str(e))
click.echo(f"Waiting on report {run.id!r}", file=sys.stderr)
# wait up to 30 minutes for report to finish
timeout = datetime.utcnow() + relativedelta(minutes=30)
while datetime.utcnow() < timeout:
if run.status != "pending":
break
sleep(10)
run.refresh()
if run.status != "succeeded":
raise click.ClickException(
f"Report {run.id!r} did not succeed, status was {run.status!r}"
)
response = requests.get(
run.result.url, auth=HTTPBasicAuth(api_key, ""), stream=True
)
response.raise_for_status()
yield from (line + b"\n" for line in response.iter_lines())
@click.group("stripe", help="Commands for Stripe ETL.")
def stripe_():
"""Create the CLI group for stripe commands."""
pass
@stripe_.command("import", help=__doc__)
@click.option(
"--api-key",
help="Stripe API key to use for authentication; If not set resources will be read "
"from stdin",
)
@click.option(
"--date",
type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m", "%Y"]),
help="Creation date of resources to pull from stripe API; Added to --table "
"to ensure only that date partition is replaced",
)
@click.option(
"--after-date",
type=click.DateTime(formats=["%Y-%m-%d"]),
help="Only pull resources from stripe API with a creation date on or after this; "
"Used when importing resources older than the earliest available events",
)
@click.option(
"--before-date",
type=click.DateTime(formats=["%Y-%m-%d"]),
help="Only pull resources from stripe API with a creation date before this; "
"Used when importing resources older than the earliest available events",
)
@click.option(
"--table",
help="BigQuery Standard SQL format table ID where resources will be written; "
"if not set resources will be written to stdout",
)
@click.option(
"--quiet",
is_flag=True,
help="Write output to os.devnull instead of sys.stdout",
)
@click.option(
"--report-type",
help="Stripe report type to import",
required=True,
)
@click.option(
"--time-partitioning-field",
default="created",
help="Field to use for partitioning and clustering; if --date or --before-date or"
" --after-date are specified, values must fall within that window",
)
@click.option(
"--time-partitioning-type",
default=bigquery.TimePartitioningType.DAY,
type=click.Choice(
[
bigquery.TimePartitioningType.DAY,
bigquery.TimePartitioningType.MONTH,
bigquery.TimePartitioningType.YEAR,
]
),
help="BigQuery time partitioning type for --table",
)
def stripe_import(
api_key: Optional[str],
date: Optional[datetime],
after_date: Optional[datetime],
before_date: Optional[datetime],
table: Optional[str],
quiet: bool,
report_type: str,
time_partitioning_field: str,
time_partitioning_type: str,
):
"""Import Stripe data into BigQuery."""
if after_date:
after_date = after_date.replace(tzinfo=timezone.utc)
if before_date:
before_date = before_date.replace(tzinfo=timezone.utc)
if date:
date = date.replace(tzinfo=timezone.utc)
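# Derive the [after_date, before_date) window and the partition decorator for --table from --date, according to the partitioning granularity.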
if time_partitioning_type == bigquery.TimePartitioningType.DAY:
after_date = date
before_date = after_date + relativedelta(days=1)
if table:
table = f"{table}${date:%Y%m%d}"
elif time_partitioning_type == bigquery.TimePartitioningType.MONTH:
after_date = date.replace(day=1)
before_date = after_date + relativedelta(months=1)
if table:
table = f"{table}${date:%Y%m}"
elif time_partitioning_type == bigquery.TimePartitioningType.YEAR:
after_date = date.replace(month=1, day=1)
before_date = after_date + relativedelta(years=1)
if table:
table = f"{table}${date:%Y}"
if table:
handle = TemporaryFile(mode="w+b")
elif quiet:
handle = open(os.devnull, "w+b")
else:
handle = sys.stdout.buffer
with handle as file_obj:
path = Path(__file__).parent / f"{report_type}.schema.json"
root = bigquery.SchemaField.from_api_repr(
{"name": "root", "type": "RECORD", "fields": ujson.loads(path.read_text())}
)
columns = [f.name for f in root.fields]
for row in METHOD_NAME(
api_key, after_date, before_date, report_type, columns
):
file_obj.write(row)
if table:
if file_obj.writable():
file_obj.seek(0)
warnings.filterwarnings("ignore", module="google.auth._default")
job_config = bigquery.LoadJobConfig(
clustering_fields=[time_partitioning_field],
ignore_unknown_values=False,
time_partitioning=bigquery.TimePartitioning(
field=time_partitioning_field, type_=time_partitioning_type
),
write_disposition=bigquery.WriteDisposition.WRITE_TRUNCATE,
source_format=bigquery.SourceFormat.CSV,
skip_leading_rows=1,
schema=root.fields,
)
if "$" in table:
job_config.schema_update_options = [
bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION
]
job = bigquery.Client().load_table_from_file(
file_obj=file_obj,
destination=table,
job_config=job_config,
)
try:
click.echo(f"Waiting for {job.job_id}", file=sys.stderr)
job.result()
except Exception as e:
full_message = f"{job.job_id} failed: {e}"
for error in job.errors or ():
message = error.get("message")
if message and message != getattr(e, "message", None):
full_message += "\n" + message
raise click.ClickException(full_message)
else:
click.echo(f"{job.job_id} succeeded", file=sys.stderr)
|
1,142 |
i n classa
|
# Generated by h2py from /usr/include/netinet/in.h
IPPROTO_IP = 0
IPPROTO_HOPOPTS = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPV4 = 4
IPPROTO_IPIP = IPPROTO_IPV4
IPPROTO_TCP = 6
IPPROTO_ST = 7
IPPROTO_EGP = 8
IPPROTO_PIGP = 9
IPPROTO_RCCMON = 10
IPPROTO_NVPII = 11
IPPROTO_PUP = 12
IPPROTO_ARGUS = 13
IPPROTO_EMCON = 14
IPPROTO_XNET = 15
IPPROTO_CHAOS = 16
IPPROTO_UDP = 17
IPPROTO_MUX = 18
IPPROTO_MEAS = 19
IPPROTO_HMP = 20
IPPROTO_PRM = 21
IPPROTO_IDP = 22
IPPROTO_TRUNK1 = 23
IPPROTO_TRUNK2 = 24
IPPROTO_LEAF1 = 25
IPPROTO_LEAF2 = 26
IPPROTO_RDP = 27
IPPROTO_IRTP = 28
IPPROTO_TP = 29
IPPROTO_BLT = 30
IPPROTO_NSP = 31
IPPROTO_INP = 32
IPPROTO_SEP = 33
IPPROTO_3PC = 34
IPPROTO_IDPR = 35
IPPROTO_XTP = 36
IPPROTO_DDP = 37
IPPROTO_CMTP = 38
IPPROTO_TPXX = 39
IPPROTO_IL = 40
IPPROTO_IPV6 = 41
IPPROTO_SDRP = 42
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_IDRP = 45
IPPROTO_RSVP = 46
IPPROTO_GRE = 47
IPPROTO_MHRP = 48
IPPROTO_BHA = 49
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_INLSP = 52
IPPROTO_SWIPE = 53
IPPROTO_NHRP = 54
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_AHIP = 61
IPPROTO_CFTP = 62
IPPROTO_HELLO = 63
IPPROTO_SATEXPAK = 64
IPPROTO_KRYPTOLAN = 65
IPPROTO_RVD = 66
IPPROTO_IPPC = 67
IPPROTO_ADFS = 68
IPPROTO_SATMON = 69
IPPROTO_VISA = 70
IPPROTO_IPCV = 71
IPPROTO_CPNX = 72
IPPROTO_CPHB = 73
IPPROTO_WSN = 74
IPPROTO_PVP = 75
IPPROTO_BRSATMON = 76
IPPROTO_ND = 77
IPPROTO_WBMON = 78
IPPROTO_WBEXPAK = 79
IPPROTO_EON = 80
IPPROTO_VMTP = 81
IPPROTO_SVMTP = 82
IPPROTO_VINES = 83
IPPROTO_TTP = 84
IPPROTO_IGP = 85
IPPROTO_DGP = 86
IPPROTO_TCF = 87
IPPROTO_IGRP = 88
IPPROTO_OSPFIGP = 89
IPPROTO_SRPC = 90
IPPROTO_LARP = 91
IPPROTO_MTP = 92
IPPROTO_AX25 = 93
IPPROTO_IPEIP = 94
IPPROTO_MICP = 95
IPPROTO_SCCSP = 96
IPPROTO_ETHERIP = 97
IPPROTO_ENCAP = 98
IPPROTO_APES = 99
IPPROTO_GMTP = 100
IPPROTO_IPCOMP = 108
IPPROTO_PIM = 103
IPPROTO_PGM = 113
IPPROTO_DIVERT = 254
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPROTO_DONE = 257
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_HIFIRSTAUTO = 49152
IPPORT_HILASTAUTO = 65535
IPPORT_RESERVEDSTART = 600
def METHOD_NAME(i): return (((u_int32_t)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
INET_ADDRSTRLEN = 16
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_MULTICAST_VIF = 14
IP_RSVP_ON = 15
IP_RSVP_OFF = 16
IP_RSVP_VIF_ON = 17
IP_RSVP_VIF_OFF = 18
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_IPSEC_POLICY = 21
IP_FAITH = 22
IP_FW_ADD = 50
IP_FW_DEL = 51
IP_FW_FLUSH = 52
IP_FW_ZERO = 53
IP_FW_GET = 54
IP_FW_RESETLOG = 55
IP_DUMMYNET_CONFIGURE = 60
IP_DUMMYNET_DEL = 61
IP_DUMMYNET_FLUSH = 62
IP_DUMMYNET_GET = 64
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPPROTO_MAXID = (IPPROTO_AH + 1)
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_RTEXPIRE = 5
IPCTL_RTMINEXPIRE = 6
IPCTL_RTMAXCACHE = 7
IPCTL_SOURCEROUTE = 8
IPCTL_DIRECTEDBROADCAST = 9
IPCTL_INTRQMAXLEN = 10
IPCTL_INTRQDROPS = 11
IPCTL_STATS = 12
IPCTL_ACCEPTSOURCEROUTE = 13
IPCTL_FASTFORWARDING = 14
IPCTL_KEEPFAITH = 15
IPCTL_GIF_TTL = 16
IPCTL_MAXID = 17
# Included from netinet6/in6.h
# Included from sys/queue.h
def SLIST_HEAD_INITIALIZER(head): return \
def SLIST_ENTRY(type): return \
def STAILQ_HEAD_INITIALIZER(head): return \
def STAILQ_ENTRY(type): return \
def LIST_HEAD_INITIALIZER(head): return \
def LIST_ENTRY(type): return \
def TAILQ_HEAD_INITIALIZER(head): return \
def TAILQ_ENTRY(type): return \
def CIRCLEQ_ENTRY(type): return \
__KAME_VERSION = "20000701/FreeBSD-current"
IPV6PORT_RESERVED = 1024
IPV6PORT_ANONMIN = 49152
IPV6PORT_ANONMAX = 65535
IPV6PORT_RESERVEDMIN = 600
IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
INET6_ADDRSTRLEN = 46
IPV6_ADDR_INT32_ONE = 1
IPV6_ADDR_INT32_TWO = 2
IPV6_ADDR_INT32_MNL = 0xff010000
IPV6_ADDR_INT32_MLL = 0xff020000
IPV6_ADDR_INT32_SMP = 0x0000ffff
IPV6_ADDR_INT16_ULL = 0xfe80
IPV6_ADDR_INT16_USL = 0xfec0
IPV6_ADDR_INT16_MLL = 0xff02
IPV6_ADDR_INT32_ONE = 0x01000000
IPV6_ADDR_INT32_TWO = 0x02000000
IPV6_ADDR_INT32_MNL = 0x000001ff
IPV6_ADDR_INT32_MLL = 0x000002ff
IPV6_ADDR_INT32_SMP = 0xffff0000
IPV6_ADDR_INT16_ULL = 0x80fe
IPV6_ADDR_INT16_USL = 0xc0fe
IPV6_ADDR_INT16_MLL = 0x02ff
def IN6_IS_ADDR_UNSPECIFIED(a): return \
def IN6_IS_ADDR_LOOPBACK(a): return \
def IN6_IS_ADDR_V4COMPAT(a): return \
def IN6_IS_ADDR_V4MAPPED(a): return \
IPV6_ADDR_SCOPE_NODELOCAL = 0x01
IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
IPV6_ADDR_SCOPE_SITELOCAL = 0x05
IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
IPV6_ADDR_SCOPE_GLOBAL = 0x0e
__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
def IN6_IS_ADDR_LINKLOCAL(a): return \
def IN6_IS_ADDR_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_SCOPE_LINKLOCAL(a): return \
IPV6_OPTIONS = 1
IPV6_RECVOPTS = 5
IPV6_RECVRETOPTS = 6
IPV6_RECVDSTADDR = 7
IPV6_RETOPTS = 8
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_PKTINFO = 19
IPV6_HOPLIMIT = 20
IPV6_NEXTHOP = 21
IPV6_HOPOPTS = 22
IPV6_DSTOPTS = 23
IPV6_RTHDR = 24
IPV6_PKTOPTIONS = 25
IPV6_CHECKSUM = 26
IPV6_BINDV6ONLY = 27
IPV6_IPSEC_POLICY = 28
IPV6_FAITH = 29
IPV6_FW_ADD = 30
IPV6_FW_DEL = 31
IPV6_FW_FLUSH = 32
IPV6_FW_ZERO = 33
IPV6_FW_GET = 34
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_DEFAULT_MULTICAST_HOPS = 1
IPV6_DEFAULT_MULTICAST_LOOP = 1
IPV6_PORTRANGE_DEFAULT = 0
IPV6_PORTRANGE_HIGH = 1
IPV6_PORTRANGE_LOW = 2
IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
IPV6CTL_FORWARDING = 1
IPV6CTL_SENDREDIRECTS = 2
IPV6CTL_DEFHLIM = 3
IPV6CTL_DEFMTU = 4
IPV6CTL_FORWSRCRT = 5
IPV6CTL_STATS = 6
IPV6CTL_MRTSTATS = 7
IPV6CTL_MRTPROTO = 8
IPV6CTL_MAXFRAGPACKETS = 9
IPV6CTL_SOURCECHECK = 10
IPV6CTL_SOURCECHECK_LOGINT = 11
IPV6CTL_ACCEPT_RTADV = 12
IPV6CTL_KEEPFAITH = 13
IPV6CTL_LOG_INTERVAL = 14
IPV6CTL_HDRNESTLIMIT = 15
IPV6CTL_DAD_COUNT = 16
IPV6CTL_AUTO_FLOWLABEL = 17
IPV6CTL_DEFMCASTHLIM = 18
IPV6CTL_GIF_HLIM = 19
IPV6CTL_KAME_VERSION = 20
IPV6CTL_USE_DEPRECATED = 21
IPV6CTL_RR_PRUNE = 22
IPV6CTL_MAPPED_ADDR = 23
IPV6CTL_BINDV6ONLY = 24
IPV6CTL_RTEXPIRE = 25
IPV6CTL_RTMINEXPIRE = 26
IPV6CTL_RTMAXCACHE = 27
IPV6CTL_MAXID = 28
|
1,143 |
test lstm weight layout iofg
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Test code for LSTM."""
import numpy as np
import tvm
from tvm import te, topi
import tvm.testing
import tvm.topi.testing
def verify_lstm(
target,
dev,
seq_len,
batch_size,
in_dim,
hidden_dim,
proj_dim=0,
bias=True,
zero_init=True,
peephole=False,
reverse=False,
weight_layout="IFGO",
):
out_dim = proj_dim if proj_dim > 0 else hidden_dim
def rand(*shape):
sqrt_k = np.sqrt(1 / hidden_dim)
return np.random.uniform(-sqrt_k, sqrt_k, size=shape).astype("float32")
def get_ref_data():
Xs = np.random.normal(size=(seq_len, batch_size, in_dim)).astype("float32")
Wi = rand(4 * hidden_dim, in_dim)
Wh = rand(4 * hidden_dim, out_dim)
Bi = None
Bh = None
h0 = None
c0 = None
proj = None
p_i = None
p_f = None
p_o = None
if bias:
Bi = rand(4 * hidden_dim)
Bh = rand(4 * hidden_dim)
if not zero_init:
h0 = np.random.normal(size=(batch_size, out_dim)).astype("float32")
c0 = np.random.normal(size=(batch_size, hidden_dim)).astype("float32")
if proj_dim > 0:
proj = rand(proj_dim, hidden_dim)
if peephole:
p_i, p_f, p_o = [rand(batch_size, hidden_dim) for _ in range(3)]
hs, cs = tvm.topi.testing.lstm_python(
Xs,
Wi,
Wh,
Bi=Bi,
Bh=Bh,
h_init=h0,
c_init=c0,
proj=proj,
p_i=p_i,
p_f=p_f,
p_o=p_o,
reverse=reverse,
weight_layout=weight_layout,
)
return [Xs, Wi, Wh, Bi, Bh, h0, c0, proj, p_i, p_f, p_o], [hs, cs]
args_np, (hs_np, cs_np) = get_ref_data()
args = [te.placeholder(a.shape, "float32") if a is not None else a for a in args_np]
real_args = [a for a in args if a is not None]
hs, cs = topi.nn.lstm(*args, reverse=reverse, weight_layout=weight_layout)
with tvm.target.Target(target):
sch = topi.generic.schedule_lstm([hs, cs])
func = tvm.build(sch, real_args + [hs, cs], target=target)
args_nd = [tvm.nd.array(a, dev) for a in args_np if a is not None]
hs_nd = tvm.nd.array(np.zeros((seq_len, batch_size, out_dim), "float32"), dev)
cs_nd = tvm.nd.array(np.zeros((seq_len, batch_size, hidden_dim), "float32"), dev)
func(*args_nd, hs_nd, cs_nd)
tvm.testing.assert_allclose(hs_nd.numpy(), hs_np, rtol=1e-4)
tvm.testing.assert_allclose(cs_nd.numpy(), cs_np, rtol=1e-4)
def test_lstm():
verify_lstm(
"llvm",
tvm.cpu(0),
1,
1,
1,
1,
0,
True,
True,
False,
False,
"IFGO",
)
verify_lstm(
"llvm",
tvm.cpu(0),
8,
4,
8,
16,
0,
True,
False,
False,
False,
"IFGO",
)
def test_lstm_proj():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 8, True, True, False, False, "IFGO")
def test_lstm_peephole():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 0, True, True, True, False, "IFGO")
def test_lstm_reverse():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 0, True, True, False, True, "IFGO")
def METHOD_NAME():
# IOFG is used by ONNX, while IFGO is used by PyTorch
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 0, True, True, False, False, "IOFG")
def test_lstm_assorted():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 16, True, False, True, True, "OIGF")
|
1,144 |
show permanent help
|
from typing import Any
from AnyQt.QtWidgets import QTextBrowser
from AnyQt.QtGui import QStatusTipEvent, QWhatsThisClickedEvent
from AnyQt.QtCore import QObject, QCoreApplication, QEvent, QTimer, QUrl
from AnyQt.QtCore import pyqtSignal as Signal
class QuickHelp(QTextBrowser):
#: Emitted when the shown text changes.
textChanged = Signal()
def __init__(self, *args, **kwargs):
# type: (Any, Any) -> None
super().__init__(*args, **kwargs)
self.setOpenExternalLinks(False)
self.setOpenLinks(False)
self.__text = ""
self.__permanentText = ""
self.__defaultText = ""
self.__timer = QTimer(self, timeout=self.__on_timeout,
singleShot=True)
self.anchorClicked.connect(self.__on_anchorClicked)
def showHelp(self, text, timeout=0):
# type: (str, int) -> None
"""
        Show help for `timeout` milliseconds. If `timeout` is 0, show the
        text until it is cleared with `clearHelp` or `showHelp` is called
        with an empty string.
"""
if self.__text != text:
self.__text = text
self.__update()
self.textChanged.emit()
if timeout > 0:
self.__timer.start(timeout)
def clearHelp(self):
# type: () -> None
"""
Clear help text previously set with `showHelp`.
"""
self.__timer.stop()
self.showHelp("")
def METHOD_NAME(self, text):
# type: (str) -> None
"""
Set permanent help text. The text may be temporarily overridden
by showHelp but will be shown again when that is cleared.
"""
if self.__permanentText != text:
self.__permanentText = text
self.__update()
self.textChanged.emit()
def setDefaultText(self, text):
# type: (str) -> None
"""
        Set default help text. The text is overridden by normal and permanent
        help messages, but is shown again after such messages are cleared.
"""
if self.__defaultText != text:
self.__defaultText = text
self.__update()
self.textChanged.emit()
def currentText(self):
# type: () -> str
"""
        Return the currently shown text.
"""
return self.__text or self.__permanentText
def __update(self):
# type: () -> None
if self.__text:
self.setHtml(self.__text)
elif self.__permanentText:
self.setHtml(self.__permanentText)
else:
self.setHtml(self.__defaultText)
def __on_timeout(self):
# type: () -> None
if self.__text:
self.__text = ""
self.__update()
self.textChanged.emit()
def __on_anchorClicked(self, anchor):
# type: (QUrl) -> None
ev = QuickHelpDetailRequestEvent(anchor.toString(), anchor)
QCoreApplication.postEvent(self, ev)
class QuickHelpTipEvent(QStatusTipEvent):
Temporary, Normal, Permanent = range(1, 4)
def __init__(self, tip, html="", priority=Normal, timeout=0):
# type: (str, str, int, int) -> None
super().__init__(tip)
self.__html = html or ""
self.__priority = priority
self.__timeout = timeout
def html(self):
# type: () -> str
return self.__html
def priority(self):
# type: () -> int
return self.__priority
def timeout(self):
# type: () -> int
return self.__timeout
class QuickHelpDetailRequestEvent(QWhatsThisClickedEvent):
def __init__(self, href, url):
# type: (str, QUrl) -> None
super().__init__(href)
self.__url = QUrl(url)
def url(self):
# type: () -> QUrl
return QUrl(self.__url)
class StatusTipPromoter(QObject):
"""
Promotes `QStatusTipEvent` to `QuickHelpTipEvent` using ``whatsThis``
property of the object.
"""
def eventFilter(self, obj, event):
# type: (QObject, QEvent) -> bool
if event.type() == QEvent.StatusTip and \
not isinstance(event, QuickHelpTipEvent) and \
hasattr(obj, "whatsThis") and \
callable(obj.whatsThis):
assert isinstance(event, QStatusTipEvent)
tip = event.tip()
try:
text = obj.whatsThis()
except Exception:
text = None
if text:
ev = QuickHelpTipEvent(tip, text if tip else "")
return QCoreApplication.sendEvent(obj, ev)
return super().eventFilter(obj, event)
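# A minimal usage sketch (assumes a QApplication is already running) of the
# text precedence implemented by __update(): temporary text from showHelp()
# wins over the permanent text set via the setter above, which in turn wins
# over the default text.
def example_quick_help_precedence():
    from AnyQt.QtWidgets import QApplication
    _app = QApplication.instance() or QApplication([])
    help_widget = QuickHelp()
    help_widget.setDefaultText("default")
    help_widget.METHOD_NAME("permanent")
    help_widget.showHelp("temporary", timeout=2000)
    # Until the timeout clears it, the temporary text is reported;
    # afterwards the permanent text is shown again.
    assert help_widget.currentText() == "temporary"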
|
1,145 |
construct nginx config
|
# -*- coding: utf-8 -*-
"""General purpose nginx test configuration generator."""
import getpass
from typing import Optional
import pkg_resources
def METHOD_NAME(nginx_root: str, nginx_webroot: str, http_port: int, https_port: int,
other_port: int, default_server: bool, key_path: Optional[str] = None,
cert_path: Optional[str] = None, wtf_prefix: str = 'le') -> str:
"""
This method returns a full nginx configuration suitable for integration tests.
:param str nginx_root: nginx root configuration path
:param str nginx_webroot: nginx webroot path
:param int http_port: HTTP port to listen on
:param int https_port: HTTPS port to listen on
:param int other_port: other HTTP port to listen on
:param bool default_server: True to set a default server in nginx config, False otherwise
:param str key_path: the path to a SSL key
:param str cert_path: the path to a SSL certificate
:param str wtf_prefix: the prefix to use in all domains handled by this nginx config
:return: a string containing the full nginx configuration
:rtype: str
"""
key_path = key_path if key_path \
else pkg_resources.resource_filename('certbot_integration_tests', 'assets/key.pem')
cert_path = cert_path if cert_path \
else pkg_resources.resource_filename('certbot_integration_tests', 'assets/cert.pem')
return '''\
# This error log will be written regardless of server scope error_log
# definitions, so we have to set this here in the main scope.
#
# Even doing this, Nginx will still try to create the default error file, and
# log a non-fatal error when it fails. After that things will work, however.
error_log {nginx_root}/error.log;
# The pidfile will be written to /var/run unless this is set.
pid {nginx_root}/nginx.pid;
user {user};
worker_processes 1;
events {{
worker_connections 1024;
}}
# “This comment contains valid Unicode”.
http {{
# Set an array of temp, cache and log file options that will otherwise default to
# restricted locations accessible only to root.
client_body_temp_path {nginx_root}/client_body;
fastcgi_temp_path {nginx_root}/fastcgi_temp;
proxy_temp_path {nginx_root}/proxy_temp;
#scgi_temp_path {nginx_root}/scgi_temp;
#uwsgi_temp_path {nginx_root}/uwsgi_temp;
access_log {nginx_root}/error.log;
# This should be turned off in a Virtualbox VM, as it can cause some
# interesting issues with data corruption in delivered files.
sendfile off;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
#include /etc/nginx/mime.types;
index index.html index.htm index.php;
log_format main '$remote_addr - $remote_user [$time_local] $status '
'"$request" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
default_type application/octet-stream;
server {{
# IPv4.
listen {http_port} {default_server};
# IPv6.
listen [::]:{http_port} {default_server};
server_name nginx.{wtf_prefix}.wtf nginx2.{wtf_prefix}.wtf;
root {nginx_webroot};
location / {{
# First attempt to serve request as file, then as directory, then fall
# back to index.html.
try_files $uri $uri/ /index.html;
}}
}}
server {{
listen {http_port};
listen [::]:{http_port};
server_name nginx3.{wtf_prefix}.wtf;
root {nginx_webroot};
location /.well-known/ {{
return 404;
}}
return 301 https://$host$request_uri;
}}
server {{
listen {other_port};
listen [::]:{other_port};
server_name nginx4.{wtf_prefix}.wtf nginx5.{wtf_prefix}.wtf;
}}
server {{
listen {http_port};
listen [::]:{http_port};
listen {https_port} ssl;
listen [::]:{https_port} ssl;
if ($scheme != "https") {{
return 301 https://$host$request_uri;
}}
server_name nginx6.{wtf_prefix}.wtf nginx7.{wtf_prefix}.wtf;
ssl_certificate {cert_path};
ssl_certificate_key {key_path};
}}
}}
'''.format(nginx_root=nginx_root, nginx_webroot=nginx_webroot, user=getpass.getuser(),
http_port=http_port, https_port=https_port, other_port=other_port,
default_server='default_server' if default_server else '', wtf_prefix=wtf_prefix,
key_path=key_path, cert_path=cert_path)
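# A minimal usage sketch (paths and ports are illustrative assumptions) of the
# generator above: render the configuration and write it to a throwaway file.
def example_write_nginx_config(workspace: str = '/tmp/nginx-test') -> str:
    config_text = METHOD_NAME(
        nginx_root=workspace,
        nginx_webroot=workspace + '/webroot',
        http_port=5002,
        https_port=5001,
        other_port=5003,
        default_server=True,
    )
    conf_path = workspace + '/nginx.conf'
    with open(conf_path, 'w') as conf_file:
        conf_file.write(config_text)
    return conf_path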
|
1,146 |
test parse response with primary key
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from http import HTTPStatus
from unittest.mock import MagicMock
import pytest
import requests
from source_tplcentral.streams import TplcentralStream
@pytest.fixture
def config():
return {
"config": {
"authenticator": None,
"url_base": "https://secure-wms.com/",
"client_id": "xxx",
"client_secret": "yyy",
"user_login_id": 123,
"tpl_key": "{00000000-0000-0000-0000-000000000000}",
"customer_id": 4,
"facility_id": 5,
"start_date": "2021-10-01",
}
}
@pytest.fixture
def patch_base_class(mocker):
mocker.patch.object(TplcentralStream, "path", "v0/example_endpoint")
mocker.patch.object(TplcentralStream, "primary_key", "test_primary_key")
mocker.patch.object(TplcentralStream, "cursor_field", "test_cursor_field")
mocker.patch.object(TplcentralStream, "collection_field", "CollectionField")
mocker.patch.object(TplcentralStream, "__abstractmethods__", set())
@pytest.fixture
def patch_base_class_page_size(mocker):
mocker.patch.object(TplcentralStream, "page_size", 10)
@pytest.fixture
def patch_base_class_upstream_primary_key(mocker):
mocker.patch.object(TplcentralStream, "upstream_primary_key", "Nested.PrimaryKey")
@pytest.fixture
def patch_base_class_upstream_cursor_field(mocker):
mocker.patch.object(TplcentralStream, "upstream_cursor_field", "Nested.Cursor")
@pytest.fixture
def stream(patch_base_class, config):
return TplcentralStream(**config)
def test_request_params(stream):
inputs = {"stream_slice": None, "stream_state": None, "next_page_token": "next_page_token_is_the_params"}
expected_params = "next_page_token_is_the_params"
assert stream.request_params(**inputs) == expected_params
def test_next_page_token(stream, requests_mock):
# No results
requests_mock.get("https://dummy", json={"TotalResults": 0, "CollectionField": []})
resp = requests.get("https://dummy")
expected_next_page_token = None
assert stream.next_page_token(resp) == expected_next_page_token
# Invalid response
requests_mock.get("https://dummy", json={"TotalResults": 1000, "CollectionField": []})
resp = requests.get("https://dummy")
expected_next_page_token = None
assert stream.next_page_token(resp) == expected_next_page_token
# Implicit page size
requests_mock.get("https://dummy", json={"TotalResults": 1000, "CollectionField": [None, None, None]})
resp = requests.get("https://dummy")
expected_next_page_token = {"pgsiz": 3, "pgnum": 2}
assert stream.next_page_token(resp) == expected_next_page_token
def test_next_page_token_with_page_size(patch_base_class_page_size, stream, requests_mock):
# Explicit page size
requests_mock.get("https://dummy", json={"TotalResults": 1000, "CollectionField": []})
resp = requests.get("https://dummy")
expected_next_page_token = {"pgsiz": 10, "pgnum": 2}
assert stream.next_page_token(resp) == expected_next_page_token
def test_parse_response(stream, requests_mock):
requests_mock.get(
"https://dummy",
json={
"TotalResults": 2,
"CollectionField": [
{
"Foo": "foo",
"Bar": {
"Baz": "baz",
},
"_links": [],
}
],
},
)
resp = requests.get("https://dummy")
inputs = {"response": resp}
expected_parsed_object = {"Bar": {"Baz": "baz"}, "Foo": "foo"}
assert next(stream.parse_response(**inputs)) == expected_parsed_object
def METHOD_NAME(patch_base_class_upstream_primary_key, stream, requests_mock):
requests_mock.get(
"https://dummy",
json={
"TotalResults": 2,
"CollectionField": [
{
"Nested": {
"PrimaryKey": 42,
},
},
],
},
)
resp = requests.get("https://dummy")
inputs = {"response": resp}
expected_parsed_object = {"Nested": {"PrimaryKey": 42}, "test_primary_key": 42}
assert next(stream.parse_response(**inputs)) == expected_parsed_object
def test_parse_response_with_cursor_field(patch_base_class_upstream_cursor_field, stream, requests_mock):
requests_mock.get(
"https://dummy",
json={
"TotalResults": 2,
"CollectionField": [
{
"Nested": {
"Cursor": 43,
},
},
],
},
)
resp = requests.get("https://dummy")
inputs = {"response": resp}
expected_parsed_object = {"Nested": {"Cursor": 43}, "test_cursor_field": 43}
assert next(stream.parse_response(**inputs)) == expected_parsed_object
def test_request_headers(stream):
inputs = {"stream_slice": None, "stream_state": None, "next_page_token": None}
expected_headers = {}
assert stream.request_headers(**inputs) == expected_headers
def test_http_method(stream):
expected_method = "GET"
assert stream.http_method == expected_method
@pytest.mark.parametrize(
("http_status", "should_retry"),
[
(HTTPStatus.OK, False),
(HTTPStatus.BAD_REQUEST, False),
(HTTPStatus.TOO_MANY_REQUESTS, True),
(HTTPStatus.INTERNAL_SERVER_ERROR, True),
],
)
def test_should_retry(stream, http_status, should_retry):
response_mock = MagicMock()
response_mock.status_code = http_status
assert stream.should_retry(response_mock) == should_retry
def test_backoff_time(stream):
response_mock = MagicMock()
expected_backoff_time = None
assert stream.backoff_time(response_mock) == expected_backoff_time
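# Hypothetical helper (not the source_tplcentral implementation) illustrating
# the dotted-path lookup that the upstream_primary_key/upstream_cursor_field
# tests above rely on: "Nested.PrimaryKey" resolves to record["Nested"]["PrimaryKey"].
def get_nested_value(record: dict, dotted_path: str):
    value = record
    for key in dotted_path.split("."):
        value = value[key]
    return value
def example_get_nested_value():
    record = {"Nested": {"PrimaryKey": 42}}
    assert get_nested_value(record, "Nested.PrimaryKey") == 42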
|
1,147 |
annotate sse
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.application.dssp"
__author__ = "Patrick Kunzmann"
__all__ = ["DsspApp"]
from tempfile import NamedTemporaryFile
from ..localapp import LocalApp, cleanup_tempfile
from ..application import AppState, requires_state
from ...structure.io.pdbx.file import PDBxFile
from ...structure.io.pdbx.convert import set_structure
import numpy as np
class DsspApp(LocalApp):
r"""
Annotate the secondary structure of a protein structure using the
*DSSP* software.
Internally this creates a :class:`Popen` instance, which handles
the execution.
DSSP differentiates between 8 different types of secondary
structure elements:
- C: loop, coil or irregular
- H: :math:`{\alpha}`-helix
- B: :math:`{\beta}`-bridge
- E: extended strand, participation in :math:`{\beta}`-ladder
- G: 3 :sub:`10`-helix
- I: :math:`{\pi}`-helix
- T: hydrogen bonded turn
- S: bend
Parameters
----------
atom_array : AtomArray
The atom array to be annotated.
bin_path : str, optional
        Path of the *DSSP* binary.
Examples
--------
>>> app = DsspApp(atom_array)
>>> app.start()
>>> app.join()
>>> print(app.get_sse())
['C' 'H' 'H' 'H' 'H' 'H' 'H' 'H' 'T' 'T' 'G' 'G' 'G' 'G' 'T' 'C' 'C' 'C'
'C' 'C']
"""
def __init__(self, atom_array, bin_path="mkdssp"):
super().__init__(bin_path)
        # mkdssp also requires the
        # 'occupancy', 'b_factor' and 'charge' fields
# -> Add these annotations to a copy of the input structure
self._array = atom_array.copy()
categories = self._array.get_annotation_categories()
if "charge" not in categories:
self._array.set_annotation(
"charge", np.zeros(self._array.array_length(), dtype=int)
)
if "b_factor" not in categories:
self._array.set_annotation(
"b_factor", np.zeros(self._array.array_length(), dtype=float)
)
if "occupancy" not in categories:
self._array.set_annotation(
"occupancy", np.ones(self._array.array_length(), dtype=float)
)
self._in_file = NamedTemporaryFile("w", suffix=".cif", delete=False)
self._out_file = NamedTemporaryFile("r", suffix=".dssp", delete=False)
def run(self):
in_file = PDBxFile()
set_structure(in_file, self._array, data_block="DSSP_INPUT")
in_file.write(self._in_file)
self._in_file.flush()
self.set_arguments(
["-i", self._in_file.name, "-o", self._out_file.name]
)
super().run()
def evaluate(self):
super().evaluate()
lines = self._out_file.read().split("\n")
# Index where SSE records start
sse_start = None
for i, line in enumerate(lines):
if line.startswith(" # RESIDUE AA STRUCTURE"):
sse_start = i+1
if sse_start is None:
raise ValueError("DSSP file does not contain SSE records")
# Remove "!" for missing residues
lines = [
line for line in lines[sse_start:]
if len(line) != 0 and line[13] != "!"
]
self._sse = np.zeros(len(lines), dtype="U1")
# Parse file for SSE letters
for i, line in enumerate(lines):
self._sse[i] = line[16]
self._sse[self._sse == " "] = "C"
def clean_up(self):
super().clean_up()
cleanup_tempfile(self._in_file)
cleanup_tempfile(self._out_file)
@requires_state(AppState.JOINED)
def get_sse(self):
"""
Get the resulting secondary structure assignment.
Returns
-------
sse : ndarray, dtype="U1"
An array containing DSSP secondary structure symbols
corresponding to the residues in the input atom array.
"""
return self._sse
@staticmethod
def METHOD_NAME(atom_array, bin_path="mkdssp"):
"""
Perform a secondary structure assignment to an atom array.
        This is a convenience function that wraps the :class:`DsspApp`
execution.
Parameters
----------
atom_array : AtomArray
The atom array to be annotated.
bin_path : str, optional
            Path of the DSSP binary.
Returns
-------
sse : ndarray, dtype="U1"
An array containing DSSP secondary structure symbols
corresponding to the residues in the input atom array.
"""
app = DsspApp(atom_array, bin_path)
app.start()
app.join()
return app.get_sse()
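# A minimal usage sketch (assumes `mkdssp` is on the PATH and `atom_array` is
# a protein AtomArray) of the convenience wrapper above.
def example_annotate_sse(atom_array):
    sse = DsspApp.METHOD_NAME(atom_array)
    # `sse` holds one-letter DSSP symbols, one per residue
    return np.unique(sse, return_counts=True)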
|
1,148 |
test filter text
|
# Copyright 2006 Joe Wreschnig
# 2013 Christoph Reiter
# 2016-22 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
from gi.repository import Gtk
from quodlibet.browsers._base import FakeDisplayItem as FDI, \
DisplayPatternMixin, FakeDisplayItem
from quodlibet.util.cover import CoverManager
from tests import TestCase, init_fake_app, destroy_fake_app, mkstemp, run_gtk_loop
from .helper import realized, dummy_path
from quodlibet import browsers, app
from quodlibet.formats import AudioFile
from quodlibet import config
from quodlibet.browsers import Browser
from quodlibet.library import SongFileLibrary, SongLibrarian
SONGS = [
AudioFile({
"title": "one",
"artist": "piman",
"~filename": dummy_path(u"/dev/null"),
}),
AudioFile({
"title": "two",
"artist": "mu",
"~filename": dummy_path(u"/dev/zero"),
}),
AudioFile({
"title": "three",
"artist": "boris",
"~filename": dummy_path(u"/bin/ls"),
})
]
SONGS.sort()
for song in SONGS:
song.sanitize()
class TBrowser(TestCase):
def setUp(self):
self.browser = Browser()
def test_can_filter(self):
for key in ["foo", "title", "fake~key", "~woobar", "~#huh"]:
self.failIf(self.browser.can_filter(key))
def test_defaults(self):
self.failUnless(self.browser.background)
self.failIf(self.browser.can_reorder)
self.failIf(self.browser.headers)
def test_status_bar(self):
self.assertEqual(self.browser.status_text(1, "21s"),
"1 song (21s)")
self.assertEqual(self.browser.status_text(101, "2:03"),
"101 songs (2:03)")
def tearDown(self):
self.browser = None
class TBrowserBase(TestCase):
Kind = None
def setUp(self):
config.init()
init_fake_app()
app.cover_manager = CoverManager()
self.library = library = SongFileLibrary()
library.librarian = SongLibrarian()
library.add(SONGS)
self.Kind.init(library)
self.b = self.Kind(library)
def tearDown(self):
self.b.destroy()
self.library.librarian = None
self.library.destroy()
config.quit()
destroy_fake_app()
class TBrowserMixin:
def test_menu(self):
# FIXME: the playlist browser accesses the song list directly
if self.b.name == "Playlists":
return
menu = self.b.Menu([], self.library, [])
self.assertTrue(isinstance(menu, Gtk.Menu))
def test_key(self):
self.assertEqual(browsers.get(browsers.name(self.Kind)), self.Kind)
def test_pack_unpack(self):
to_pack = Gtk.Button()
container = self.b.pack(to_pack)
self.b.unpack(container, to_pack)
def test_pack_noshow_songpane(self):
to_pack = Gtk.Button()
to_pack.hide()
container = self.b.pack(to_pack)
self.assertFalse(to_pack.get_visible())
self.b.unpack(container, to_pack)
self.assertFalse(to_pack.get_visible())
def test_name(self):
self.failIf("_" in self.b.name)
self.failUnless("_" in self.b.accelerated_name)
def test_init(self):
self.Kind.init(self.library)
def test_active_filter(self):
with realized(self.b):
if self.b.active_filter is not None:
self.b.active_filter(SONGS[0])
def test_save_restore(self):
self.b.restore()
self.b.finalize(True)
try:
self.b.save()
except NotImplementedError:
pass
def test_msic(self):
with realized(self.b):
self.b.activate()
self.b.status_text(1000)
self.b.status_text(1)
song = AudioFile({"~filename": dummy_path(u"/fake")})
song.sanitize()
self.b.scroll(song)
def test_filters_caps(self):
with realized(self.b):
self.failUnless(isinstance(self.b.can_filter_tag("foo"), bool))
self.failUnless(isinstance(self.b.can_filter_text(), bool))
self.failUnless(isinstance(self.b.can_filter("foo"), bool))
def METHOD_NAME(self):
with realized(self.b):
if self.b.can_filter_tag("foo"):
self.b.filter("foo", ["bar"])
if self.b.can_filter_tag("(((((##!!!!))),"):
self.b.filter("(((((##!!!!))),", ["(((((##!!!!))),"])
if self.b.can_filter_text():
self.b.filter_text("foo")
self.b.filter_text("(((((##!!!!))),,,==")
def test_get_filter_text(self):
with realized(self.b):
if self.b.can_filter_text():
self.assertEqual(self.b.get_filter_text(), u"")
self.assertTrue(
isinstance(self.b.get_filter_text(), str))
self.b.filter_text(u"foo")
self.assertEqual(self.b.get_filter_text(), u"foo")
self.assertTrue(
isinstance(self.b.get_filter_text(), str))
def test_filter_albums(self):
with realized(self.b):
if self.b.can_filter_albums():
self.b.filter_albums([])
self.b.filter_albums([object])
self.b.filter_albums(self.library.albums.values())
def test_filter_other(self):
with realized(self.b):
self.b.unfilter()
def test_cover_art_changed(self):
# See #4110
app.cover_manager.emit("cover-changed", SONGS[2:3])
run_gtk_loop()
# No (generic) assertions, but ensure coverage
class TFakeDisplayItem(TestCase):
def test_call(self):
self.assertEqual(FDI()("title"), "Title")
self.assertEqual(FDI()("~title~artist"), "Title - Artist")
self.assertEqual(FDI(title="foo")("title"), "foo")
self.assertEqual(FDI(title="f")("~title~artist"), "f - Artist")
self.assertEqual(FDI()("~#rating"), "Rating")
self.assertEqual(FDI({"~#rating": 0.5})("~#rating"), 0.5)
self.assertEqual(FDI()("~#rating:max"), "Rating<max>")
def test_get(self):
self.assertEqual(FDI().get("title"), "Title")
def test_comma(self):
self.assertEqual(FDI().comma("title"), "Title")
self.assertEqual(FDI({"~#rating": 0.5}).comma("~#rating"), 0.5)
self.assertEqual(FDI(title="a\nb").comma("title"), "a, b")
class DummyDPM(DisplayPatternMixin):
fd, _PATTERN_FN = mkstemp()
os.close(fd)
class TDisplayPatternMixin(TestCase):
TEST_PATTERN = u"<~name>: <artist|<artist>|?> [b]<~length>[/b]"
def setUp(self):
with open(DummyDPM._PATTERN_FN, "wb") as f:
f.write(self.TEST_PATTERN.encode("utf-8") + b"\n")
@classmethod
def tearDownClass(cls):
os.unlink(DummyDPM._PATTERN_FN)
def test_loading_pattern(self):
dpm = DummyDPM()
dpm.load_pattern()
self.failUnlessEqual(dpm.display_pattern_text, self.TEST_PATTERN)
def test_updating_pattern(self):
dpm = DummyDPM()
dpm.load_pattern()
dpm.update_pattern("static")
self.failUnlessEqual(
dpm.display_pattern % FakeDisplayItem(),
"static")
def test_markup(self):
dpm = DummyDPM()
dpm.load_pattern()
item = FakeDisplayItem({"~length": "2:34"})
self.failUnlessEqual(dpm.display_pattern % item,
"Name: Artist <b>2:34</b>")
browsers.init()
# create a new test class for each browser
for browser in browsers.browsers:
cls = TBrowserBase
name = "TB" + browser.__name__
new_test = type(name, (TBrowserBase, TBrowserMixin), {})
new_test.Kind = browser
globals()[name] = new_test
|
1,149 |
frontend looplift
|
import warnings
from numba.core import (errors, types, typing, funcdesc, config, pylowering,
transforms)
from numba.core.compiler_machinery import (FunctionPass, LoweringPass,
register_pass)
from collections import defaultdict
@register_pass(mutates_CFG=True, analysis_only=False)
class ObjectModeFrontEnd(FunctionPass):
_name = "object_mode_front_end"
def __init__(self):
FunctionPass.__init__(self)
def METHOD_NAME(self, state):
"""
Loop lifting analysis and transformation
"""
loop_flags = state.flags.copy()
outer_flags = state.flags.copy()
# Do not recursively loop lift
outer_flags.enable_looplift = False
loop_flags.enable_looplift = False
if not state.flags.enable_pyobject_looplift:
loop_flags.enable_pyobject = False
loop_flags.enable_ssa = False
main, loops = transforms.loop_lifting(state.func_ir,
typingctx=state.typingctx,
targetctx=state.targetctx,
locals=state.locals,
flags=loop_flags)
if loops:
# Some loops were extracted
if config.DEBUG_FRONTEND or config.DEBUG:
for loop in loops:
print("Lifting loop", loop.get_source_location())
from numba.core.compiler import compile_ir
cres = compile_ir(state.typingctx, state.targetctx, main,
state.args, state.return_type,
outer_flags, state.locals,
lifted=tuple(loops), lifted_from=None,
is_lifted_loop=True)
return cres
def run_pass(self, state):
from numba.core.compiler import _EarlyPipelineCompletion
# NOTE: That so much stuff, including going back into the compiler, is
# captured in a single pass is not ideal.
if state.flags.enable_looplift:
assert not state.lifted
cres = self.METHOD_NAME(state)
if cres is not None:
raise _EarlyPipelineCompletion(cres)
# Fallback typing: everything is a python object
state.typemap = defaultdict(lambda: types.pyobject)
state.calltypes = defaultdict(lambda: types.pyobject)
state.return_type = types.pyobject
return True
@register_pass(mutates_CFG=True, analysis_only=False)
class ObjectModeBackEnd(LoweringPass):
_name = "object_mode_back_end"
def __init__(self):
LoweringPass.__init__(self)
def _py_lowering_stage(self, targetctx, library, interp, flags):
fndesc = funcdesc.PythonFunctionDescriptor.from_object_mode_function(
interp
)
with targetctx.push_code_library(library):
lower = pylowering.PyLower(targetctx, library, fndesc, interp)
lower.lower()
if not flags.no_cpython_wrapper:
lower.create_cpython_wrapper()
env = lower.env
call_helper = lower.call_helper
del lower
from numba.core.compiler import _LowerResult # TODO: move this
if flags.no_compile:
return _LowerResult(fndesc, call_helper, cfunc=None, env=env)
else:
# Prepare for execution
cfunc = targetctx.get_executable(library, fndesc, env)
return _LowerResult(fndesc, call_helper, cfunc=cfunc, env=env)
def run_pass(self, state):
"""
Lowering for object mode
"""
if state.library is None:
codegen = state.targetctx.codegen()
state.library = codegen.create_library(state.func_id.func_qualname)
# Enable object caching upfront, so that the library can
# be later serialized.
state.library.enable_object_caching()
def backend_object_mode():
"""
Object mode compilation
"""
if len(state.args) != state.nargs:
# append missing
# BUG?: What's going on with nargs here?
# check state.nargs vs self.nargs on original code
state.args = (tuple(state.args) + (types.pyobject,) *
(state.nargs - len(state.args)))
return self._py_lowering_stage(state.targetctx,
state.library,
state.func_ir,
state.flags)
lowered = backend_object_mode()
signature = typing.signature(state.return_type, *state.args)
from numba.core.compiler import compile_result
state.cr = compile_result(
typing_context=state.typingctx,
target_context=state.targetctx,
entry_point=lowered.cfunc,
typing_error=state.status.fail_reason,
type_annotation=state.type_annotation,
library=state.library,
call_helper=lowered.call_helper,
signature=signature,
objectmode=True,
lifted=state.lifted,
fndesc=lowered.fndesc,
environment=lowered.env,
metadata=state.metadata,
reload_init=state.reload_init,
)
# Warn, deprecated behaviour, code compiled in objmode without
# force_pyobject indicates fallback from nopython mode
if not state.flags.force_pyobject:
# first warn about object mode and yes/no to lifted loops
if len(state.lifted) > 0:
warn_msg = ('Function "%s" was compiled in object mode without'
' forceobj=True, but has lifted loops.' %
(state.func_id.func_name,))
else:
warn_msg = ('Function "%s" was compiled in object mode without'
' forceobj=True.' % (state.func_id.func_name,))
warnings.warn(errors.NumbaWarning(warn_msg,
state.func_ir.loc))
url = ("https://numba.readthedocs.io/en/stable/reference/"
"deprecation.html#deprecation-of-object-mode-fall-"
"back-behaviour-when-using-jit")
msg = ("\nFall-back from the nopython compilation path to the "
"object mode compilation path has been detected. This is "
"deprecated behaviour that will be removed in Numba 0.59.0."
"\n\nFor more information visit %s" % url)
warnings.warn(errors.NumbaDeprecationWarning(msg,
state.func_ir.loc))
if state.flags.release_gil:
warn_msg = ("Code running in object mode won't allow parallel"
" execution despite nogil=True.")
warnings.warn_explicit(warn_msg, errors.NumbaWarning,
state.func_id.filename,
state.func_id.firstlineno)
return True
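# A minimal sketch (not part of the Numba code base) of the kind of function
# the loop-lifting front end above targets: the body is compiled in object
# mode, while the purely numeric loop is a candidate for extraction ("lifting")
# and nopython compilation.
def example_lifted_loop():
    from numba import jit
    @jit(forceobj=True)  # object mode for the function body; looplift is enabled by default
    def total_of_squares(values):
        meta = {"label": "sum of squares", "count": len(values)}  # plain Python objects
        acc = 0
        for v in values:  # numeric-only loop, a lifting candidate
            acc += v * v
        return meta, acc
    return total_of_squares(list(range(10)))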
|
1,150 |
grab
|
from typing import Optional
import qrcode
import qrcode.exceptions
from PyQt5.QtGui import QColor, QPen
import PyQt5.QtGui as QtGui
from PyQt5.QtCore import Qt, QRect
from PyQt5.QtWidgets import (
QApplication, QVBoxLayout, QTextEdit, QHBoxLayout, QPushButton, QWidget,
QFileDialog,
)
from electrum.i18n import _
from electrum.simple_config import SimpleConfig
from .util import WindowModalDialog, WWLabel, getSaveFileName
class QrCodeDataOverflow(qrcode.exceptions.DataOverflowError):
pass
class QRCodeWidget(QWidget):
def __init__(self, data=None, *, manual_size: bool = False):
QWidget.__init__(self)
self.data = None
self.qr = None
self._framesize = None # type: Optional[int]
self._manual_size = manual_size
self.setData(data)
def setData(self, data):
if data:
qr = qrcode.QRCode(
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=10,
border=0,
)
try:
qr.add_data(data)
qr_matrix = qr.get_matrix() # test that data fits in QR code
except (ValueError, qrcode.exceptions.DataOverflowError) as e:
raise QrCodeDataOverflow() from e
self.qr = qr
self.data = data
if not self._manual_size:
k = len(qr_matrix)
self.setMinimumSize(k * 5, k * 5)
else:
self.qr = None
self.data = None
self.update()
def paintEvent(self, e):
if not self.data:
return
black = QColor(0, 0, 0, 255)
grey = QColor(196, 196, 196, 255)
white = QColor(255, 255, 255, 255)
black_pen = QPen(black) if self.isEnabled() else QPen(grey)
black_pen.setJoinStyle(Qt.MiterJoin)
if not self.qr:
qp = QtGui.QPainter()
qp.begin(self)
qp.setBrush(white)
qp.setPen(white)
r = qp.viewport()
qp.drawRect(0, 0, r.width(), r.height())
qp.end()
return
matrix = self.qr.get_matrix()
k = len(matrix)
qp = QtGui.QPainter()
qp.begin(self)
r = qp.viewport()
framesize = min(r.width(), r.height())
self._framesize = framesize
boxsize = int(framesize/(k + 2))
if boxsize < 2:
qp.drawText(0, 20, 'Cannot draw QR code:')
qp.drawText(0, 40, 'Boxsize too small')
qp.end()
return
size = k*boxsize
left = (framesize - size)/2
top = (framesize - size)/2
# Draw white background with margin
qp.setBrush(white)
qp.setPen(white)
qp.drawRect(0, 0, framesize, framesize)
# Draw qr code
qp.setBrush(black if self.isEnabled() else grey)
qp.setPen(black_pen)
for r in range(k):
for c in range(k):
if matrix[r][c]:
qp.drawRect(
int(left+c*boxsize), int(top+r*boxsize),
boxsize - 1, boxsize - 1)
qp.end()
def METHOD_NAME(self) -> QtGui.QPixmap:
"""Overrides QWidget.grab to only include the QR code itself,
excluding horizontal/vertical stretch.
"""
fsize = self._framesize
if fsize is None:
fsize = -1
rect = QRect(0, 0, fsize, fsize)
return QWidget.METHOD_NAME(self, rect)
class QRDialog(WindowModalDialog):
def __init__(
self,
*,
data,
parent=None,
title="",
show_text=False,
help_text=None,
show_copy_text_btn=False,
config: SimpleConfig,
):
WindowModalDialog.__init__(self, parent, title)
self.config = config
vbox = QVBoxLayout()
qrw = QRCodeWidget(data, manual_size=True)
qrw.setMinimumSize(250, 250)
vbox.addWidget(qrw, 1)
help_text = data if show_text else help_text
if help_text:
text_label = WWLabel()
text_label.setText(help_text)
vbox.addWidget(text_label)
hbox = QHBoxLayout()
hbox.addStretch(1)
def print_qr():
filename = getSaveFileName(
parent=self,
title=_("Select where to save file"),
filename="qrcode.png",
config=self.config,
)
if not filename:
return
p = qrw.METHOD_NAME()
p.save(filename, 'png')
self.show_message(_("QR code saved to file") + " " + filename)
def copy_image_to_clipboard():
p = qrw.METHOD_NAME()
QApplication.clipboard().setPixmap(p)
self.show_message(_("QR code copied to clipboard"))
def copy_text_to_clipboard():
QApplication.clipboard().setText(data)
self.show_message(_("Text copied to clipboard"))
b = QPushButton(_("Copy Image"))
hbox.addWidget(b)
b.clicked.connect(copy_image_to_clipboard)
if show_copy_text_btn:
b = QPushButton(_("Copy Text"))
hbox.addWidget(b)
b.clicked.connect(copy_text_to_clipboard)
b = QPushButton(_("Save"))
hbox.addWidget(b)
b.clicked.connect(print_qr)
b = QPushButton(_("Close"))
hbox.addWidget(b)
b.clicked.connect(self.accept)
b.setDefault(True)
vbox.addLayout(hbox)
self.setLayout(vbox)
# note: the word-wrap on the text_label is causing layout sizing issues.
# see https://stackoverflow.com/a/25661985 and https://bugreports.qt.io/browse/QTBUG-37673
# workaround:
self.setMinimumSize(self.sizeHint())
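# A minimal usage sketch (assumes a running QApplication and an electrum
# SimpleConfig instance; the payload string is an illustrative placeholder).
def example_show_qr_dialog(config: SimpleConfig):
    dialog = QRDialog(
        data="example payload for the QR code",
        parent=None,
        title=_("Example QR"),
        show_text=True,
        config=config,
    )
    # The "Copy Image" and "Save" buttons both rely on
    # QRCodeWidget.METHOD_NAME() to grab the rendered QR area.
    dialog.exec_()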
|
1,151 |
del validity period start
|
# -*- coding: utf-8 -*-
#
# LinOTP - the open source solution for two factor authentication
# Copyright (C) 2010-2019 KeyIdentity GmbH
# Copyright (C) 2019- netgo software GmbH
#
# This file is part of LinOTP server.
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License, version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# E-mail: [email protected]
# Contact: www.linotp.org
# Support: www.linotp.de
#
from datetime import datetime
class TokenValidityMixin(object):
"""
A mixin for the Token validity handling which could be
* time based with
** start and end validity period
* counter based with
** access counter based
** success counter based
Stored values are
for the success counters:
count_auth_success_max
count_auth_success
for the access counters:
count_auth_max
count_auth
for the time period:
validity_period_start
validity_period_end
TODO: currently the validity info is stored in the token info
but in a redesign could be moved into a dedicated table or column
"""
# ---------------------------------------------------------------------- --
# success counter handling
@property
def count_auth_success_max(self):
"""get the counter for the maximum allowed successful logins"""
return int(self.getFromTokenInfo("count_auth_success_max", 0) or 0)
@count_auth_success_max.setter
def count_auth_success_max(self, count):
"""Sets the counter for the maximum allowed successful logins"""
self.addToTokenInfo("count_auth_success_max", int(count))
def del_count_auth_success_max(self):
"""delete the success access counter"""
self.removeFromTokenInfo("count_auth_success_max")
@property
def count_auth_success(self):
"""getter for the count_auth_success"""
return int(self.getFromTokenInfo("count_auth_success", 0) or 0)
@count_auth_success.setter
def count_auth_success(self, count):
"""setter for the count_auth_success"""
self.addToTokenInfo("count_auth_success", int(count))
def inc_count_auth_success(self):
"""
increment the auth success counter
"""
self.count_auth_success = self.count_auth_success + 1
return self.count_auth_success
# access counter handling
@property
def count_auth_max(self):
return int(self.getFromTokenInfo("count_auth_max", 0) or 0)
@count_auth_max.setter
def count_auth_max(self, count):
"""Sets the counter for the maximum allowed login attemps"""
self.addToTokenInfo("count_auth_max", int(count))
def del_count_auth_max(self):
"""delete the access counter"""
self.removeFromTokenInfo("count_auth_max")
@property
def count_auth(self):
return int(self.getFromTokenInfo("count_auth", 0))
@count_auth.setter
def count_auth(self, count):
"""Sets the counter for the occurred login attepms"""
self.addToTokenInfo("count_auth", int(count))
def inc_count_auth(self):
"""increment the access counter"""
self.count_auth = self.count_auth + 1
return self.count_auth
def del_count_auth(self):
"""delete the access counter"""
self.removeFromTokenInfo("count_auth")
# time based validity handling
@property
def validity_period_end(self):
"""
returns the end of validity period (if set)
"""
end_time = self.getFromTokenInfo("validity_period_end", "") or ""
if end_time:
return datetime.strptime(end_time, "%d/%m/%y %H:%M")
return ""
@validity_period_end.setter
def validity_period_end(self, end_date):
"""
sets the end date of the validity period for a token
"""
        # the upper layer will catch errors; here we just verify the date format
datetime.strptime(end_date, "%d/%m/%y %H:%M")
self.addToTokenInfo("validity_period_end", end_date)
def del_validity_period_end(self):
"""
delete the end date of the validity period for a token
"""
self.removeFromTokenInfo("validity_period_end")
@property
def validity_period_start(self):
"""
returns the start of validity period (if set)
"""
start_time = self.getFromTokenInfo("validity_period_start", "") or ""
if start_time:
return datetime.strptime(start_time, "%d/%m/%y %H:%M")
return ""
@validity_period_start.setter
def validity_period_start(self, start_date):
"""
sets the start date of the validity period for a token
"""
        # the upper layer will catch errors; here we just verify the date format
datetime.strptime(start_date, "%d/%m/%y %H:%M")
self.addToTokenInfo("validity_period_start", start_date)
def METHOD_NAME(self):
"""
delete the start date of the validity period for a token
"""
self.removeFromTokenInfo("validity_period_start")
def is_not_yet_valid(self):
now = datetime.utcnow()
return self.validity_period_start and now < self.validity_period_start
def is_expired(self):
now = datetime.utcnow()
return self.validity_period_end and now > self.validity_period_end
def has_exceeded_success(self):
return (
self.count_auth_success_max > 0
and self.count_auth_success >= self.count_auth_success_max
)
def has_exceeded_usage(self):
return (
self.count_auth_max > 0 and self.count_auth >= self.count_auth_max
)
def is_valid(self):
return not (
self.is_not_yet_valid()
or self.is_expired()
or self.has_exceeded_success()
or self.has_exceeded_usage()
)
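# A minimal sketch (the DemoToken class is hypothetical) of how a token class
# can satisfy the mixin's expectations with a plain dict-backed info store.
class DemoToken(TokenValidityMixin):
    def __init__(self):
        self._info = {}
    def getFromTokenInfo(self, key, default=None):
        return self._info.get(key, default)
    def addToTokenInfo(self, key, value):
        self._info[key] = value
    def removeFromTokenInfo(self, key):
        self._info.pop(key, None)
def example_token_validity():
    token = DemoToken()
    token.count_auth_max = 3
    token.validity_period_end = "31/12/30 23:59"
    token.inc_count_auth()
    return token.is_valid()  # True while usage and time limits are not exceeded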
# eof
|
1,152 |
delete tasks id owners id with http
|
from _typeshed import Incomplete
from influxdb_client.service._base_service import _BaseService
class TasksService(_BaseService):
def __init__(self, api_client: Incomplete | None = None) -> None: ...
def delete_tasks_id(self, task_id, **kwargs): ...
def delete_tasks_id_with_http_info(self, task_id, **kwargs): ...
async def delete_tasks_id_async(self, task_id, **kwargs): ...
def delete_tasks_id_labels_id(self, task_id, label_id, **kwargs): ...
def delete_tasks_id_labels_id_with_http_info(self, task_id, label_id, **kwargs): ...
async def delete_tasks_id_labels_id_async(self, task_id, label_id, **kwargs): ...
def delete_tasks_id_members_id(self, user_id, task_id, **kwargs): ...
def delete_tasks_id_members_id_with_http_info(self, user_id, task_id, **kwargs): ...
async def delete_tasks_id_members_id_async(self, user_id, task_id, **kwargs): ...
def delete_tasks_id_owners_id(self, user_id, task_id, **kwargs): ...
def METHOD_NAME(self, user_id, task_id, **kwargs): ...
async def delete_tasks_id_owners_id_async(self, user_id, task_id, **kwargs): ...
def delete_tasks_id_runs_id(self, task_id, run_id, **kwargs): ...
def delete_tasks_id_runs_id_with_http_info(self, task_id, run_id, **kwargs): ...
async def delete_tasks_id_runs_id_async(self, task_id, run_id, **kwargs): ...
def get_tasks(self, **kwargs): ...
def get_tasks_with_http_info(self, **kwargs): ...
async def get_tasks_async(self, **kwargs): ...
def get_tasks_id(self, task_id, **kwargs): ...
def get_tasks_id_with_http_info(self, task_id, **kwargs): ...
async def get_tasks_id_async(self, task_id, **kwargs): ...
def get_tasks_id_labels(self, task_id, **kwargs): ...
def get_tasks_id_labels_with_http_info(self, task_id, **kwargs): ...
async def get_tasks_id_labels_async(self, task_id, **kwargs): ...
def get_tasks_id_logs(self, task_id, **kwargs): ...
def get_tasks_id_logs_with_http_info(self, task_id, **kwargs): ...
async def get_tasks_id_logs_async(self, task_id, **kwargs): ...
def get_tasks_id_members(self, task_id, **kwargs): ...
def get_tasks_id_members_with_http_info(self, task_id, **kwargs): ...
async def get_tasks_id_members_async(self, task_id, **kwargs): ...
def get_tasks_id_owners(self, task_id, **kwargs): ...
def get_tasks_id_owners_with_http_info(self, task_id, **kwargs): ...
async def get_tasks_id_owners_async(self, task_id, **kwargs): ...
def get_tasks_id_runs(self, task_id, **kwargs): ...
def get_tasks_id_runs_with_http_info(self, task_id, **kwargs): ...
async def get_tasks_id_runs_async(self, task_id, **kwargs): ...
def get_tasks_id_runs_id(self, task_id, run_id, **kwargs): ...
def get_tasks_id_runs_id_with_http_info(self, task_id, run_id, **kwargs): ...
async def get_tasks_id_runs_id_async(self, task_id, run_id, **kwargs): ...
def get_tasks_id_runs_id_logs(self, task_id, run_id, **kwargs): ...
def get_tasks_id_runs_id_logs_with_http_info(self, task_id, run_id, **kwargs): ...
async def get_tasks_id_runs_id_logs_async(self, task_id, run_id, **kwargs): ...
def patch_tasks_id(self, task_id, task_update_request, **kwargs): ...
def patch_tasks_id_with_http_info(self, task_id, task_update_request, **kwargs): ...
async def patch_tasks_id_async(self, task_id, task_update_request, **kwargs): ...
def post_tasks(self, task_create_request, **kwargs): ...
def post_tasks_with_http_info(self, task_create_request, **kwargs): ...
async def post_tasks_async(self, task_create_request, **kwargs): ...
def post_tasks_id_labels(self, task_id, label_mapping, **kwargs): ...
def post_tasks_id_labels_with_http_info(self, task_id, label_mapping, **kwargs): ...
async def post_tasks_id_labels_async(self, task_id, label_mapping, **kwargs): ...
def post_tasks_id_members(self, task_id, add_resource_member_request_body, **kwargs): ...
def post_tasks_id_members_with_http_info(self, task_id, add_resource_member_request_body, **kwargs): ...
async def post_tasks_id_members_async(self, task_id, add_resource_member_request_body, **kwargs): ...
def post_tasks_id_owners(self, task_id, add_resource_member_request_body, **kwargs): ...
def post_tasks_id_owners_with_http_info(self, task_id, add_resource_member_request_body, **kwargs): ...
async def post_tasks_id_owners_async(self, task_id, add_resource_member_request_body, **kwargs): ...
def post_tasks_id_runs(self, task_id, **kwargs): ...
def post_tasks_id_runs_with_http_info(self, task_id, **kwargs): ...
async def post_tasks_id_runs_async(self, task_id, **kwargs): ...
def post_tasks_id_runs_id_retry(self, task_id, run_id, **kwargs): ...
def post_tasks_id_runs_id_retry_with_http_info(self, task_id, run_id, **kwargs): ...
async def post_tasks_id_runs_id_retry_async(self, task_id, run_id, **kwargs): ...
|
1,153 |
test pickle serializer needs a sane pickler
|
import importlib
import sys
from pathlib import Path
from types import ModuleType
from typing import Generator
import pytest
from prefect.packaging.serializers import (
ImportSerializer,
PickleSerializer,
SourceSerializer,
)
def foo(return_val="foo"):
return return_val
@pytest.mark.parametrize(
"serializer", [SourceSerializer(), ImportSerializer(), PickleSerializer()]
)
def test_serialize_function(serializer):
blob = serializer.dumps(foo)
result = serializer.loads(blob)
assert type(result) == type(foo)
assert result.__kwdefaults__ == foo.__kwdefaults__
assert result.__name__ == foo.__name__
# The source serializer updates the module to __prefect_loader__
if not isinstance(serializer, SourceSerializer):
        assert result.__module__ == foo.__module__
assert result() == foo(), "Result should be callable"
@pytest.fixture
def busted_pickler() -> Generator[ModuleType, None, None]:
spec = importlib.machinery.ModuleSpec("busted_pickle", None)
busted_pickler = importlib.util.module_from_spec(spec)
sys.modules["busted_pickler"] = busted_pickler
try:
yield busted_pickler
finally:
del sys.modules["busted_pickler"]
def METHOD_NAME(busted_pickler: ModuleType):
with pytest.raises(ValueError, match="Failed to import requested pickle library"):
PickleSerializer(picklelib="not-even-valid-identifier")
with pytest.raises(ValueError, match="does not have a 'dumps'"):
PickleSerializer(picklelib="busted_pickler")
setattr(busted_pickler, "dumps", lambda: "wat")
with pytest.raises(ValueError, match="does not have a 'loads'"):
PickleSerializer(picklelib="busted_pickler")
setattr(busted_pickler, "loads", lambda: "wat")
serializer = PickleSerializer(picklelib="busted_pickler")
assert serializer.picklelib == "busted_pickler"
def test_pickle_serializer_warns_about_mismatched_versions():
import cloudpickle
assert cloudpickle.__version__ != "0.0.0.0.0.0"
with pytest.warns(RuntimeWarning, match="Mismatched 'cloudpickle' versions"):
PickleSerializer(picklelib="cloudpickle", picklelib_version="0.0.0.0.0.0")
PickleSerializer(picklelib="cloudpickle", picklelib_version=cloudpickle.__version__)
def test_source_serializer_must_find_module():
with pytest.raises(ValueError, match="Cannot determine source module for object"):
        # object() yields a C-level instance that doesn't have a __module__
SourceSerializer().dumps(object())
def test_source_serializer_needs_a_file_module():
with pytest.raises(ValueError, match="Found module <module 'builtins'"):
# object comes from the module `builtins`, a C module without Python source
SourceSerializer().dumps(object)
@pytest.mark.parametrize(
"garbage",
[
b"{}",
b"[]",
b"null",
b'{"source": "import antigravity\\n"}',
],
)
def test_source_serializer_cannot_decode_just_any_old_thing(garbage: bytes):
with pytest.raises(ValueError, match="Invalid serialized data"):
SourceSerializer().loads(garbage)
def test_pickle_serializer_does_not_allow_pickle_modules_without_cloudpickle():
with pytest.raises(ValueError, match="cloudpickle"):
PickleSerializer(pickle_modules=["test"], picklelib="pickle")
def test_pickle_serializer_supports_module_serialization(monkeypatch):
monkeypatch.syspath_prepend(str(Path(__file__).parent / "examples"))
from my_module.flow import test_flow
serializer = PickleSerializer(pickle_modules=["my_module"])
content = serializer.dumps(test_flow)
monkeypatch.undo()
sys.modules.pop("my_module")
flow = serializer.loads(content)
assert flow() == "test!"
def test_pickle_serializer_fails_on_relative_import_without_module_serialization(
monkeypatch,
):
monkeypatch.syspath_prepend(str(Path(__file__).parent / "examples"))
from my_module.flow import test_flow
serializer = PickleSerializer()
content = serializer.dumps(test_flow)
monkeypatch.undo()
sys.modules.pop("my_module")
with pytest.raises(ModuleNotFoundError, match="No module named 'my_module'"):
serializer.loads(content)
|
1,154 |
reload unit registry
|
"""Helper functions for converting between units."""
import logging
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
import pint
_unit_registry = None
logger = logging.getLogger('inventree')
def get_unit_registry():
"""Return a custom instance of the Pint UnitRegistry."""
global _unit_registry
# Cache the unit registry for speedier access
if _unit_registry is None:
return METHOD_NAME()
else:
return _unit_registry
def METHOD_NAME():
"""Reload the unit registry from the database.
This function is called at startup, and whenever the database is updated.
"""
import time
t_start = time.time()
global _unit_registry
_unit_registry = None
reg = pint.UnitRegistry()
# Define some "standard" additional units
reg.define('piece = 1')
reg.define('each = 1 = ea')
reg.define('dozen = 12 = dz')
reg.define('hundred = 100')
reg.define('thousand = 1000')
# Allow for custom units to be defined in the database
try:
from common.models import CustomUnit
for cu in CustomUnit.objects.all():
try:
reg.define(cu.fmt_string())
except Exception as e:
logger.error(f'Failed to load custom unit: {cu.fmt_string()} - {e}')
# Once custom units are loaded, save registry
_unit_registry = reg
except Exception:
# Database is not ready, or CustomUnit model is not available
pass
dt = time.time() - t_start
logger.debug(f'Loaded unit registry in {dt:.3f}s')
return reg
def convert_physical_value(value: str, unit: str = None, strip_units=True):
"""Validate that the provided value is a valid physical quantity.
Arguments:
value: Value to validate (str)
unit: Optional unit to convert to, and validate against
strip_units: If True, strip units from the returned value, and return only the dimension
Raises:
ValidationError: If the value is invalid or cannot be converted to the specified unit
Returns:
The converted quantity, in the specified units
"""
# Ensure that the value is a string
value = str(value).strip()
# Error on blank values
if not value:
raise ValidationError(_('No value provided'))
ureg = get_unit_registry()
error = ''
try:
# Convert to a quantity
val = ureg.Quantity(value)
if unit:
if is_dimensionless(val):
# If the provided value is dimensionless, assume that the unit is correct
val = ureg.Quantity(value, unit)
else:
# Convert to the provided unit (may raise an exception)
val = val.to(unit)
# At this point we *should* have a valid pint value
        # To double-check, look at the magnitude
float(ureg.Quantity(val.magnitude).magnitude)
except (TypeError, ValueError, AttributeError):
error = _('Provided value is not a valid number')
except (pint.errors.UndefinedUnitError, pint.errors.DefinitionSyntaxError):
error = _('Provided value has an invalid unit')
if unit:
error += f' ({unit})'
except pint.errors.DimensionalityError:
error = _('Provided value could not be converted to the specified unit')
if unit:
error += f' ({unit})'
except Exception as e:
error = _('Error') + ': ' + str(e)
if error:
raise ValidationError(error)
# Calculate the "magnitude" of the value, as a float
    # If the value is specified strangely (e.g. as a fraction or a dozen), this can cause issues
# So, we ensure that it is converted to a floating point value
# If we wish to return a "raw" value, some trickery is required
if unit:
magnitude = ureg.Quantity(val.to(unit)).magnitude
else:
magnitude = ureg.Quantity(val.to_base_units()).magnitude
magnitude = float(ureg.Quantity(magnitude).to_base_units().magnitude)
if strip_units:
return magnitude
elif unit or val.units:
return ureg.Quantity(magnitude, unit or val.units)
else:
return ureg.Quantity(magnitude)
def is_dimensionless(value):
"""Determine if the provided value is 'dimensionless'
A dimensionless value might look like:
0.1
1/2 dozen
three thousand
1.2 dozen
(etc)
"""
ureg = get_unit_registry()
# Ensure the provided value is in the right format
value = ureg.Quantity(value)
if value.units == ureg.dimensionless:
return True
if value.to_base_units().units == ureg.dimensionless:
return True
# At this point, the value is not dimensionless
return False
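# A minimal usage sketch (values are illustrative) of the conversion helper
# above; it only relies on pint's defaults and the built-in extra units, so no
# database-defined custom units are required for these inputs.
def example_unit_conversions():
    # 'dozen' is one of the standard additional units defined above
    assert convert_physical_value('2 dozen') == 24.0
    # Convert between physical units and return only the magnitude
    assert abs(convert_physical_value('1 inch', 'mm') - 25.4) < 1e-6
    # Keep the units attached to the returned quantity
    return convert_physical_value('250 g', 'kg', strip_units=False)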
|
1,155 |
save
|
from urllib.parse import urlparse
from django import forms
from django.utils.translation import gettext_lazy as _
from django.urls import resolve
from django.urls.exceptions import Resolver404
from tendenci.apps.site_settings.utils import get_setting
from tendenci.apps.directories.models import Directory, Category
from tendenci.apps.base.forms import FormControlWidgetMixin
from tendenci.apps.notifications import models as notification
from .models import RequestEmail, AffiliateRequest, Connection
class RequestAssociateForm(FormControlWidgetMixin, forms.ModelForm):
from_directory_url = forms.URLField(label=_('Your Listing URL'))
request_as = forms.ModelChoiceField(queryset=None, required=True)
message = forms.CharField(max_length=1000,
widget=forms.Textarea(attrs={'rows':'3'}))
class Meta:
model = RequestEmail
fields = (
'from_directory_url',
'message')
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
self.to_directory = kwargs.pop('to_directory')
super(RequestAssociateForm, self).__init__(*args, **kwargs)
self.fields['from_directory_url'].help_text = _('Example: %s/%s/example/') % (
get_setting('site', 'global', 'siteurl'),
get_setting('module', 'directories', 'url'))
self.fields['from_directory_url'].widget.attrs.update({'placeholder': _('Your marketplace listing URL'),
'size': 50})
affiliate_cats_queryset = Category.objects.filter(id__in=Connection.objects.filter(
cat__in=self.to_directory.cats.all()).values_list(
'affliated_cats', flat=True)).distinct()
self.fields['request_as'].queryset = affiliate_cats_queryset
def clean(self):
self.cleaned_data = super(RequestAssociateForm, self).clean()
from_directory_url = self.cleaned_data['from_directory_url']
request_as = self.cleaned_data['request_as']
# check if this request to associate is allowed
o = urlparse(from_directory_url)
url_path = o.path
try:
resolver = resolve(url_path)
except Resolver404:
raise forms.ValidationError(_("Your marketplace listing is not valid. Please check your URL."))
if 'slug' in resolver.kwargs and resolver.view_name == 'directory' \
and Directory.objects.filter(slug=resolver.kwargs['slug']).exists():
self.from_directory = Directory.objects.get(slug=resolver.kwargs['slug'])
if not (self.request.user.is_superuser or self.from_directory.is_owner(self.request.user)):
raise forms.ValidationError(_("You are not allowed to submit this listing."))
elif not self.to_directory.can_connect_from(directory_from=self.from_directory):
raise forms.ValidationError(_("This connection is not allowed."))
elif request_as not in self.from_directory.cats.all():
raise forms.ValidationError(_("This connection can not be established."))
else:
raise forms.ValidationError(_("Invalid marketplace listing."))
return self.cleaned_data
def METHOD_NAME(self, *args, **kwargs):
"""
Save the request form and send email notifications
"""
self.instance = super(RequestAssociateForm, self).METHOD_NAME(*args, **kwargs)
[affiliate_request] = AffiliateRequest.objects.filter(
to_directory=self.to_directory,
from_directory=self.from_directory,
request_as=self.cleaned_data['request_as']
)[:1] or [None]
if not affiliate_request:
affiliate_request = AffiliateRequest.objects.create(
to_directory=self.to_directory,
from_directory=self.from_directory,
request_as=self.cleaned_data['request_as'],
creator= self.request.user,)
self.instance.affiliate_request = affiliate_request
self.instance.sender = self.request.user
# get recipients emails
self.instance.recipients = self.to_directory.get_owner_emails_list()
self.instance.METHOD_NAME()
self.send_emails(self.instance)
return self.instance
def send_emails(self, request_email):
# email notifications
if request_email.recipients:
site_display_name = get_setting('site', 'global', 'sitedisplayname')
site_url = get_setting('site', 'global', 'siteurl')
params = {
'SITE_GLOBAL_SITEDISPLAYNAME': site_display_name,
'SITE_GLOBAL_SITEURL': site_url,
'MODULE_DIRECTORIES_LABEL_PLURAL': get_setting('module', 'directories', 'label_plural'),
'directory': self.to_directory,
'from_directory': self.from_directory,
'message': request_email.message,
'first_name': request_email.sender.first_name,
'last_name': request_email.sender.last_name,
'affiliate_request': self.instance.affiliate_request,
}
# to to_directory owner
params['reply_to'] = request_email.sender.email
notification.send_emails(request_email.recipients,
'affiliate_requested_to_owner', params)
# to submitter
submitter_email = (request_email.sender.email).strip()
params['reply_to'] = request_email.recipients[0]
notification.send_emails([submitter_email],
'affiliate_requested_to_submitter', params)
|
1,156 |
disable keys
|
#!/usr/bin/env python3
"""
sorting_animation.py
A minimal sorting algorithm animation:
Sorts a shelf of 10 blocks using insertion
sort, selection sort and quicksort.
Shelves are implemented using builtin lists.
Blocks are turtles with shape "square", but
stretched to rectangles by shapesize()
---------------------------------------
Press the space bar to exit
---------------------------------------
"""
from turtle import *
import random
class Block(Turtle):
def __init__(self, size):
self.size = size
Turtle.__init__(self, shape="square", visible=False)
self.pu()
self.shapesize(size * 1.5, 1.5, 2) # square-->rectangle
self.fillcolor("black")
self.st()
def glow(self):
self.fillcolor("red")
def unglow(self):
self.fillcolor("black")
def __repr__(self):
return "Block size: {0}".format(self.size)
class Shelf(list):
def __init__(self, y):
"create a shelf. y is y-position of first block"
self.y = y
self.x = -150
def push(self, d):
width, _, _ = d.shapesize()
# align blocks by the bottom edge
y_offset = width / 2 * 20
d.sety(self.y + y_offset)
d.setx(self.x + 34 * len(self))
self.append(d)
def _close_gap_from_i(self, i):
for b in self[i:]:
xpos, _ = b.pos()
b.setx(xpos - 34)
def _open_gap_from_i(self, i):
for b in self[i:]:
xpos, _ = b.pos()
b.setx(xpos + 34)
def pop(self, key):
b = list.pop(self, key)
b.glow()
b.sety(200)
self._close_gap_from_i(key)
return b
def insert(self, key, b):
self._open_gap_from_i(key)
list.insert(self, key, b)
b.setx(self.x + 34 * key)
width, _, _ = b.shapesize()
# align blocks by the bottom edge
y_offset = width / 2 * 20
b.sety(self.y + y_offset)
b.unglow()
def isort(shelf):
length = len(shelf)
for i in range(1, length):
hole = i
while hole > 0 and shelf[i].size < shelf[hole - 1].size:
hole = hole - 1
shelf.insert(hole, shelf.pop(i))
return
def ssort(shelf):
length = len(shelf)
for j in range(0, length - 1):
imin = j
for i in range(j + 1, length):
if shelf[i].size < shelf[imin].size:
imin = i
if imin != j:
shelf.insert(j, shelf.pop(imin))
def partition(shelf, left, right, pivot_index):
pivot = shelf[pivot_index]
shelf.insert(right, shelf.pop(pivot_index))
store_index = left
for i in range(left, right): # range is non-inclusive of ending value
if shelf[i].size < pivot.size:
shelf.insert(store_index, shelf.pop(i))
store_index = store_index + 1
shelf.insert(store_index, shelf.pop(right)) # move pivot to correct position
return store_index
def qsort(shelf, left, right):
if left < right:
pivot_index = left
pivot_new_index = partition(shelf, left, right, pivot_index)
qsort(shelf, left, pivot_new_index - 1)
qsort(shelf, pivot_new_index + 1, right)
def randomize():
METHOD_NAME()
clear()
target = list(range(10))
random.shuffle(target)
for i, t in enumerate(target):
for j in range(i, len(s)):
if s[j].size == t + 1:
s.insert(i, s.pop(j))
show_text(instructions1)
show_text(instructions2, line=1)
enable_keys()
def show_text(text, line=0):
line = 20 * line
goto(0,-250 - line)
write(text, align="center", font=("Courier", 16, "bold"))
def start_ssort():
METHOD_NAME()
clear()
show_text("Selection Sort")
ssort(s)
clear()
show_text(instructions1)
show_text(instructions2, line=1)
enable_keys()
def start_isort():
METHOD_NAME()
clear()
show_text("Insertion Sort")
isort(s)
clear()
show_text(instructions1)
show_text(instructions2, line=1)
enable_keys()
def start_qsort():
METHOD_NAME()
clear()
show_text("Quicksort")
qsort(s, 0, len(s) - 1)
clear()
show_text(instructions1)
show_text(instructions2, line=1)
enable_keys()
def init_shelf():
global s
s = Shelf(-200)
vals = (4, 2, 8, 9, 1, 5, 10, 3, 7, 6)
for i in vals:
s.push(Block(i))
def METHOD_NAME():
onkey(None, "s")
onkey(None, "i")
onkey(None, "q")
onkey(None, "r")
def enable_keys():
onkey(start_isort, "i")
onkey(start_ssort, "s")
onkey(start_qsort, "q")
onkey(randomize, "r")
onkey(bye, "space")
def main():
getscreen().clearscreen()
ht(); penup()
init_shelf()
show_text(instructions1)
show_text(instructions2, line=1)
enable_keys()
listen()
return "EVENTLOOP"
instructions1 = "press i for insertion sort, s for selection sort, q for quicksort"
instructions2 = "spacebar to quit, r to randomize"
if __name__=="__main__":
msg = main()
mainloop()
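# Editor's note (hedged sketch, not part of the original demo): the same
# pop/insert partition scheme used by partition()/qsort() above, restated on a
# plain list without turtle graphics; the names below are illustrative only.
def partition_plain(items, left, right, pivot_index):
    pivot = items[pivot_index]
    items.insert(right, items.pop(pivot_index))   # park the pivot at the right end
    store = left
    for i in range(left, right):                  # right itself is excluded
        if items[i] < pivot:
            items.insert(store, items.pop(i))     # shift smaller items to the front
            store += 1
    items.insert(store, items.pop(right))         # drop the pivot into its final slot
    return store
def qsort_plain(items, left, right):
    if left < right:
        p = partition_plain(items, left, right, left)
        qsort_plain(items, left, p - 1)
        qsort_plain(items, p + 1, right)
# e.g. data = [4, 2, 8, 9, 1, 5, 10, 3, 7, 6]; qsort_plain(data, 0, len(data) - 1)
# leaves data == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]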
|
1,157 |
sorted watt keys
|
import dataclasses
from collections import defaultdict
from typing import (
Any,
Collection,
Dict,
Iterator,
Mapping,
MutableSet,
Optional,
Protocol,
Sequence,
Type,
TypeVar,
)
from .k8sobject import KubernetesObject, KubernetesObjectKey
class Dependency(Protocol):
"""
Dependencies link information provided by processors of a given Watt
invocation to other processors that need the processed result. This results
in an ordering of keys so that processors can be dependent on each other
without direct knowledge of where data is coming from.
"""
def watt_key(self) -> str:
...
class ServiceDependency(Dependency):
"""
A dependency that exposes information about the Kubernetes service for
Ambassador itself.
"""
ambassador_service: Optional[KubernetesObject]
discovered_services: Dict[KubernetesObjectKey, KubernetesObject]
def __init__(self) -> None:
self.ambassador_service = None
self.discovered_services = {}
def watt_key(self) -> str:
return "service"
class SecretDependency(Dependency):
"""
A dependency that is satisfied once secret information has been mapped and
emitted.
"""
def watt_key(self) -> str:
return "secret"
class IngressClassesDependency(Dependency):
"""
A dependency that provides the list of ingress classes that are valid (i.e.,
have the proper controller) for this cluster.
"""
ingress_classes: MutableSet[str]
def __init__(self):
self.ingress_classes = set()
def watt_key(self) -> str:
return "ingressclasses"
D = TypeVar("D", bound=Dependency)
class DependencyMapping(Protocol):
def __contains__(self, key: Type[D]) -> bool:
...
def __getitem__(self, key: Type[D]) -> D:
...
class DependencyInjector:
"""
Each processor instance is provided with a dependency injector that allows
it to declare what dependencies it provides as part of its processing and
what dependencies it wants to do its processing.
Note that dependencies need not be fulfilled; for example, nothing may
provide information about the Ambassador service or the list of valid
ingress classes. Processors should be prepared to deal with the absence of
valid data when they run.
"""
wants: MutableSet[Type[Dependency]]
provides: MutableSet[Type[Dependency]]
deps: DependencyMapping
def __init__(self, deps: DependencyMapping) -> None:
self.wants = set()
self.provides = set()
self.deps = deps
def want(self, cls: Type[D]) -> D:
self.wants.add(cls)
return self.deps[cls]
def provide(self, cls: Type[D]) -> D:
self.provides.add(cls)
return self.deps[cls]
class DependencyGraph:
"""
Once dependency relationships are known, this class provides the ability to
link them holistically and traverse them in topological order. It is most
useful in the context of the sorted_watt_keys() method of the
DependencyManager.
"""
@dataclasses.dataclass
class Vertex:
out: MutableSet[Any]
in_count: int
vertices: Mapping[Any, Vertex]
def __init__(self) -> None:
self.vertices = defaultdict(lambda: DependencyGraph.Vertex(out=set(), in_count=0))
def connect(self, a: Any, b: Any) -> None:
if b not in self.vertices[a].out:
self.vertices[a].out.add(b)
self.vertices[b].in_count += 1
def traverse(self) -> Iterator[Any]:
"""
Returns the items in this graph in topological order.
"""
if len(self.vertices) == 0:
return
# This method implements Kahn's algorithm. See
# https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm for
# more information.
# Create a copy of the counts of each inbound edge so we can mutate
# them.
in_counts = {obj: vertex.in_count for obj, vertex in self.vertices.items()}
# Find the roots of the graph.
queue = [obj for obj, in_count in in_counts.items() if in_count == 0]
# No roots of a graph with at least one vertex indicates a cycle.
if len(queue) == 0:
raise ValueError("cyclic")
while len(queue) > 0:
cur = queue.pop(0)
yield cur
for obj in self.vertices[cur].out:
in_counts[obj] -= 1
if in_counts[obj] == 0:
queue.append(obj)
assert sum(in_counts.values()) == 0, "Traversal did not reach every vertex exactly once"
class DependencyManager:
"""
A manager that provides access to a set of dependencies for arbitrary object
instances and the ability to compute a sorted list of Watt keys that
represent the processing order for the dependencies.
"""
deps: DependencyMapping
injectors: Mapping[Any, DependencyInjector]
def __init__(self, deps: Collection[D]) -> None:
self.deps = {dep.__class__: dep for dep in deps}
self.injectors = defaultdict(lambda: DependencyInjector(self.deps))
def for_instance(self, obj: Any) -> DependencyInjector:
return self.injectors[obj]
def METHOD_NAME(self) -> Sequence[str]:
g = DependencyGraph()
for obj, injector in self.injectors.items():
for cls in injector.provides:
g.connect(obj, cls)
for cls in injector.wants:
g.connect(cls, obj)
return [self.deps[obj].watt_key() for obj in g.traverse() if obj in self.deps]
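# Editor's note (hedged sketch, not from the original module): a minimal
# illustration of the Kahn's-algorithm traversal described above; the string
# node names are placeholders, not real processors or dependencies.
def _demo_traversal():
    g = DependencyGraph()
    g.connect("parse", "validate")   # "validate" depends on "parse"
    g.connect("validate", "emit")    # "emit" depends on "validate"
    return list(g.traverse())        # -> ["parse", "validate", "emit"]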
|
1,158 |
recalculate
|
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export BOOST_ADAPTBX_FPE_DEFAULT=1
from __future__ import absolute_import, division, print_function
import wxtbx.plots
from libtbx.utils import Sorry
import wx
import os
import sys
def display_file_info(file_name, obs_type="amplitudes", parent=None,
n_bins=40, out=sys.stdout):
from iotbx import file_reader
hkl_in = file_reader.any_file(file_name, force_type="hkl")
hkl_in.check_file_type("hkl")
anom_data = []
for array in hkl_in.file_server.miller_arrays :
if (not array.anomalous_flag()) : continue
if (array.is_xray_amplitude_array()) or (array.is_xray_intensity_array()):
anom_data.append(array)
if (len(anom_data) == 0):
raise Sorry("No anomalous data arrays found.")
x_label = "D_anom(F) / <F>"
if (obs_type == "intensities"):
x_label = "D_anom(I) / <I>"
for array in anom_data :
array_name = os.path.basename(file_name)+":"+array.info().label_string()
frame = AnomHistPlotFrame(
parent=parent,
title="Bijvoet ratios for %s" % array_name,
array=array,
array_name=array_name)
frame.Show()
class AnomHistPlotFrame(wxtbx.plots.plot_frame):
def __init__(self, *args, **kwds):
self._array = kwds.pop("array")
self._array_name = kwds.pop("array_name")
wxtbx.plots.plot_frame.__init__(self, *args, **kwds)
self.METHOD_NAME()
def create_plot_panel(self):
return wxtbx.plots.histogram(parent=self, figure_size=(12,6))
def draw_top_panel(self):
self.top_panel = wx.Panel(parent=self, style=wx.RAISED_BORDER)
szr = wx.BoxSizer(wx.VERTICAL)
self.top_panel.SetSizer(szr)
szr2 = wx.BoxSizer(wx.HORIZONTAL)
szr.Add(szr2, 1)
self.meas_box = wx.CheckBox(self.top_panel, label="Measurable differences only")
szr2.Add(self.meas_box, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.log_box = wx.CheckBox(self.top_panel, label="Log scale")
szr2.Add(self.log_box, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
szr2.Add(wx.StaticText(self.top_panel, label="Data type:"), 0,
wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.obs_type = wx.Choice(self.top_panel,
choices=["amplitudes","intensities"])
szr2.Add(self.obs_type, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
szr2.Add(wx.StaticText(self.top_panel, label="# of bins:"), 0,
wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.n_bins = wx.SpinCtrl(self.top_panel, -1)
self.n_bins.SetValue(40)
szr2.Add(self.n_bins, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(wx.EVT_CHECKBOX, lambda evt: self.METHOD_NAME(), self.meas_box)
self.Bind(wx.EVT_CHECKBOX, lambda evt: self.METHOD_NAME(), self.log_box)
self.Bind(wx.EVT_CHOICE, lambda evt: self.METHOD_NAME(), self.obs_type)
self.Bind(wx.EVT_SPINCTRL, lambda evt: self.METHOD_NAME(), self.n_bins)
def METHOD_NAME(self):
from scitbx.array_family import flex
measurable_only = self.meas_box.GetValue()
obs_type = self.obs_type.GetStringSelection()
n_bins = self.n_bins.GetValue()
d_ano_rel = self._array.bijvoet_ratios(
obs_type=obs_type,
measurable_only=measurable_only)
hist = flex.histogram(d_ano_rel, n_slots=n_bins)
hist.show(f=sys.stdout)
if (obs_type == "intensities"):
x_label = "D_anom(I[hkl]) / I_mean[hkl]"
else :
x_label = "D_anom(F[hkl]) / F_mean[hkl]"
self.show_histogram(
data=list(d_ano_rel),
n_bins=n_bins,
x_label=x_label,
y_label="# hkl",
title="Bijvoet ratios for %s" % self._array_name,
log_scale=self.log_box.GetValue())
def show_histogram(self, *args, **kwds):
self.plot_panel.show_histogram(*args, **kwds)
self.Refresh()
def run(args=(), params=None, out=sys.stdout):
import wxtbx.app
app = wxtbx.app.CCTBXApp(0)
if (len(args) == 0):
from wxtbx.command_line.inspect_r_free_flags import ask_for_file
file_name = ask_for_file(parent=None)
else :
file_name = args[0]
display_file_info(file_name)
app.MainLoop()
if (__name__ == "__main__"):
run(sys.argv[1:])
|
1,159 |
test finalize all
|
import pytest
from briefcase.config import AppConfig
from .conftest import DummyCommand
@pytest.fixture
def first_app():
return AppConfig(
app_name="first",
bundle="com.example",
version="0.0.1",
description="The first simple app",
sources=["src/first"],
)
@pytest.fixture
def second_app():
return AppConfig(
app_name="second",
bundle="com.example",
version="0.0.2",
description="The second simple app",
sources=["src/second"],
)
@pytest.fixture
def base_command(tmp_path, first_app, second_app):
return DummyCommand(
base_path=tmp_path,
apps={
"first": first_app,
"second": second_app,
},
)
def METHOD_NAME(base_command, first_app, second_app):
"A call to finalize verifies host, tools, and finalized all app configs"
base_command.finalize()
# The right sequence of things will be done
assert base_command.actions == [
# Host OS is verified
("verify-host",),
# Tools are verified
("verify-tools",),
# App config has been finalized
("finalize-app-config", "first"),
# App config has been finalized
("finalize-app-config", "second"),
]
# Apps are no longer in draft mode
assert not hasattr(first_app, "__draft__")
assert not hasattr(second_app, "__draft__")
def test_finalize_single(base_command, first_app, second_app):
"A call to finalize verifies host, tools, and finalized all app configs"
base_command.finalize(first_app)
# The right sequence of things will be done
assert base_command.actions == [
# Host OS is verified
("verify-host",),
# Tools are verified
("verify-tools",),
# App config has been finalized
("finalize-app-config", "first"),
]
# First app is no longer in draft mode; second is
assert not hasattr(first_app, "__draft__")
assert hasattr(second_app, "__draft__")
def test_finalize_all_repeat(base_command, first_app, second_app):
"Multiple calls to finalize verifies host & tools multiple times, but only once on config"
# Finalize apps twice. This is an approximation of what happens
# when a command chain is executed; create, update, build and run will
# all finalize; create will finalize the app configs, each command will
    # have its own tools verified.
base_command.finalize()
base_command.finalize()
# The right sequence of things will be done
assert base_command.actions == [
# Host OS is verified
("verify-host",),
# Tools are verified
("verify-tools",),
# App config has been finalized
("finalize-app-config", "first"),
# App config has been finalized
("finalize-app-config", "second"),
# Host OS is verified again
("verify-host",),
# Tools are verified again
("verify-tools",),
]
# Apps are no longer in draft mode
assert not hasattr(first_app, "__draft__")
assert not hasattr(second_app, "__draft__")
def test_finalize_single_repeat(base_command, first_app, second_app):
"Multiple calls to finalize verifies host & tools multiple times, but finalizes app config once"
# Finalize app twice. This is an approximation of what happens
# when a command chain is executed; create, update, build and run will
# all finalize; create will finalize the app config, each command will
    # have its own tools verified.
base_command.finalize(first_app)
base_command.finalize(first_app)
# The right sequence of things will be done
assert base_command.actions == [
# Host OS is verified
("verify-host",),
# Tools are verified
("verify-tools",),
# App config has been finalized
("finalize-app-config", "first"),
# Host OS is verified again
("verify-host",),
# Tools are verified again
("verify-tools",),
]
# First app is no longer in draft mode; second is
assert not hasattr(first_app, "__draft__")
assert hasattr(second_app, "__draft__")
|
1,160 |
test region equality
|
# Copyright (C) 2022 Open Source Robotics Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from gz.math7 import Intervald, Region3d, Vector3d
class TestRegion3(unittest.TestCase):
def test_default_constructor(self):
region = Region3d()
self.assertTrue(region.ix().empty())
self.assertTrue(region.iy().empty())
self.assertTrue(region.iz().empty())
def test_constructor(self):
region = Region3d(
Intervald.open(0., 1.),
Intervald.closed(-1., 1.),
Intervald.open(-1., 0.))
self.assertEqual(region.ix(), Intervald.open(0., 1.))
self.assertEqual(region.iy(), Intervald.closed(-1., 1.))
self.assertEqual(region.iz(), Intervald.open(-1., 0.))
def test_construction_helpers(self):
openRegion = Region3d.open(0., 0., 0., 1., 1., 1.)
self.assertEqual(openRegion.ix(), Intervald.open(0., 1.))
self.assertEqual(openRegion.iy(), Intervald.open(0., 1.))
self.assertEqual(openRegion.iz(), Intervald.open(0., 1.))
closedRegion = Region3d.closed(0., 0., 0., 1., 1., 1.)
self.assertEqual(closedRegion.ix(), Intervald.closed(0., 1.))
self.assertEqual(closedRegion.iy(), Intervald.closed(0., 1.))
self.assertEqual(closedRegion.iz(), Intervald.closed(0., 1.))
def test_empty_region(self):
self.assertFalse(Region3d.open(0., 0., 0., 1., 1., 1.).empty())
self.assertTrue(Region3d.open(0., 0., 0., 0., 0., 0.).empty())
self.assertTrue(Region3d.open(0., 0., 0., 0., 1., 1.).empty())
self.assertTrue(Region3d.open(0., 0., 0., 1., 0., 1.).empty())
self.assertTrue(Region3d.open(0., 0., 0., 1., 1., 0.).empty())
self.assertFalse(Region3d.closed(0., 0., 0., 0., 0., 0.).empty())
self.assertTrue(Region3d.closed(1., 1., 1., 0., 0., 0.).empty())
def test_region_membership(self):
openRegion = Region3d.open(0., 0., 0., 1., 1., 1.)
self.assertFalse(openRegion.contains(Vector3d(0., 0., 0.)))
self.assertTrue(openRegion.contains(Vector3d(0.5, 0.5, 0.5)))
self.assertFalse(openRegion.contains(Vector3d(1., 1., 1.)))
closedRegion = Region3d.closed(0., 0., 0., 1., 1., 1.)
self.assertTrue(closedRegion.contains(Vector3d(0., 0., 0.)))
self.assertTrue(closedRegion.contains(Vector3d(0.5, 0.5, 0.5)))
self.assertTrue(closedRegion.contains(Vector3d(1., 1., 1.)))
def test_region_subset(self):
openRegion = Region3d.open(0., 0., 0., 1., 1., 1.)
self.assertTrue(openRegion.contains(
Region3d.open(0.25, 0.25, 0.25,
0.75, 0.75, 0.75)))
self.assertFalse(openRegion.contains(
Region3d.open(-1., 0.25, 0.25,
0., 0.75, 0.75)))
self.assertFalse(openRegion.contains(
Region3d.open(0.25, -1., 0.25,
0.75, 0., 0.75)))
self.assertFalse(openRegion.contains(
Region3d.open(0.25, 0.25, -1.,
0.75, 0.75, 0.)))
self.assertFalse(openRegion.contains(
Region3d.closed(0., 0., 0.,
1., 1., 1.)))
closedRegion = Region3d.closed(0., 0., 0., 1., 1., 1.)
self.assertTrue(closedRegion.contains(
Region3d.closed(0., 0., 0., 1., 1., 1.)))
self.assertTrue(closedRegion.contains(
Region3d.closed(0., 0., 0., 0., 0., 0.)))
def METHOD_NAME(self):
self.assertNotEqual(
Region3d.open(0., 0., 0., 0., 0., 0.),
Region3d.open(0., 0., 0., 0., 0., 0.))
self.assertEqual(
Region3d.closed(0., 0., 0., 0., 0., 0.),
Region3d.closed(0., 0., 0., 0., 0., 0.))
self.assertNotEqual(
Region3d.open(0., 0., 0., 1., 1., 1.),
Region3d.closed(0., 0., 0., 1., 1., 1.))
def test_region_intersection(self):
region = Region3d.open(0., 0., 0., 1., 1., 1.)
self.assertTrue(region.intersects(
Region3d.open(0.5, 0.5, 0.5, 1.5, 1.5, 1.5)))
self.assertTrue(region.intersects(
Region3d.open(-0.5, -0.5, -0.5, 0.5, 0.5, 0.5)))
self.assertFalse(region.intersects(
Region3d.open(1., 1., 1., 2., 2., 2.)))
self.assertFalse(region.intersects(
Region3d.open(-1., -1., -1., 0., 0., 0.)))
if __name__ == '__main__':
unittest.main()
|
1,161 |
assert raises message with given cause
|
import contextlib
import re
import sys
def eq_(a, b, msg=None):
"""Assert a == b, with repr messaging on failure."""
assert a == b, msg or "%r != %r" % (a, b)
def ne_(a, b, msg=None):
"""Assert a != b, with repr messaging on failure."""
assert a != b, msg or "%r == %r" % (a, b)
def in_(a, b, msg=None):
"""Assert a in b, with repr messaging on failure."""
assert a in b, msg or "%r not in %r" % (a, b)
def not_in(a, b, msg=None):
"""Assert a in not b, with repr messaging on failure."""
assert a not in b, msg or "%r is in %r" % (a, b)
def _assert_proper_exception_context(exception):
"""assert that any exception we're catching does not have a __context__
without a __cause__, and that __suppress_context__ is never set.
    Python 3 will report nested exceptions as "during the handling of
error X, error Y occurred". That's not what we want to do. We want
these exceptions in a cause chain.
"""
if (
exception.__context__ is not exception.__cause__
and not exception.__suppress_context__
):
assert False, (
"Exception %r was correctly raised but did not set a cause, "
"within context %r as its cause."
% (exception, exception.__context__)
)
def _assert_proper_cause_cls(exception, cause_cls):
"""assert that any exception we're catching does not have a __context__
without a __cause__, and that __suppress_context__ is never set.
Python 3 will report nested as exceptions as "during the handling of
error X, error Y occurred". That's not what we want to do. We want
these exceptions in a cause chain.
"""
assert isinstance(exception.__cause__, cause_cls), (
"Exception %r was correctly raised but has cause %r, which does not "
"have the expected cause type %r."
% (exception, exception.__cause__, cause_cls)
)
def assert_raises(except_cls, callable_, *args, **kw):
return _assert_raises(except_cls, callable_, args, kw)
def assert_raises_with_proper_context(except_cls, callable_, *args, **kw):
return _assert_raises(except_cls, callable_, args, kw, check_context=True)
def assert_raises_with_given_cause(
except_cls, cause_cls, callable_, *args, **kw
):
return _assert_raises(except_cls, callable_, args, kw, cause_cls=cause_cls)
def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
return _assert_raises(except_cls, callable_, args, kwargs, msg=msg)
def assert_raises_message_with_proper_context(
except_cls, msg, callable_, *args, **kwargs
):
return _assert_raises(
except_cls, callable_, args, kwargs, msg=msg, check_context=True
)
def METHOD_NAME(
except_cls, msg, cause_cls, callable_, *args, **kwargs
):
return _assert_raises(
except_cls, callable_, args, kwargs, msg=msg, cause_cls=cause_cls
)
def _assert_raises(
except_cls,
callable_,
args,
kwargs,
msg=None,
check_context=False,
cause_cls=None,
):
with _expect_raises(except_cls, msg, check_context, cause_cls) as ec:
callable_(*args, **kwargs)
return ec.error
class _ErrorContainer:
error = None
@contextlib.contextmanager
def _expect_raises(except_cls, msg=None, check_context=False, cause_cls=None):
ec = _ErrorContainer()
if check_context:
are_we_already_in_a_traceback = sys.exc_info()[0]
try:
yield ec
success = False
except except_cls as err:
ec.error = err
success = True
if msg is not None:
# I'm often pdbing here, and "err" above isn't
# in scope, so assign the string explicitly
error_as_string = str(err)
assert re.search(msg, error_as_string, re.UNICODE), "%r !~ %s" % (
msg,
error_as_string,
)
if cause_cls is not None:
_assert_proper_cause_cls(err, cause_cls)
if check_context and not are_we_already_in_a_traceback:
_assert_proper_exception_context(err)
print(str(err).encode("utf-8"))
# it's generally a good idea to not carry traceback objects outside
# of the except: block, but in this case especially we seem to have
# hit some bug in either python 3.10.0b2 or greenlet or both which
# this seems to fix:
# https://github.com/python-greenlet/greenlet/issues/242
del ec
# assert outside the block so it works for AssertionError too !
assert success, "Callable did not raise an exception"
def expect_raises(except_cls, check_context=False):
return _expect_raises(except_cls, check_context=check_context)
def expect_raises_message(except_cls, msg, check_context=False):
return _expect_raises(except_cls, msg=msg, check_context=check_context)
def expect_raises_with_proper_context(except_cls, check_context=True):
return _expect_raises(except_cls, check_context=check_context)
def expect_raises_message_with_proper_context(
except_cls, msg, check_context=True
):
return _expect_raises(except_cls, msg=msg, check_context=check_context)
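# Editor's note (hedged usage sketch, not part of the original module): how the
# cause-checking helpers above fit together; _boom is illustrative only.
def _boom():
    try:
        {}["missing"]
    except KeyError as exc:
        raise ValueError("lookup failed") from exc
def _demo_assertions():
    err = assert_raises_message(ValueError, "lookup failed", _boom)
    assert isinstance(err.__cause__, KeyError)
    # METHOD_NAME == assert_raises_message_with_given_cause per the record label
    err = METHOD_NAME(ValueError, "lookup failed", KeyError, _boom)
    return err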
|
1,162 |
test create with params
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Tests for Python Crowd Modelling game."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.mfg.games import crowd_modelling
import pyspiel
MFG_STR_CONST = "_a"
class MFGCrowdModellingGameTest(absltest.TestCase):
def test_load(self):
game = pyspiel.load_game("python_mfg_crowd_modelling")
game.new_initial_state()
def test_create(self):
"""Checks we can create the game and clone states."""
game = crowd_modelling.MFGCrowdModellingGame()
self.assertEqual(game.size, crowd_modelling._SIZE)
self.assertEqual(game.horizon, crowd_modelling._HORIZON)
self.assertEqual(game.get_type().dynamics,
pyspiel.GameType.Dynamics.MEAN_FIELD)
print("Num distinct actions:", game.num_distinct_actions())
state = game.new_initial_state()
clone = state.clone()
print("Initial state:", state)
print("Cloned initial state:", clone)
def METHOD_NAME(self):
game = pyspiel.load_game("python_mfg_crowd_modelling(horizon=100,size=20)")
self.assertEqual(game.size, 20)
self.assertEqual(game.horizon, 100)
def test_random_game(self):
"""Tests basic API functions."""
horizon = 20
size = 50
game = crowd_modelling.MFGCrowdModellingGame(params={
"horizon": horizon,
"size": size
})
pyspiel.random_sim_test(
game, num_sims=10, serialize=False, verbose=True)
def test_reward(self):
game = crowd_modelling.MFGCrowdModellingGame()
state = game.new_initial_state()
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
state.apply_action(game.size // 2)
self.assertEqual(state.current_player(), 0)
# This expected reward assumes that the game is initialized with
# uniform state distribution.
self.assertAlmostEqual(state.rewards()[0], 1. + np.log(game.size))
self.assertAlmostEqual(state.returns()[0], 1. + np.log(game.size))
state.apply_action(1)
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
self.assertAlmostEqual(state.returns()[0], 1. + np.log(game.size))
def test_distribution(self):
"""Checks that distribution-related functions work."""
game = crowd_modelling.MFGCrowdModellingGame()
state = game.new_initial_state()
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
state.apply_action(game.size // 2)
self.assertEqual(state.current_player(), 0)
# This expected reward assumes that the game is initialized with
# uniform state distribution.
self.assertAlmostEqual(state.rewards()[0], 1. + np.log(game.size))
state.apply_action(crowd_modelling.MFGCrowdModellingState._NEUTRAL_ACTION)
# Chance node.
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
state.apply_action(crowd_modelling.MFGCrowdModellingState._NEUTRAL_ACTION)
self.assertEqual(state.distribution_support(), [
"(0, 1)_a", "(1, 1)_a", "(2, 1)_a", "(3, 1)_a", "(4, 1)_a", "(5, 1)_a",
"(6, 1)_a", "(7, 1)_a", "(8, 1)_a", "(9, 1)_a"
])
new_distrib = [0.01] * 9 + [1. - 0.01 * 9]
state.update_distribution(new_distrib)
self.assertAlmostEqual(state._distribution, new_distrib)
# Check that the distribution is taken into account for the reward
# computation.
self.assertAlmostEqual(state.rewards()[0], 1. - np.log(0.01))
def test_compare_py_cpp(self):
"""Compares py and cpp implementations of this game."""
py_game = pyspiel.load_game("python_mfg_crowd_modelling")
cpp_game = pyspiel.load_game("mfg_crowd_modelling")
np.random.seed(7)
py_state = py_game.new_initial_state()
cpp_state = cpp_game.new_initial_state()
t = 0
while not cpp_state.is_terminal():
self.assertFalse(py_state.is_terminal())
self.assertEqual(str(cpp_state), str(py_state))
self.assertAlmostEqual(cpp_state.returns()[0], py_state.returns()[0])
if cpp_state.current_player() == pyspiel.PlayerId.CHANCE:
actions, probs = zip(*cpp_state.chance_outcomes())
action = np.random.choice(actions, p=probs)
self.assertEqual(
cpp_state.action_to_string(action),
py_state.action_to_string(action))
cpp_state.apply_action(action)
py_state.apply_action(action)
elif cpp_state.current_player() == pyspiel.PlayerId.MEAN_FIELD:
num_cpp_states = len(cpp_state.distribution_support())
distribution = [1 / num_cpp_states] * num_cpp_states
cpp_state.update_distribution(distribution)
py_state.update_distribution(distribution)
else:
self.assertEqual(cpp_state.current_player(), 0)
legal_actions = cpp_state.legal_actions()
action = np.random.choice(legal_actions)
self.assertEqual(
cpp_state.action_to_string(action),
py_state.action_to_string(action))
cpp_state.apply_action(action)
py_state.apply_action(action)
t += 1
if __name__ == "__main__":
absltest.main()
|
1,163 |
set up
|
import unittest
from unittest.mock import patch
import builtins
import rlcompleter
class CompleteMe:
""" Trivial class used in testing rlcompleter.Completer. """
spam = 1
_ham = 2
class TestRlcompleter(unittest.TestCase):
def METHOD_NAME(self):
self.stdcompleter = rlcompleter.Completer()
self.completer = rlcompleter.Completer(dict(spam=int,
egg=str,
CompleteMe=CompleteMe))
# forces stdcompleter to bind builtins namespace
self.stdcompleter.complete('', 0)
def test_namespace(self):
class A(dict):
pass
class B(list):
pass
self.assertTrue(self.stdcompleter.use_main_ns)
self.assertFalse(self.completer.use_main_ns)
self.assertFalse(rlcompleter.Completer(A()).use_main_ns)
self.assertRaises(TypeError, rlcompleter.Completer, B((1,)))
def test_global_matches(self):
# test with builtins namespace
self.assertEqual(sorted(self.stdcompleter.global_matches('di')),
[x+'(' for x in dir(builtins) if x.startswith('di')])
self.assertEqual(sorted(self.stdcompleter.global_matches('st')),
[x+'(' for x in dir(builtins) if x.startswith('st')])
self.assertEqual(self.stdcompleter.global_matches('akaksajadhak'), [])
# test with a customized namespace
self.assertEqual(self.completer.global_matches('CompleteM'),
['CompleteMe()'])
self.assertEqual(self.completer.global_matches('eg'),
['egg('])
# XXX: see issue5256
self.assertEqual(self.completer.global_matches('CompleteM'),
['CompleteMe()'])
def test_attr_matches(self):
# test with builtins namespace
self.assertEqual(self.stdcompleter.attr_matches('str.s'),
['str.{}('.format(x) for x in dir(str)
if x.startswith('s')])
self.assertEqual(self.stdcompleter.attr_matches('tuple.foospamegg'), [])
expected = sorted({'None.%s%s' % (x, '(' if x != '__doc__' else '')
for x in dir(None)})
self.assertEqual(self.stdcompleter.attr_matches('None.'), expected)
self.assertEqual(self.stdcompleter.attr_matches('None._'), expected)
self.assertEqual(self.stdcompleter.attr_matches('None.__'), expected)
# test with a customized namespace
self.assertEqual(self.completer.attr_matches('CompleteMe.sp'),
['CompleteMe.spam'])
self.assertEqual(self.completer.attr_matches('Completeme.egg'), [])
self.assertEqual(self.completer.attr_matches('CompleteMe.'),
['CompleteMe.mro()', 'CompleteMe.spam'])
self.assertEqual(self.completer.attr_matches('CompleteMe._'),
['CompleteMe._ham'])
matches = self.completer.attr_matches('CompleteMe.__')
for x in matches:
self.assertTrue(x.startswith('CompleteMe.__'), x)
self.assertIn('CompleteMe.__name__', matches)
self.assertIn('CompleteMe.__new__(', matches)
with patch.object(CompleteMe, "me", CompleteMe, create=True):
self.assertEqual(self.completer.attr_matches('CompleteMe.me.me.sp'),
['CompleteMe.me.me.spam'])
self.assertEqual(self.completer.attr_matches('egg.s'),
['egg.{}('.format(x) for x in dir(str)
if x.startswith('s')])
def test_excessive_getattr(self):
"""Ensure getattr() is invoked no more than once per attribute"""
# note the special case for @property methods below; that is why
# we use __dir__ and __getattr__ in class Foo to create a "magic"
# class attribute 'bar'. This forces `getattr` to call __getattr__
        # (which it doesn't necessarily do).
class Foo:
calls = 0
bar = ''
def __getattribute__(self, name):
if name == 'bar':
self.calls += 1
return None
return super().__getattribute__(name)
f = Foo()
completer = rlcompleter.Completer(dict(f=f))
self.assertEqual(completer.complete('f.b', 0), 'f.bar')
self.assertEqual(f.calls, 1)
def test_property_method_not_called(self):
class Foo:
_bar = 0
property_called = False
@property
def bar(self):
self.property_called = True
return self._bar
f = Foo()
completer = rlcompleter.Completer(dict(f=f))
self.assertEqual(completer.complete('f.b', 0), 'f.bar')
self.assertFalse(f.property_called)
def test_uncreated_attr(self):
# Attributes like properties and slots should be completed even when
# they haven't been created on an instance
class Foo:
__slots__ = ("bar",)
completer = rlcompleter.Completer(dict(f=Foo()))
self.assertEqual(completer.complete('f.', 0), 'f.bar')
@unittest.mock.patch('rlcompleter._readline_available', False)
def test_complete(self):
completer = rlcompleter.Completer()
self.assertEqual(completer.complete('', 0), '\t')
self.assertEqual(completer.complete('a', 0), 'and ')
self.assertEqual(completer.complete('a', 1), 'as ')
self.assertEqual(completer.complete('as', 2), 'assert ')
self.assertEqual(completer.complete('an', 0), 'and ')
self.assertEqual(completer.complete('pa', 0), 'pass')
self.assertEqual(completer.complete('Fa', 0), 'False')
self.assertEqual(completer.complete('el', 0), 'elif ')
self.assertEqual(completer.complete('el', 1), 'else')
self.assertEqual(completer.complete('tr', 0), 'try:')
def test_duplicate_globals(self):
namespace = {
'False': None, # Keyword vs builtin vs namespace
'assert': None, # Keyword vs namespace
'try': lambda: None, # Keyword vs callable
'memoryview': None, # Callable builtin vs non-callable
'Ellipsis': lambda: None, # Non-callable builtin vs callable
}
completer = rlcompleter.Completer(namespace)
self.assertEqual(completer.complete('False', 0), 'False')
self.assertIsNone(completer.complete('False', 1)) # No duplicates
# Space or colon added due to being a reserved keyword
self.assertEqual(completer.complete('assert', 0), 'assert ')
self.assertIsNone(completer.complete('assert', 1))
self.assertEqual(completer.complete('try', 0), 'try:')
self.assertIsNone(completer.complete('try', 1))
# No opening bracket "(" because we overrode the built-in class
self.assertEqual(completer.complete('memoryview', 0), 'memoryview')
self.assertIsNone(completer.complete('memoryview', 1))
self.assertEqual(completer.complete('Ellipsis', 0), 'Ellipsis()')
self.assertIsNone(completer.complete('Ellipsis', 1))
if __name__ == '__main__':
unittest.main()
|
1,164 |
crowd api url
|
# coding=utf-8
import logging
from .rest_client import AtlassianRestAPI
log = logging.getLogger(__name__)
class Crowd(AtlassianRestAPI):
"""Crowd API wrapper.
    Important to note that you will have to use application credentials,
not user credentials, in order to access Crowd APIs"""
def __init__(
self,
url,
username,
password,
timeout=60,
api_root="rest",
api_version="latest",
):
super(Crowd, self).__init__(url, username, password, timeout, api_root, api_version)
def METHOD_NAME(self, api, resource):
return "/{api_root}/{api}/{version}/{resource}".format(
api_root=self.api_root,
api=api,
version=self.api_version,
resource=resource,
)
def _user_change_status(self, username, active):
"""
Change user status.
:param username: str - username
:param active: bool - True/False
:return:
"""
user = self.user(username)
user_object = {
"name": username,
"active": active,
"display-name": user.get("display-name"),
"first-name": user.get("first-name"),
"last-name": user.get("last-name"),
"email": user.get("email"),
}
params = {"username": username}
return self.put(
self.METHOD_NAME("usermanagement", "user"),
params=params,
data=user_object,
)
def user(self, username):
"""
Get user information
:param username:
:return:
"""
params = {"username": username}
return self.get(self.METHOD_NAME("usermanagement", "user"), params=params)
def user_activate(self, username):
"""
Activate user
:param username: str - username
"""
return self._user_change_status(username, True)
def user_create(
self,
username,
active,
first_name,
last_name,
display_name,
email,
password,
):
"""
Create new user method
        :param username: string: username
        :param active: bool:
:param first_name: string:
:param last_name: string:
:param display_name: string:
:param email: string:
:param password: string:
:return:
"""
user_object = {
"name": username,
"password": {"value": password},
"active": active,
"first-name": first_name,
"last-name": last_name,
"display-name": display_name,
"email": email,
}
return self.post(self.METHOD_NAME("usermanagement", "user"), data=user_object)
def user_deactivate(self, username):
"""
Deactivate user
:return:
"""
return self._user_change_status(username, False)
def user_delete(self, username):
"""
Delete user
:param username: str - username
:return:
"""
params = {"username": username}
return self.delete(self.METHOD_NAME("usermanagement", "user"), params=params)
def group_add_user(self, username, groupname):
"""
Add user to group
:return:
"""
data = {"name": groupname}
params = {"username": username}
return self.post(
self.METHOD_NAME("usermanagement", "user/group/direct"),
params=params,
json=data,
)
def group_nested_members(self, group):
"""
Get nested members of group
:param group:
:return:
"""
params = {"groupname": group}
return self.get(self.METHOD_NAME("group", "nested"), params=params)
def health_check(self):
"""
Get health status
https://confluence.atlassian.com/jirakb/how-to-retrieve-health-check-results-using-rest-api-867195158.html
:return:
"""
# check as Troubleshooting & Support Tools Plugin
response = self.get("rest/troubleshooting/1.0/check/")
if not response:
# check as support tools
response = self.get("rest/supportHealthCheck/1.0/check/")
return response
def get_plugins_info(self):
"""
Provide plugins info
:return a json of installed plugins
"""
url = "rest/plugins/1.0/"
return self.get(url, headers=self.no_check_headers, trailing=True)
def get_plugin_info(self, plugin_key):
"""
Provide plugin info
:return a json of installed plugins
"""
url = "rest/plugins/1.0/{plugin_key}-key".format(plugin_key=plugin_key)
return self.get(url, headers=self.no_check_headers, trailing=True)
def get_plugin_license_info(self, plugin_key):
"""
Provide plugin license info
        :return: a json for the specific license query
"""
url = "rest/plugins/1.0/{plugin_key}-key/license".format(plugin_key=plugin_key)
return self.get(url, headers=self.no_check_headers, trailing=True)
def upload_plugin(self, plugin_path):
"""
        Provide plugin path for upload into Crowd, e.g. useful for auto deploy
:param plugin_path:
:return:
"""
files = {"plugin": open(plugin_path, "rb")}
upm_token = self.request(
method="GET",
path="rest/plugins/1.0/",
headers=self.no_check_headers,
trailing=True,
).headers["upm-token"]
url = "rest/plugins/1.0/?token={upm_token}".format(upm_token=upm_token)
return self.post(url, files=files, headers=self.no_check_headers)
def delete_plugin(self, plugin_key):
"""
Delete plugin
:param plugin_key:
:return:
"""
url = "rest/plugins/1.0/{}-key".format(plugin_key)
return self.delete(url)
def check_plugin_manager_status(self):
url = "rest/plugins/latest/safe-mode"
return self.request(method="GET", path=url, headers=self.safe_mode_headers)
def update_plugin_license(self, plugin_key, raw_license):
"""
Update license for plugin
:param plugin_key:
:param raw_license:
:return:
"""
app_headers = {
"X-Atlassian-Token": "nocheck",
"Content-Type": "application/vnd.atl.plugins+json",
}
url = "/plugins/1.0/{plugin_key}/license".format(plugin_key=plugin_key)
data = {"rawLicense": raw_license}
return self.put(url, data=data, headers=app_headers)
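# Editor's note (hedged illustration, not part of the wrapper): the path that
# the URL helper above (METHOD_NAME, "crowd api url") composes with the default
# api_root="rest" and api_version="latest".
def _demo_crowd_path():
    path = "/{api_root}/{api}/{version}/{resource}".format(
        api_root="rest", api="usermanagement", version="latest", resource="user")
    return path   # -> "/rest/usermanagement/latest/user"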
|
1,165 |
std string
|
#!/usr/bin/env python
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''
from datetime import datetime
from struct import unpack, calcsize
from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta
def _byte_string(s):
"""Cast a string or byte string to an ASCII byte string."""
return s.encode('ASCII')
_NULL = _byte_string('\0')
def METHOD_NAME(s):
"""Cast a string or byte string to an ASCII string."""
return str(s.decode('ASCII'))
def build_tzinfo(zone, fp):
head_fmt = '>4s c 15x 6l'
head_size = calcsize(head_fmt)
(magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))
# Make sure it is a tzfile(5) file
assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic)
# Read out the transition times, localtime indices and ttinfo structures.
data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
timecnt=timecnt, ttinfo='lBB' * typecnt, charcnt=charcnt)
data_size = calcsize(data_fmt)
data = unpack(data_fmt, fp.read(data_size))
# make sure we unpacked the right number of values
assert len(data) == 2 * timecnt + 3 * typecnt + 1
transitions = [memorized_datetime(trans)
for trans in data[:timecnt]]
lindexes = list(data[timecnt:2 * timecnt])
ttinfo_raw = data[2 * timecnt:-1]
tznames_raw = data[-1]
del data
# Process ttinfo into separate structs
ttinfo = []
tznames = {}
i = 0
while i < len(ttinfo_raw):
# have we looked up this timezone name yet?
tzname_offset = ttinfo_raw[i + 2]
if tzname_offset not in tznames:
nul = tznames_raw.find(_NULL, tzname_offset)
if nul < 0:
nul = len(tznames_raw)
tznames[tzname_offset] = METHOD_NAME(
tznames_raw[tzname_offset:nul])
ttinfo.append((ttinfo_raw[i],
bool(ttinfo_raw[i + 1]),
tznames[tzname_offset]))
i += 3
# Now build the timezone object
if len(ttinfo) == 1 or len(transitions) == 0:
ttinfo[0][0], ttinfo[0][2]
cls = type(zone, (StaticTzInfo,), dict(
zone=zone,
_utcoffset=memorized_timedelta(ttinfo[0][0]),
_tzname=ttinfo[0][2]))
else:
# Early dates use the first standard time ttinfo
i = 0
while ttinfo[i][1]:
i += 1
if ttinfo[i] == ttinfo[lindexes[0]]:
transitions[0] = datetime.min
else:
transitions.insert(0, datetime.min)
lindexes.insert(0, i)
# calculate transition info
transition_info = []
for i in range(len(transitions)):
inf = ttinfo[lindexes[i]]
utcoffset = inf[0]
if not inf[1]:
dst = 0
else:
for j in range(i - 1, -1, -1):
prev_inf = ttinfo[lindexes[j]]
if not prev_inf[1]:
break
dst = inf[0] - prev_inf[0] # dst offset
# Bad dst? Look further. DST > 24 hours happens when
                # a timezone has moved across the international dateline.
if dst <= 0 or dst > 3600 * 3:
for j in range(i + 1, len(transitions)):
stdinf = ttinfo[lindexes[j]]
if not stdinf[1]:
dst = inf[0] - stdinf[0]
if dst > 0:
break # Found a useful std time.
tzname = inf[2]
# Round utcoffset and dst to the nearest minute or the
# datetime library will complain. Conversions to these timezones
# might be up to plus or minus 30 seconds out, but it is
# the best we can do.
utcoffset = int((utcoffset + 30) // 60) * 60
dst = int((dst + 30) // 60) * 60
transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))
cls = type(zone, (DstTzInfo,), dict(
zone=zone,
_utc_transition_times=transitions,
_transition_info=transition_info))
return cls()
if __name__ == '__main__':
import os.path
from pprint import pprint
base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
tz = build_tzinfo('Australia/Melbourne',
open(os.path.join(base, 'Australia', 'Melbourne'), 'rb'))
tz = build_tzinfo('US/Eastern',
open(os.path.join(base, 'US', 'Eastern'), 'rb'))
pprint(tz._utc_transition_times)
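# Editor's note (hedged check, not part of the original module): the
# minute-rounding rule used in build_tzinfo above, on a few sample offsets.
def _demo_round_to_minute():
    # int((off + 30) // 60) * 60 rounds to the nearest minute, ties rounding up:
    # 3599 -> 3600, 3570 -> 3600, 3630 -> 3660, 3629 -> 3600
    return [int((off + 30) // 60) * 60 for off in (3599, 3570, 3630, 3629)]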
|
1,166 |
ankle2
|
#!/usr/bin/env python3
# Copyright (C) 2013-2016 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
from boxes import edges
class LegEdge(edges.BaseEdge):
def __call__(self, l, **kw):
d0 = (l - 12.0) /2
self.hole(l/2, 6, 3.0)
self.polyline(d0, 90, 0, (-180, 6), 0, 90, d0)
class OttoLegs(Boxes):
"""Otto LC - a laser cut chassis for Otto DIY - legs"""
ui_group = "Misc"
def __init__(self) -> None:
Boxes.__init__(self)
self.addSettingsArgs(edges.FingerJointSettings, finger=1.0, space=1.0,
surroundingspaces=1.0)
self.argparser.add_argument(
"--anklebolt1", action="store", type=float, default=3.0,
help="diameter for hole for ankle bolts - foot side")
self.argparser.add_argument(
"--anklebolt2", action="store", type=float, default=2.6,
help="diameter for hole for ankle bolts - leg side")
self.argparser.add_argument(
"--length", action="store", type=float, default=34.0,
help="length of legs (34mm min)")
def foot(self, x, y, ly, l, r=5., move=None):
if self.move(x, y, move, True):
return
t = self.thickness
w = ly + 5.5 + 2 * t
self.fingerHolesAt(x/2 - w/2, 0, l, 90)
self.fingerHolesAt(x/2 + w/2, 0, l, 90)
self.moveTo(r, 0)
for l in (x, y, x, y):
self.polyline((l - 2*r, 2), 45, r*2**0.5, 45)
self.move(x, y, move)
def ankles(self, x, h, edge="f", callback=None, move=None):
f = 0.5
tw = x
th = 2 * h + self.thickness
if self.move(tw, th, move, True):
return
self.moveTo(0, self.thickness)
for i in range(2):
self.cc(callback, 0)
self.edges[edge](x)
self.polyline(0, 90)
self.cc(callback, 1)
self.polyline((h, 2), 90, (f*x, 1), 45, ((2**0.5)*(1-f)*x, 1), 45,
(h-(1-f)*x, 1), 90)
self.moveTo(tw, th, 180)
self.ctx.stroke()
self.move(tw, th, move)
def ankle1(self):
# from vertical edge
self.hole(15, 10, 3.45) # 3.45 for servo arm, 2.3 for knob
def servoring(self, move=""):
if self.move(20, 20, move, True):
return
self.moveTo(10, 10, 90)
self.moveTo(3.45, 0, -90)
self.polyline(0, (-264, 3.45), 0, 36, 6.55, 108, 0, (330, 9.0, 4), 0, 108, 6.55)
self.move(20, 20, move)
def METHOD_NAME(self):
# from vertical edge
self.hole(15, 10, self.anklebolt1/2)
def servoHole(self):
self.hole(6, 6, 11.6/2)
self.hole(6, 12, 5.5/2)
def render(self):
# adjust to the variables you want in the local scope
t = self.thickness
ws = 25
lx, ly, lh = 12.4, 23.5, max(self.length, ws+6+t)
self.ctx.save()
# Legs
c1 = edges.CompoundEdge(self, "FE", (ly-7.0, 7.0))
c2 = edges.CompoundEdge(self, "EF", (7.0, lh-7.0))
e = [c1, c2, "F", "F"]
for i in range(2):
# front
self.rectangularWall(lx, lh-7., [LegEdge(self, None), "f", "F", "f"], callback=[None, lambda:self.fingerHolesAt(ws-7., 0, lx)], move="right")
# back
self.rectangularWall(lx, lh, "FfFf", callback=[
lambda:self.hole(lx/2, 7, self.anklebolt2/2)], move="right")
# sides
self.rectangularWall(ly, lh, e, callback=[None,
lambda:self.fingerHolesAt(ws, 7.0, ly-7.0-3.0)], move="right")
self.rectangularWall(ly, lh, e, callback=[
lambda:self.rectangularHole(ly/2, ws+3+0.5*t, 12, 6, 3),
lambda:self.fingerHolesAt(ws, 7.0, ly-7.0-3.0)], move="right")
# top
self.partsMatrix(2, 1, "right", self.rectangularWall, ly, lx, "ffff",
callback=[None, lambda: self.hole(lx/2, ly/2, 2.3)])
self.partsMatrix(2, 1, "right", self.rectangularWall, lx, ly, "eeee", callback=[lambda: self.hole(lx/2, ly/2, 1.5)])
# hold servo at the front
self.partsMatrix(2, 1, "right", self.rectangularWall, 4.6, lx, "efee")
# bottom
self.partsMatrix(2, 1, "right", self.rectangularWall, lx, ly-7.0, "efff")
# hold servo inside
self.partsMatrix(2, 1, "right", self.rectangularWall, lx, ly-7.0-3.0, "efef")
self.ctx.restore()
self.rectangularWall(lx, lh, "ffff", move="up only")
# feet
self.foot(60, 40, ly, 30, move="right")
self.foot(60, 40, ly, 30, move="right")
self.ankles(30, 25, callback=[None, self.ankle1], move="right")
self.ankles(30, 25, callback=[None, self.METHOD_NAME], move="right")
self.partsMatrix(2, 2, "right", self.servoring)
|
1,167 |
to compas
|
import bpy
import bmesh
from compas.datastructures import Mesh
from compas.geometry import Point
from ._geometry import BlenderGeometry
class BlenderMesh(BlenderGeometry):
"""Wrapper for Blender meshes.
Attributes
----------
object : :blender:`bpy.types.Object`
The Blender scene object.
geometry : :blender:`bpy.types.Mesh`
The mesh data block.
bmesh : :blender:`bpy.types.BMesh`
The mesh data structure.
location : :class:`~compas.geometry.Point`
The location of the object in the scene.
vertices : List[:class:`~compas.geometry.Point`]
The mesh vertex locations.
faces : List[List[:obj:`int`]]
The mesh face vertices.
Examples
--------
.. code-block:: python
import os
import compas
from compas_blender.conversions import BlenderMesh
mesh = BlenderMesh.from_monkey().to_compas()
mesh = mesh.subdivide(k=2)
        path = os.path.join(os.path.expanduser('~'), 'Desktop', 'monkey.json')
compas.json_dump(mesh, path)
"""
@property
def object(self):
return self._object
@object.setter
def object(self, obj):
mesh = bpy.data.meshes.new_from_object(obj)
self._object = obj
self._geometry = mesh
@property
def geometry(self):
return self._geometry
@geometry.setter
def geometry(self, data):
self._object = None
self._geometry = data
@property
def bmesh(self):
return bmesh.from_edit_mesh(self.mesh)
@property
def location(self):
if self.object:
return Point(self.object.location)
return Point(0, 0, 0)
@property
def vertices(self):
point = self.location
return [point + list(vertex.co) for vertex in self.geometry.vertices]
@property
def faces(self):
return [list(face.vertices) for face in self.geometry.polygons]
@classmethod
def from_bmesh(cls, bm, name=None, free=True):
"""Construct a Blender mesh wrappper from a BMesh.
Parameters
----------
bm : :blender:`bpy.types.BMesh`
The Blender mesh data structure.
name : :obj:`str`, optional
The name of the data block.
free : :obj:`bool`, optional
Free the data structure once the data block is created.
Returns
-------
:class:`~compas_blender.conversions.BlenderMesh`
"""
data = bpy.data.meshes.new(name or "Mesh")
bm.to_mesh(data)
if free:
bm.free()
mesh = cls()
mesh.geometry = data
return mesh
@classmethod
def from_monkey(cls, name=None):
"""Construct a Blender mesh wrappper from the Blender monkey.
Parameters
----------
name : :obj:`str`, optional
The name of the data block.
Returns
-------
:class:`~compas_blender.conversions.BlenderMesh`
"""
bm = bmesh.new()
bmesh.ops.create_monkey(bm)
data = bpy.data.meshes.new(name or "Mesh")
bm.to_mesh(data)
bm.free()
mesh = cls()
mesh.geometry = data
return mesh
def METHOD_NAME(self, cls=None):
"""Convert the Blender mesh to a COMPAS mesh.
Parameters
----------
cls : :class:`~compas.datastructures.Mesh`, optional
The type of COMPAS mesh.
Returns
-------
:class:`~compas.datastructure.Mesh`
"""
cls = cls or Mesh
return cls.from_vertices_and_faces(self.vertices, self.faces)
# def get_vertex_coordinates(self, vertex):
# return add_vectors(self.location, self.geometry.vertices[vertex].co)
# def get_vertices_coordinates(self):
# xyzs = [vertex.co for vertex in self.geometry.vertices]
# return {vertex: add_vectors(self.location, xyz) for vertex, xyz in enumerate(xyzs)}
# def set_vertices_coordinates(self, xyzs):
# for vertex, xyz in xyzs.items():
# self.geometry.vertices[vertex].co = subtract_vectors(xyz, self.location)
# def get_vertices_colors(self, vertices=None):
# colors = {}
# col = self.geometry.vertex_colors.active
# if col:
# if not vertices:
# vertices = range(len(self.geometry.vertices))
# for face in self.geometry.polygons:
# for i in face.loop_indices:
# j = self.geometry.loops[i].vertex_index
# if (j in vertices) and (not colors.get(j, None)):
# colors[j] = list(col.data[i].color)[:3]
# return colors
# def set_vertices_colors(self, colors):
# if self.geometry.vertex_colors:
# col = self.geometry.vertex_colors.active
# else:
# col = self.geometry.vertex_colors.new()
# for face in self.geometry.polygons:
# for i in face.loop_indices:
# j = self.geometry.loops[i].vertex_index
# if j in colors:
# col.data[i].color = list(colors[j]) + [1]
# def unset_vertices_colors(self):
# vertex_colors = self.geometry.vertex_colors
# while vertex_colors:
# vertex_colors.remove(vertex_colors[0])
# def get_edge_vertex_indices(self, edge):
# return list(self.geometry.edges[edge].vertices)
# def get_edges_vertex_indices(self, edges=None):
# if not edges:
# edges = range(len(self.geometry.edges))
# return {edge: self.get_edge_vertex_indices(edge=edge) for edge in edges}
# def edge_length(self, edge):
# u, v = self.geometry.edges[edge].vertices
# sp, ep = [list(self.geometry.vertices[i].co) for i in [u, v]]
# return distance_point_point(sp, ep)
# def edges_lengths(self, edges=None):
# if not edges:
# edges = range(len(self.geometry.edges))
# return {edge: self.edge_length(edge=edge) for edge in edges}
# def get_face_vertex_indices(self, face):
# return list(self.geometry.polygons[face].vertices)
# def get_faces_vertex_indices(self, faces=None):
# if not faces:
# faces = range(len(self.geometry.polygons))
# return {face: self.get_face_vertex_indices(face=face) for face in faces}
# def face_normal(self, face):
# return list(self.geometry.polygons[face].normal)
# def faces_normals(self, faces=None):
# if not faces:
# faces = range(len(self.geometry.polygons))
# return {face: self.face_normal(face=face) for face in faces}
# def face_area(self, face):
# return self.geometry.polygons[face].area
# def faces_areas(self, faces=None):
# if not faces:
# faces = range(len(self.geometry.polygons))
# return {face: self.face_area(face=face) for face in faces}
# def bevel(self, width=0.2, segments=1, only_vertices=False):
# self.object.modifiers.new('bevel', type='BEVEL')
# self.object.modifiers['bevel'].width = width
# self.object.modifiers['bevel'].segments = segments
# self.object.modifiers['bevel'].use_only_vertices = only_vertices
# self.refresh()
# def subdivide(self, levels=1, type='SIMPLE'):
# self.object.modifiers.new('subdivision', type='SUBSURF')
# self.object.modifiers['subdivision'].levels = levels
# self.object.modifiers['subdivision'].subdivision_type = type # or 'CATMULL_CLARK'
# self.refresh()
# def triangulate(self):
# self.object.modifiers.new('triangulate', type='TRIANGULATE')
# self.refresh()
# def get_vertices_and_faces(self):
# vertices = self.get_vertices_coordinates()
# faces = self.get_faces_vertex_indices()
# return vertices, faces
|
1,168 |
recv handle
|
import multiprocessing as mp
import os
from functools import partial
from queue import Empty as QueueIsEmpty
import numpy as np
import pytest
from ucp._libs import ucx_api
from ucp._libs.arr import Array
from ucp._libs.utils import get_address
from ucp._libs.utils_test import blocking_am_recv, blocking_am_send
mp = mp.get_context("spawn")
RNDV_THRESH = 8192
def get_data():
ret = {}
ret["bytearray"] = {
"allocator": bytearray,
"generator": lambda n: bytearray(os.urandom(n)),
"validator": lambda recv, exp: np.testing.assert_equal(recv, exp),
"memory_type": ucx_api.AllocatorType.HOST,
}
ret["numpy"] = {
"allocator": partial(np.ones, dtype=np.uint8),
"generator": partial(np.arange, dtype=np.int64),
"validator": lambda recv, exp: np.testing.assert_equal(
recv.view(np.int64), exp
),
"memory_type": ucx_api.AllocatorType.HOST,
}
try:
import cupy as cp
ret["cupy"] = {
"allocator": partial(cp.ones, dtype=np.uint8),
"generator": partial(cp.arange, dtype=np.int64),
"validator": lambda recv, exp: cp.testing.assert_array_equal(
recv.view(np.int64), exp
),
"memory_type": ucx_api.AllocatorType.CUDA,
}
except ImportError:
pass
return ret
def _echo_server(get_queue, put_queue, msg_size, datatype):
"""Server that send received message back to the client
Notice, since it is illegal to call progress() in call-back functions,
we use a "chain" of call-back functions.
"""
data = get_data()[datatype]
ctx = ucx_api.UCXContext(
config_dict={"RNDV_THRESH": str(RNDV_THRESH)},
feature_flags=(ucx_api.Feature.AM,),
)
worker = ucx_api.UCXWorker(ctx)
worker.register_am_allocator(data["allocator"], data["memory_type"])
# A reference to listener's endpoint is stored to prevent it from going
# out of scope too early.
ep = None
def _send_handle(request, exception, msg):
# Notice, we pass `msg` to the handler in order to make sure
# it doesn't go out of scope prematurely.
assert exception is None
def METHOD_NAME(recv_obj, exception, ep):
assert exception is None
msg = Array(recv_obj)
ucx_api.am_send_nbx(ep, msg, msg.nbytes, cb_func=_send_handle, cb_args=(msg,))
def _listener_handler(conn_request):
global ep
ep = ucx_api.UCXEndpoint.create_from_conn_request(
worker,
conn_request,
endpoint_error_handling=True,
)
# Wireup
ucx_api.am_recv_nb(ep, cb_func=METHOD_NAME, cb_args=(ep,))
# Data
ucx_api.am_recv_nb(ep, cb_func=METHOD_NAME, cb_args=(ep,))
listener = ucx_api.UCXListener(worker=worker, port=0, cb_func=_listener_handler)
put_queue.put(listener.port)
while True:
worker.progress()
try:
get_queue.get(block=False, timeout=0.1)
except QueueIsEmpty:
continue
else:
break
def _echo_client(msg_size, datatype, port):
data = get_data()[datatype]
ctx = ucx_api.UCXContext(
config_dict={"RNDV_THRESH": str(RNDV_THRESH)},
feature_flags=(ucx_api.Feature.AM,),
)
worker = ucx_api.UCXWorker(ctx)
worker.register_am_allocator(data["allocator"], data["memory_type"])
ep = ucx_api.UCXEndpoint.create(
worker,
get_address(),
port,
endpoint_error_handling=True,
)
# The wireup message is sent to ensure endpoints are connected, otherwise
# UCX may not perform any rendezvous transfers.
send_wireup = bytearray(b"wireup")
send_data = data["generator"](msg_size)
blocking_am_send(worker, ep, send_wireup)
blocking_am_send(worker, ep, send_data)
recv_wireup = blocking_am_recv(worker, ep)
recv_data = blocking_am_recv(worker, ep)
    # Cast recv_wireup to bytearray when using NumPy as a host allocator;
    # this ensures the assertion below is correct.
if datatype == "numpy":
recv_wireup = bytearray(recv_wireup)
assert bytearray(recv_wireup) == send_wireup
if data["memory_type"] == "cuda" and send_data.nbytes < RNDV_THRESH:
        # Eager messages are always received on the host; if no host
        # allocator is registered, UCX-Py defaults to `bytearray`.
assert recv_data == bytearray(send_data.get())
data["validator"](recv_data, send_data)
@pytest.mark.parametrize("msg_size", [10, 2**24])
@pytest.mark.parametrize("datatype", get_data().keys())
def test_server_client(msg_size, datatype):
put_queue, get_queue = mp.Queue(), mp.Queue()
server = mp.Process(
target=_echo_server,
args=(put_queue, get_queue, msg_size, datatype),
)
server.start()
port = get_queue.get()
client = mp.Process(target=_echo_client, args=(msg_size, datatype, port))
client.start()
client.join(timeout=10)
assert not client.exitcode
put_queue.put("Finished")
server.join(timeout=10)
assert not server.exitcode
|
1,169 |
make regression y
|
"""Utility functions for generating panel data and learning task scenarios."""
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["mloning", "fkiraly"]
__all__ = [
"make_classification_problem",
"make_regression_problem",
"make_transformer_problem",
]
import numpy as np
import pandas as pd
from sklearn.utils.validation import check_random_state
from sktime.datatypes import convert
def _make_panel(
n_instances=20,
n_columns=1,
n_timepoints=20,
y=None,
all_positive=False,
random_state=None,
return_mtype="pd-multiindex",
):
"""Generate sktime compatible test data, Panel data formats.
Parameters
----------
n_instances : int, optional, default=20
        number of instances (series) in the panel
n_columns : int, optional, default=1
number of variables in the time series
n_timepoints : int, optional, default=20
number of time points in each series
    y : None (default), or 1D np.ndarray or 1D array-like, shape (n_instances, )
if passed, return will be generated with association to y
all_positive : bool, optional, default=False
whether series contain only positive values when generated
random_state : None (default) or int
if int is passed, will be used in numpy RandomState for generation
return_mtype : str, sktime Panel mtype str, default="pd-multiindex"
see sktime.datatypes.MTYPE_LIST_PANEL for a full list of admissible strings
        see sktime.datatypes.MTYPE_REGISTER for a short explanation of formats
see examples/AA_datatypes_and_datasets.ipynb for a full specification
Returns
-------
X : an sktime time series data container of mtype return_mtype
with n_instances instances, n_columns variables, n_timepoints time points
generating distribution is all values i.i.d. normal with std 0.5
if y is passed, i-th series values are additively shifted by y[i] * 100
"""
# If target variable y is given, we ignore n_instances and instead generate as
# many instances as in the target variable
if y is not None:
y = np.asarray(y)
n_instances = len(y)
rng = check_random_state(random_state)
# Generate data as 3d numpy array
X = rng.normal(scale=0.5, size=(n_instances, n_columns, n_timepoints))
# Generate association between data and target variable
if y is not None:
X = X + (y * 100).reshape(-1, 1, 1)
if all_positive:
X = X**2
X = convert(X, from_type="numpy3D", to_type=return_mtype)
return X
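# A hedged usage sketch (illustrative only, names as defined above): generate a
# small panel tied to a target vector. Because `y` has 3 entries, 3 instances
# are generated and the i-th series is shifted by y[i] * 100.
#
#     y = np.array([0.0, 1.0, 2.0])
#     X = _make_panel(n_columns=2, n_timepoints=10, y=y, random_state=42,
#                     return_mtype="pd-multiindex")
#     # X is a Panel container in the "pd-multiindex" mtype with 3 instances,
#     # 2 columns and 10 time points per series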
def _make_panel_X(
n_instances=20,
n_columns=1,
n_timepoints=20,
y=None,
all_positive=False,
return_numpy=False,
random_state=None,
):
if return_numpy:
return_mtype = "numpy3D"
else:
return_mtype = "nested_univ"
return _make_panel(
n_instances=n_instances,
n_columns=n_columns,
n_timepoints=n_timepoints,
y=y,
all_positive=all_positive,
random_state=random_state,
return_mtype=return_mtype,
)
def METHOD_NAME(n_instances=20, return_numpy=True, random_state=None):
rng = check_random_state(random_state)
y = rng.normal(size=n_instances)
if return_numpy:
return y
else:
return pd.Series(y)
def _make_classification_y(
n_instances=20, n_classes=2, return_numpy=True, random_state=None
):
if not n_instances > n_classes:
raise ValueError("n_instances must be bigger than n_classes")
rng = check_random_state(random_state)
n_repeats = int(np.ceil(n_instances / n_classes))
y = np.tile(np.arange(n_classes), n_repeats)[:n_instances]
rng.shuffle(y)
if return_numpy:
return y
else:
return pd.Series(y)
def make_classification_problem(
n_instances=20,
n_columns=1,
n_timepoints=20,
n_classes=2,
return_numpy=False,
random_state=None,
):
"""Make Classification Problem."""
y = _make_classification_y(
n_instances, n_classes, return_numpy=return_numpy, random_state=random_state
)
X = _make_panel_X(
n_columns=n_columns,
n_timepoints=n_timepoints,
return_numpy=return_numpy,
random_state=random_state,
y=y,
)
return X, y
def make_regression_problem(
n_instances=20,
n_columns=1,
n_timepoints=20,
return_numpy=False,
random_state=None,
):
"""Make Regression Problem."""
y = METHOD_NAME(
n_instances, random_state=random_state, return_numpy=return_numpy
)
X = _make_panel_X(
n_columns=n_columns,
n_timepoints=n_timepoints,
return_numpy=return_numpy,
random_state=random_state,
y=y,
)
return X, y
def make_clustering_problem(
n_instances=20,
n_columns=1,
n_timepoints=20,
return_numpy=False,
random_state=None,
):
"""Make Clustering Problem."""
    # Can currently only support univariate, so converting
    # to univariate for the time being
return _make_panel_X(
n_instances=n_instances,
n_columns=n_columns,
n_timepoints=n_timepoints,
return_numpy=return_numpy,
random_state=random_state,
)
def make_transformer_problem(
n_instances=20,
n_columns=1,
n_timepoints=20,
return_numpy=True,
random_state=None,
panel=True,
):
"""Make Transformer Problem."""
if not panel:
X = make_transformer_problem(
n_instances=n_instances,
n_columns=n_columns,
n_timepoints=n_timepoints,
return_numpy=True,
random_state=random_state,
panel=True,
)
if return_numpy:
X = X[0]
else:
X = pd.DataFrame(X[0])
else:
X = _make_panel_X(
n_instances=n_instances,
n_columns=n_columns,
n_timepoints=n_timepoints,
return_numpy=True,
random_state=random_state,
)
if not return_numpy:
arr = []
for data in X:
arr.append(pd.DataFrame(data))
X = arr
return X
def _make_nested_from_array(array, n_instances=20, n_columns=1):
return pd.DataFrame(
[[pd.Series(array) for _ in range(n_columns)] for _ in range(n_instances)],
columns=[f"col{c}" for c in range(n_columns)],
)
|
1,170 |
test how many dots
|
# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible_collections.community.general.plugins.module_utils.database import (
is_input_dangerous,
pg_quote_identifier,
SQLParseError,
)
# These are all valid strings
# The results are based on interpreting the identifier as a table name
VALID = {
# User quoted
'"public.table"': '"public.table"',
'"public"."table"': '"public"."table"',
'"schema test"."table test"': '"schema test"."table test"',
# We quote part
'public.table': '"public"."table"',
'"public".table': '"public"."table"',
'public."table"': '"public"."table"',
'schema test.table test': '"schema test"."table test"',
'"schema test".table test': '"schema test"."table test"',
'schema test."table test"': '"schema test"."table test"',
# Embedded double quotes
'table "test"': '"table ""test"""',
'public."table ""test"""': '"public"."table ""test"""',
'public.table "test"': '"public"."table ""test"""',
'schema "test".table': '"schema ""test"""."table"',
'"schema ""test""".table': '"schema ""test"""."table"',
'"""wat"""."""test"""': '"""wat"""."""test"""',
# Sigh, handle these as well:
'"no end quote': '"""no end quote"',
'schema."table': '"schema"."""table"',
'"schema.table': '"""schema"."table"',
'schema."table.something': '"schema"."""table"."something"',
# Embedded dots
'"schema.test"."table.test"': '"schema.test"."table.test"',
'"schema.".table': '"schema."."table"',
'"schema."."table"': '"schema."."table"',
'schema.".table"': '"schema".".table"',
'"schema".".table"': '"schema".".table"',
'"schema.".".table"': '"schema.".".table"',
# These are valid but maybe not what the user intended
'."table"': '".""table"""',
'table.': '"table."',
}
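# A hedged illustration of the mapping above: pg_quote_identifier('public.table', 'table')
# is expected to return '"public"."table"', while already-quoted input such as
# '"public"."table"' is expected to pass through unchanged.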
INVALID = {
('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema."table"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
}
HOW_MANY_DOTS = (
('role', 'role', '"role"',
'PostgreSQL does not support role with more than 1 dots'),
('db', 'database', '"db"',
'PostgreSQL does not support database with more than 1 dots'),
('db.schema', 'schema', '"db"."schema"',
'PostgreSQL does not support schema with more than 2 dots'),
('db.schema.table', 'table', '"db"."schema"."table"',
'PostgreSQL does not support table with more than 3 dots'),
('db.schema.table.column', 'column', '"db"."schema"."table"."column"',
'PostgreSQL does not support column with more than 4 dots'),
)
VALID_QUOTES = ((test, VALID[test]) for test in sorted(VALID))
INVALID_QUOTES = ((test[0], test[1], INVALID[test]) for test in sorted(INVALID))
IS_STRINGS_DANGEROUS = (
(u'', False),
(u' ', False),
(u'alternative database', False),
(u'backup of TRUNCATED table', False),
(u'bob.dropper', False),
(u'd\'artagnan', False),
(u'user_with_select_update_truncate_right', False),
(u';DROP DATABASE fluffy_pets_photos', True),
(u';drop DATABASE fluffy_pets_photos', True),
(u'; TRUNCATE TABLE his_valuable_table', True),
(u'; truncate TABLE his_valuable_table', True),
(u'\'--', True),
(u'"--', True),
(u'\' union select username, password from admin_credentials', True),
(u'\' UNION SELECT username, password from admin_credentials', True),
(u'\' intersect select', True),
(u'\' INTERSECT select', True),
(u'\' except select', True),
(u'\' EXCEPT select', True),
(u';ALTER TABLE prices', True),
(u';alter table prices', True),
(u"; UPDATE products SET price = '0'", True),
(u";update products SET price = '0'", True),
(u"; DELETE FROM products", True),
(u"; delete FROM products", True),
(u"; SELECT * FROM products", True),
(u" ; select * from products", True),
)
@pytest.mark.parametrize("identifier, quoted_identifier", VALID_QUOTES)
def test_valid_quotes(identifier, quoted_identifier):
assert pg_quote_identifier(identifier, 'table') == quoted_identifier
@pytest.mark.parametrize("identifier, id_type, msg", INVALID_QUOTES)
def test_invalid_quotes(identifier, id_type, msg):
with pytest.raises(SQLParseError) as ex:
pg_quote_identifier(identifier, id_type)
ex.match(msg)
@pytest.mark.parametrize("identifier, id_type, quoted_identifier, msg", HOW_MANY_DOTS)
def METHOD_NAME(identifier, id_type, quoted_identifier, msg):
assert pg_quote_identifier(identifier, id_type) == quoted_identifier
with pytest.raises(SQLParseError) as ex:
pg_quote_identifier('%s.more' % identifier, id_type)
ex.match(msg)
@pytest.mark.parametrize("string, result", IS_STRINGS_DANGEROUS)
def test_is_input_dangerous(string, result):
assert is_input_dangerous(string) == result
|
1,171 |
test help option
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import tempfile
from scilpy.io.fetcher import fetch_data, get_home, get_testing_files_dict
fetch_data(get_testing_files_dict(), keys=['btensor_testdata.zip'])
tmp_dir = tempfile.TemporaryDirectory()
def METHOD_NAME(script_runner):
ret = script_runner.run('scil_compute_divide.py', '--help')
assert ret.success
def test_nb_btensors_check(script_runner):
os.chdir(os.path.expanduser(tmp_dir.name))
in_dwi_lin = os.path.join(get_home(), 'btensor_testdata',
'dwi_linear.nii.gz')
in_bval_lin = os.path.join(get_home(), 'btensor_testdata',
'linear.bvals')
in_bvec_lin = os.path.join(get_home(), 'btensor_testdata',
'linear.bvecs')
in_dwi_plan = os.path.join(get_home(), 'btensor_testdata',
'dwi_planar.nii.gz')
in_bval_plan = os.path.join(get_home(), 'btensor_testdata',
'planar.bvals')
in_bvec_plan = os.path.join(get_home(), 'btensor_testdata',
'planar.bvecs')
fa = os.path.join(get_home(), 'btensor',
'fa.nii.gz')
ret = script_runner.run('scil_compute_divide.py', '--in_dwis',
in_dwi_lin, '--in_bvals', in_bval_lin,
'--in_bvecs', in_bvec_lin, '--in_bdeltas', '1',
'--fa', fa, '--do_weight_bvals',
'--do_weight_pa', '--do_multiple_s0',
'--processes', '1', '-f')
assert (not ret.success)
ret = script_runner.run('scil_compute_divide.py', '--in_dwis',
in_dwi_lin, in_dwi_plan, '--in_bvals', in_bval_lin,
in_bval_plan, '--in_bvecs', in_bvec_lin,
in_bvec_plan, '--in_bdeltas', '1', '1',
'--fa', fa, '--do_weight_bvals',
'--do_weight_pa', '--do_multiple_s0',
'--processes', '1', '-f')
assert (not ret.success)
def test_inputs_check(script_runner):
os.chdir(os.path.expanduser(tmp_dir.name))
in_dwi_lin = os.path.join(get_home(), 'btensor_testdata',
'dwi_linear.nii.gz')
in_bval_lin = os.path.join(get_home(), 'btensor_testdata',
'linear.bvals')
in_bvec_lin = os.path.join(get_home(), 'btensor_testdata',
'linear.bvecs')
in_dwi_plan = os.path.join(get_home(), 'btensor_testdata',
'dwi_planar.nii.gz')
in_bval_plan = os.path.join(get_home(), 'btensor_testdata',
'planar.bvals')
in_bvec_plan = os.path.join(get_home(), 'btensor_testdata',
'planar.bvecs')
fa = os.path.join(get_home(), 'btensor',
'fa.nii.gz')
ret = script_runner.run('scil_compute_divide.py', '--in_dwis',
in_dwi_lin, in_dwi_plan, '--in_bvals', in_bval_lin,
'--in_bvecs', in_bvec_lin, '--in_bdeltas', '1',
'--fa', fa, '--do_weight_bvals',
'--do_weight_pa', '--do_multiple_s0',
'--processes', '1', '-f')
assert (not ret.success)
ret = script_runner.run('scil_compute_divide.py', '--in_dwis',
in_dwi_lin, in_dwi_plan, '--in_bvals',
in_bval_lin, in_bval_plan, '--in_bvecs',
in_bvec_lin, in_bvec_plan, '--in_bdeltas', '1',
'-0.5', '0', '--fa', fa, '--do_weight_bvals',
'--do_weight_pa', '--do_multiple_s0',
'--processes', '1', '-f')
assert (not ret.success)
def test_execution_processing(script_runner):
os.chdir(os.path.expanduser(tmp_dir.name))
in_dwi_lin = os.path.join(get_home(), 'btensor_testdata',
'dwi_linear.nii.gz')
in_bval_lin = os.path.join(get_home(), 'btensor_testdata',
'linear.bvals')
in_bvec_lin = os.path.join(get_home(), 'btensor_testdata',
'linear.bvecs')
in_dwi_plan = os.path.join(get_home(), 'btensor_testdata',
'dwi_planar.nii.gz')
in_bval_plan = os.path.join(get_home(), 'btensor_testdata',
'planar.bvals')
in_bvec_plan = os.path.join(get_home(), 'btensor_testdata',
'planar.bvecs')
in_dwi_sph = os.path.join(get_home(), 'btensor_testdata',
'dwi_spherical.nii.gz')
in_bval_sph = os.path.join(get_home(), 'btensor_testdata',
'spherical.bvals')
in_bvec_sph = os.path.join(get_home(), 'btensor_testdata',
'spherical.bvecs')
fa = os.path.join(get_home(), 'btensor_testdata',
'fa.nii.gz')
ret = script_runner.run('scil_compute_divide.py', '--in_dwis',
in_dwi_lin, in_dwi_plan, in_dwi_sph,
'--in_bvals', in_bval_lin, in_bval_plan,
in_bval_sph, '--in_bvecs', in_bvec_lin,
in_bvec_plan, in_bvec_sph, '--in_bdeltas',
'1', '-0.5', '0', '--fa', fa, '--do_weight_bvals',
'--do_weight_pa', '--do_multiple_s0',
'--processes', '1', '-f')
assert (ret.success)
|
1,172 |
test pointgroup sa4 newton
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf import fci
def setUpModule():
global mol, molsym, m, msym, mc_ref
mol = gto.M(
verbose = 5,
output = '/dev/null',
atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. ,-0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '631g',
)
m = scf.RHF(mol)
m.conv_tol = 1e-10
m.scf()
molsym = gto.M(
verbose = 5,
output = '/dev/null',
atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. ,-0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '631g',
symmetry = True
)
msym = scf.RHF(molsym)
msym.conv_tol = 1e-10
msym.scf()
mc_ref = mcscf.CASSCF (m, 4, 4).state_average_([0.25,]*4)
# SA-CASSCF may be stuck at a local minimum e_tot = -75.75381945 with the
    # default initial guess from HF orbitals. The initial guess below is close
    # to the single-state CASSCF orbitals, which can lead to a lower SA-CASSCF
    # energy e_tot = -75.762754627
mo = mc_ref.sort_mo([4,5,6,10], base=1)
mc_ref.kernel (mo)
def tearDownModule():
global mol, molsym, m, msym, mc_ref
mol.stdout.close()
molsym.stdout.close()
del mol, molsym, m, msym, mc_ref
# 4 states, in order: 1^A1, 3^B2, 1^B2, 3^A1
# 3 distinct ways of using state_average_mix to specify these states
class KnownValues(unittest.TestCase):
def test_nosymm_sa4_newton (self):
mc = mcscf.CASSCF (m, 4, 4).state_average_([0.25,]*4).newton ()
mo = mc.sort_mo([4,5,6,10], base=1)
mc.kernel(mo)
self.assertAlmostEqual (mc.e_tot, mc_ref.e_tot, 8)
for e1, e0 in zip (mc.e_states, mc_ref.e_states):
self.assertAlmostEqual (e1, e0, 5)
def test_spin_sa4 (self):
fcisolvers = [fci.solver (mol, singlet=not(bool(i)), symm=False) for i in range (2)]
fcisolvers[0].nroots = fcisolvers[1].nroots = 2
fcisolvers[1].spin = 2
mc = mcscf.addons.state_average_mix (mcscf.CASSCF (m, 4, 4), fcisolvers, [0.25,]*4)
mo = mc.sort_mo([4,5,6,10], base=1)
mc.kernel(mo)
self.assertAlmostEqual (mc.e_tot, mc_ref.e_tot, 8)
for e1, e0 in zip (numpy.sort (mc.e_states), mc_ref.e_states):
self.assertAlmostEqual (e1, e0, 5)
def test_spin_sa4_newton (self):
fcisolvers = [fci.solver (mol, singlet=not(bool(i)), symm=False) for i in range (2)]
fcisolvers[0].nroots = fcisolvers[1].nroots = 2
fcisolvers[1].spin = 2
mc = mcscf.addons.state_average_mix (mcscf.CASSCF (m, 4, 4), fcisolvers, [0.25,]*4).newton ()
mo = mc.sort_mo([4,5,6,10], base=1)
mc.kernel(mo)
self.assertAlmostEqual (mc.e_tot, mc_ref.e_tot, 8)
for e1, e0 in zip (numpy.sort (mc.e_states), mc_ref.e_states):
self.assertAlmostEqual (e1, e0, 5)
def test_pointgroup_sa4 (self):
fcisolvers = [fci.solver (molsym, symm=True, singlet=False) for i in range (2)]
fcisolvers[0].nroots = fcisolvers[1].nroots = 2
fcisolvers[0].wfnsym = 'A1'
fcisolvers[1].wfnsym = 'B1'
mc = mcscf.addons.state_average_mix (mcscf.CASSCF (msym, 4, 4), fcisolvers, [0.25,]*4)
mo = mc.sort_mo([4,5,6,10], base=1)
mc.kernel(mo)
self.assertAlmostEqual (mc.e_tot, mc_ref.e_tot, 8)
for e1, e0 in zip (numpy.sort (mc.e_states), mc_ref.e_states):
self.assertAlmostEqual (e1, e0, 5)
def METHOD_NAME (self):
fcisolvers = [fci.solver (molsym, symm=True, singlet=False) for i in range (2)]
fcisolvers[0].nroots = fcisolvers[1].nroots = 2
fcisolvers[0].wfnsym = 'A1'
fcisolvers[1].wfnsym = 'B1'
mc = mcscf.addons.state_average_mix (mcscf.CASSCF (msym, 4, 4), fcisolvers, [0.25,]*4).newton ()
mo = mc.sort_mo([4,5,6,10], base=1)
mc.kernel(mo)
self.assertAlmostEqual (mc.e_tot, mc_ref.e_tot, 8)
for e1, e0 in zip (numpy.sort (mc.e_states), mc_ref.e_states):
self.assertAlmostEqual (e1, e0, 5)
def test_spin_and_pointgroup_sa4 (self):
fcisolvers = [fci.solver (molsym, singlet = not(bool(i%2))) for i in range (4)]
fcisolvers[0].wfnsym = fcisolvers[1].wfnsym = 'B1'
fcisolvers[2].wfnsym = fcisolvers[3].wfnsym = 'A1'
fcisolvers[1].spin = fcisolvers[3].spin = 2
mc = mcscf.addons.state_average_mix (mcscf.CASSCF (msym, 4, 4), fcisolvers, [0.25,]*4)
mo = mc.sort_mo([4,5,6,10], base=1)
mc.kernel(mo)
self.assertAlmostEqual (mc.e_tot, mc_ref.e_tot, 8)
for e1, e0 in zip (numpy.sort (mc.e_states), mc_ref.e_states):
self.assertAlmostEqual (e1, e0, 5)
def test_spin_and_pointgroup_sa4_newton (self):
fcisolvers = [fci.solver (molsym, singlet = not(bool(i%2))) for i in range (4)]
fcisolvers[0].wfnsym = fcisolvers[1].wfnsym = 'B1'
fcisolvers[2].wfnsym = fcisolvers[3].wfnsym = 'A1'
fcisolvers[1].spin = fcisolvers[3].spin = 2
mc = mcscf.addons.state_average_mix (mcscf.CASSCF (msym, 4, 4), fcisolvers, [0.25,]*4).newton ()
mo = mc.sort_mo([4,5,6,10], base=1)
mc.kernel(mo)
self.assertAlmostEqual (mc.e_tot, mc_ref.e_tot, 8)
for e1, e0 in zip (numpy.sort (mc.e_states), mc_ref.e_states):
self.assertAlmostEqual (e1, e0, 5)
if __name__ == "__main__":
print("Full Tests for H2O")
unittest.main()
|
1,173 |
get strcols
|
"""
Module for formatting output data in console (to string).
"""
from __future__ import annotations
from shutil import get_terminal_size
from typing import (
TYPE_CHECKING,
Iterable,
)
import numpy as np
from pandas.io.formats.printing import pprint_thing
if TYPE_CHECKING:
from pandas.io.formats.format import DataFrameFormatter
class StringFormatter:
"""Formatter for string representation of a dataframe."""
def __init__(self, fmt: DataFrameFormatter, line_width: int | None = None) -> None:
self.fmt = fmt
self.adj = fmt.adj
self.frame = fmt.frame
self.line_width = line_width
def to_string(self) -> str:
text = self._get_string_representation()
if self.fmt.should_show_dimensions:
text = "".join([text, self.fmt.dimensions_info])
return text
def METHOD_NAME(self) -> list[list[str]]:
strcols = self.fmt.get_strcols()
if self.fmt.is_truncated:
strcols = self._insert_dot_separators(strcols)
return strcols
def _get_string_representation(self) -> str:
if self.fmt.frame.empty:
return self._empty_info_line
strcols = self.METHOD_NAME()
if self.line_width is None:
# no need to wrap around just print the whole frame
return self.adj.adjoin(1, *strcols)
if self._need_to_wrap_around:
return self._join_multiline(strcols)
return self._fit_strcols_to_terminal_width(strcols)
@property
def _empty_info_line(self) -> str:
return (
f"Empty {type(self.frame).__name__}\n"
f"Columns: {pprint_thing(self.frame.columns)}\n"
f"Index: {pprint_thing(self.frame.index)}"
)
@property
def _need_to_wrap_around(self) -> bool:
return bool(self.fmt.max_cols is None or self.fmt.max_cols > 0)
def _insert_dot_separators(self, strcols: list[list[str]]) -> list[list[str]]:
str_index = self.fmt._get_formatted_index(self.fmt.tr_frame)
index_length = len(str_index)
if self.fmt.is_truncated_horizontally:
strcols = self._insert_dot_separator_horizontal(strcols, index_length)
if self.fmt.is_truncated_vertically:
strcols = self._insert_dot_separator_vertical(strcols, index_length)
return strcols
@property
def _adjusted_tr_col_num(self) -> int:
return self.fmt.tr_col_num + 1 if self.fmt.index else self.fmt.tr_col_num
def _insert_dot_separator_horizontal(
self, strcols: list[list[str]], index_length: int
) -> list[list[str]]:
strcols.insert(self._adjusted_tr_col_num, [" ..."] * index_length)
return strcols
def _insert_dot_separator_vertical(
self, strcols: list[list[str]], index_length: int
) -> list[list[str]]:
n_header_rows = index_length - len(self.fmt.tr_frame)
row_num = self.fmt.tr_row_num
for ix, col in enumerate(strcols):
cwidth = self.adj.len(col[row_num])
if self.fmt.is_truncated_horizontally:
is_dot_col = ix == self._adjusted_tr_col_num
else:
is_dot_col = False
if cwidth > 3 or is_dot_col:
dots = "..."
else:
dots = ".."
if ix == 0 and self.fmt.index:
dot_mode = "left"
elif is_dot_col:
cwidth = 4
dot_mode = "right"
else:
dot_mode = "right"
dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0]
col.insert(row_num + n_header_rows, dot_str)
return strcols
def _join_multiline(self, strcols_input: Iterable[list[str]]) -> str:
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols_input)
if self.fmt.index:
idx = strcols.pop(0)
lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
col_widths = [
np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0
for col in strcols
]
assert lwidth is not None
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.fmt.is_truncated_vertically:
assert self.fmt.max_rows_fitted is not None
nrows = self.fmt.max_rows_fitted + 1
else:
nrows = len(self.frame)
str_lst = []
start = 0
for i, end in enumerate(col_bins):
row = strcols[start:end]
if self.fmt.index:
row.insert(0, idx)
if nbins > 1:
if end <= len(strcols) and i < nbins - 1:
row.append([" \\"] + [" "] * (nrows - 1))
else:
row.append([" "] * nrows)
str_lst.append(self.adj.adjoin(adjoin_width, *row))
start = end
return "\n\n".join(str_lst)
def _fit_strcols_to_terminal_width(self, strcols: list[list[str]]) -> str:
from pandas import Series
lines = self.adj.adjoin(1, *strcols).split("\n")
max_len = Series(lines).str.len().max()
# plus truncate dot col
width, _ = get_terminal_size()
dif = max_len - width
# '+ 1' to avoid too wide repr (GH PR #17023)
adj_dif = dif + 1
col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = round(n_cols / 2)
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
# adjoin adds one
adj_dif -= col_len + 1
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
# subtract index column
max_cols_fitted = n_cols - self.fmt.index
# GH-21180. Ensure that we print at least two.
max_cols_fitted = max(max_cols_fitted, 2)
self.fmt.max_cols_fitted = max_cols_fitted
# Call again _truncate to cut frame appropriately
# and then generate string representation
self.fmt.truncate()
strcols = self.METHOD_NAME()
return self.adj.adjoin(1, *strcols)
def _binify(cols: list[int], line_width: int) -> list[int]:
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
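# A hedged illustration (not part of pandas): _binify returns the exclusive end
# index of each block of columns, always finishing with len(cols). For example,
# assuming four columns of width 3 and line_width=10:
#
#     _binify([3, 3, 3, 3], 10)  # -> [2, 4]: columns 0-1 in the first block,
#                                #            columns 2-3 in the second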
|
1,174 |
test instant
|
#!/usr/bin/env python
import calendar
import datetime
import time
from saml2.time_util import add_duration
from saml2.time_util import after
from saml2.time_util import before
from saml2.time_util import f_quotient
from saml2.time_util import in_a_while
from saml2.time_util import instant
from saml2.time_util import modulo
from saml2.time_util import not_before
from saml2.time_util import not_on_or_after
from saml2.time_util import parse_duration
from saml2.time_util import str_to_time
from saml2.time_util import valid
def test_f_quotient():
assert f_quotient(0, 3) == 0
assert f_quotient(1, 3) == 0
assert f_quotient(2, 3) == 0
assert f_quotient(3, 3) == 1
assert f_quotient(3.123, 3) == 1
def test_modulo():
assert modulo(-1, 3) == 2
assert modulo(0, 3) == 0
assert modulo(1, 3) == 1
assert modulo(2, 3) == 2
assert modulo(3, 3) == 0
x = 3.123
assert modulo(3.123, 3) == x - 3
def test_f_quotient_2():
for i in range(1, 13):
assert f_quotient(i, 1, 13) == 0
assert f_quotient(13, 1, 13) == 1
assert f_quotient(13.123, 1, 13) == 1
def test_modulo_2():
assert modulo(0, 1, 13) == 12
for i in range(1, 13):
assert modulo(i, 1, 13) == i
assert modulo(13, 1, 13) == 1
# x = 0.123
# assert modulo(13+x, 1, 13) == 1+x
def test_parse_duration():
(sign, d) = parse_duration("P1Y3M5DT7H10M3.3S")
assert sign == "+"
assert d["tm_sec"] == 3.3
assert d["tm_mon"] == 3
assert d["tm_hour"] == 7
assert d["tm_mday"] == 5
assert d["tm_year"] == 1
assert d["tm_min"] == 10
def test_parse_duration2():
(sign, d) = parse_duration("PT30M")
assert sign == "+"
assert d["tm_sec"] == 0
assert d["tm_mon"] == 0
assert d["tm_hour"] == 0
assert d["tm_mday"] == 0
assert d["tm_year"] == 0
assert d["tm_min"] == 30
PATTERNS = {
"P3Y6M4DT12H30M5S": {"tm_sec": 5, "tm_hour": 12, "tm_mday": 4, "tm_year": 3, "tm_mon": 6, "tm_min": 30},
"P23DT23H": {"tm_sec": 0, "tm_hour": 23, "tm_mday": 23, "tm_year": 0, "tm_mon": 0, "tm_min": 0},
"P4Y": {"tm_sec": 0, "tm_hour": 0, "tm_mday": 0, "tm_year": 4, "tm_mon": 0, "tm_min": 0},
"P1M": {"tm_sec": 0, "tm_hour": 0, "tm_mday": 0, "tm_year": 0, "tm_mon": 1, "tm_min": 0},
"PT1M": {"tm_sec": 0, "tm_hour": 0, "tm_mday": 0, "tm_year": 0, "tm_mon": 0, "tm_min": 1},
"P0.5Y": {"tm_sec": 0, "tm_hour": 0, "tm_mday": 0, "tm_year": 0.5, "tm_mon": 0, "tm_min": 0},
"P0,5Y": {"tm_sec": 0, "tm_hour": 0, "tm_mday": 0, "tm_year": 0.5, "tm_mon": 0, "tm_min": 0},
"PT36H": {"tm_sec": 0, "tm_hour": 36, "tm_mday": 0, "tm_year": 0, "tm_mon": 0, "tm_min": 0},
"P1DT12H": {"tm_sec": 0, "tm_hour": 12, "tm_mday": 1, "tm_year": 0, "tm_mon": 0, "tm_min": 0},
}
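# The keys above follow the ISO 8601 / xsd:duration form PnYnMnDTnHnMnS, where
# the part after "T" holds hours/minutes/seconds; e.g. "P1DT12H" parses to
# 1 day and 12 hours, as asserted in test_parse_duration_n below.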
def test_parse_duration_n():
for dur, _val in PATTERNS.items():
(sign, d) = parse_duration(dur)
assert d == _val
def test_add_duration_1():
# 2000-01-12T12:13:14Z P1Y3M5DT7H10M3S 2001-04-17T19:23:17Z
t = add_duration(str_to_time("2000-01-12T12:13:14Z"), "P1Y3M5DT7H10M3S")
assert t.tm_year == 2001
assert t.tm_mon == 4
assert t.tm_mday == 17
assert t.tm_hour == 19
assert t.tm_min == 23
assert t.tm_sec == 17
def test_add_duration_2():
# 2000-01-12 PT33H 2000-01-13
t = add_duration(str_to_time("2000-01-12T00:00:00Z"), "PT33H")
assert t.tm_year == 2000
assert t.tm_mon == 1
assert t.tm_mday == 13
assert t.tm_hour == 9
assert t.tm_min == 0
assert t.tm_sec == 0
def test_str_to_time():
t = calendar.timegm(str_to_time("2000-01-12T00:00:00Z"))
# TODO: Find all instances of time.mktime(.....)
# t = time.mktime(str_to_time("2000-01-12T00:00:00Z"))
# assert t == 947631600.0
# TODO: add something to show how this time was arrived at
# do this as an external method in the
assert t == 947635200
# some IdPs omit the trailing Z, and SAML spec is unclear if it is actually required
t = calendar.timegm(str_to_time("2000-01-12T00:00:00"))
assert t == 947635200
def METHOD_NAME():
inst = str_to_time(instant())
now = time.gmtime()
assert now >= inst
def test_valid():
assert valid("2000-01-12T00:00:00Z") == False
current_year = datetime.datetime.today().year
assert valid(f"{int(current_year + 1)}-01-12T00:00:00Z") == True
this_instance = instant()
time.sleep(1)
assert valid(this_instance) is False # unless on a very fast machine :-)
soon = in_a_while(seconds=10)
assert valid(soon) == True
def test_timeout():
soon = in_a_while(seconds=1)
time.sleep(2)
assert valid(soon) == False
def test_before():
current_year = datetime.datetime.today().year
assert before(f"{int(current_year - 1)}-01-01T00:00:00Z") == False
assert before(f"{int(current_year + 1)}-01-01T00:00:00Z") == True
def test_after():
current_year = datetime.datetime.today().year
assert after(f"{int(current_year + 1)}-01-01T00:00:00Z") == False
assert after(f"{int(current_year - 1)}-01-01T00:00:00Z") == True
def test_not_before():
current_year = datetime.datetime.today().year
assert not_before(f"{int(current_year + 1)}-01-01T00:00:00Z") == False
assert not_before(f"{int(current_year - 1)}-01-01T00:00:00Z") == True
def test_not_on_or_after():
current_year = datetime.datetime.today().year
assert not_on_or_after(f"{int(current_year + 1)}-01-01T00:00:00Z") == True
assert not_on_or_after(f"{int(current_year - 1)}-01-01T00:00:00Z") == False
if __name__ == "__main__":
test_str_to_time()
|
1,175 |
test calc rmsd rotational matrix
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
:Author: Joshua L. Adelman, University of Pittsburgh
:Contact: [email protected]
Sample code to use the routine for fast RMSD & rotational matrix calculation.
For the example provided below, the minimum least-squares RMSD for the two
7-atom fragments should be 0.719106 A.
And the corresponding 3x3 rotation matrix is:
[[ 0.72216358 -0.52038257 -0.45572112]
[ 0.69118937 0.51700833 0.50493528]
[-0.0271479 -0.67963547 0.73304748]]
"""
import numpy as np
import MDAnalysis.lib.qcprot as qcp
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import MDAnalysis.analysis.rms as rms
import pytest
@pytest.fixture()
def atoms_a():
return np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]], dtype=np.float64)
@pytest.fixture()
def atoms_b():
return np.array([[13,14,15], [16,17,18], [19,20,21], [22,23,24]], dtype=np.float64)
# Calculate rmsd after applying rotation
def rmsd(a, b):
"""Returns RMSD between two coordinate sets a and b."""
return np.sqrt(np.sum(np.power(a - b, 2)) / a.shape[1])
def METHOD_NAME():
# Setup coordinates
frag_a = np.zeros((3, 7), dtype=np.float64)
frag_b = np.zeros((3, 7), dtype=np.float64)
N = 7
frag_a[0][0] = -2.803
frag_a[1][0] = -15.373
frag_a[2][0] = 24.556
frag_a[0][1] = 0.893
frag_a[1][1] = -16.062
frag_a[2][1] = 25.147
frag_a[0][2] = 1.368
frag_a[1][2] = -12.371
frag_a[2][2] = 25.885
frag_a[0][3] = -1.651
frag_a[1][3] = -12.153
frag_a[2][3] = 28.177
frag_a[0][4] = -0.440
frag_a[1][4] = -15.218
frag_a[2][4] = 30.068
frag_a[0][5] = 2.551
frag_a[1][5] = -13.273
frag_a[2][5] = 31.372
frag_a[0][6] = 0.105
frag_a[1][6] = -11.330
frag_a[2][6] = 33.567
frag_b[0][0] = -14.739
frag_b[1][0] = -18.673
frag_b[2][0] = 15.040
frag_b[0][1] = -12.473
frag_b[1][1] = -15.810
frag_b[2][1] = 16.074
frag_b[0][2] = -14.802
frag_b[1][2] = -13.307
frag_b[2][2] = 14.408
frag_b[0][3] = -17.782
frag_b[1][3] = -14.852
frag_b[2][3] = 16.171
frag_b[0][4] = -16.124
frag_b[1][4] = -14.617
frag_b[2][4] = 19.584
frag_b[0][5] = -15.029
frag_b[1][5] = -11.037
frag_b[2][5] = 18.902
frag_b[0][6] = -18.577
frag_b[1][6] = -10.001
frag_b[2][6] = 17.996
# Allocate rotation array
rot = np.zeros((9,), dtype=np.float64)
# Calculate center of geometry
comA = np.sum(frag_a, axis=1) / N
comB = np.sum(frag_b, axis=1) / N
# Center each fragment
frag_a = frag_a - comA.reshape(3, 1)
frag_b = frag_b - comB.reshape(3, 1)
# Calculate rmsd and rotation matrix
qcp_rmsd = qcp.CalcRMSDRotationalMatrix(frag_a.T, frag_b.T, N, rot, None)
#print 'qcp rmsd = ',rmsd
#print 'rotation matrix:'
#print rot.reshape((3,3))
# rotate frag_b to obtain optimal alignment
frag_br = np.dot(frag_b.T, rot.reshape((3, 3)))
aligned_rmsd = rmsd(frag_br.T, frag_a)
#print 'rmsd after applying rotation: ',rmsd
    assert_almost_equal(aligned_rmsd, 0.719106, 6, "RMSD between fragments A and B does not match expected value.")
expected_rot = np.array([
[0.72216358, -0.52038257, -0.45572112],
[0.69118937, 0.51700833, 0.50493528],
[-0.0271479, -0.67963547, 0.73304748]])
assert_almost_equal(rot.reshape((3, 3)), expected_rot, 6,
"Rotation matrix for aliging B to A does not have expected values.")
def test_innerproduct(atoms_a, atoms_b):
a = 2450.0
b = np.array([430, 452, 474, 500, 526, 552, 570, 600, 630])
number_of_atoms = 4
e = np.zeros(9, dtype=np.float64)
g = qcp.InnerProduct(e, atoms_a, atoms_b, number_of_atoms, None)
assert_almost_equal(a, g)
assert_array_almost_equal(b, e)
def test_RMSDmatrix(atoms_a, atoms_b):
number_of_atoms = 4
rotation = np.zeros(9, dtype=np.float64)
rmsd = qcp.CalcRMSDRotationalMatrix(atoms_a, atoms_b, number_of_atoms, rotation, None) # no weights
rmsd_ref = 20.73219522556076
assert_almost_equal(rmsd_ref, rmsd)
rotation_ref = np.array([0.9977195, 0.02926979, 0.06082009, -.0310942, 0.9990878, 0.02926979, -0.05990789, -.0310942, 0.9977195])
assert_array_almost_equal(rotation, rotation_ref, 6)
def test_RMSDmatrix_simple(atoms_a, atoms_b):
number_of_atoms = 4
rotation = np.zeros(9, dtype=np.float64)
rmsd = qcp.CalcRMSDRotationalMatrix(atoms_a, atoms_b, number_of_atoms, rotation, None) # no weights
rmsd_ref = 20.73219522556076
assert_almost_equal(rmsd_ref, rmsd)
rotation_ref = np.array([0.9977195, 0.02926979, 0.06082009, -.0310942, 0.9990878, 0.02926979, -0.05990789, -.0310942, 0.9977195])
assert_array_almost_equal(rotation, rotation_ref, 6)
def test_rmsd(atoms_a, atoms_b):
rotation_m = np.array([[.9977195, .02926979, .06082009], [-.0310942, .9990878, .02926979], [-.05990789, -.0310942, .9977195]])
atoms_b_aligned = np.dot(atoms_b, rotation_m)
rmsd = rms.rmsd(atoms_b_aligned, atoms_a)
rmsd_ref = 20.73219522556076
assert_almost_equal(rmsd, rmsd_ref, 6)
def test_weights(atoms_a, atoms_b):
no_of_atoms = 4
weights = np.array([1,2,3,4], dtype=np.float64)
rotation = np.zeros(9, dtype=np.float64)
rmsd = qcp.CalcRMSDRotationalMatrix(atoms_a, atoms_b, no_of_atoms, rotation, weights)
assert_almost_equal(rmsd, 32.798779202159416)
rotation_ref = np.array([0.99861395, .022982, .04735006, -.02409085, .99944556, .022982, -.04679564, -.02409085, .99861395])
np.testing.assert_almost_equal(rotation_ref, rotation)
|
1,176 |
is in list
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# CatUserBot #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Copyright (C) 2020-2023 by TgCatUB@Github.
# This file is part of: https://github.com/TgCatUB/catuserbot
# and is released under the "GNU v3.0 License Agreement".
# Please see: https://github.com/TgCatUB/catuserbot/blob/master/LICENSE
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import threading
from sqlalchemy import Column, String, UnicodeText, distinct, func
from . import BASE, SESSION
class CatGloballist(BASE):
__tablename__ = "catglobal_list"
keywoard = Column(UnicodeText, primary_key=True)
group_id = Column(String, primary_key=True, nullable=False)
def __init__(self, keywoard, group_id):
self.keywoard = keywoard
self.group_id = str(group_id)
def __repr__(self):
return f"<Cat global values '{self.group_id}' for {self.keywoard}>"
def __eq__(self, other):
return (
isinstance(other, CatGloballist)
and self.keywoard == other.keywoard
and self.group_id == other.group_id
)
CatGloballist.__table__.create(checkfirst=True)
CATGLOBALLIST_INSERTION_LOCK = threading.RLock()
class GLOBALLIST_SQL:
def __init__(self):
self.GLOBALLIST_VALUES = {}
GLOBALLIST_SQL_ = GLOBALLIST_SQL()
def add_to_list(keywoard, group_id):
with CATGLOBALLIST_INSERTION_LOCK:
broadcast_group = CatGloballist(keywoard, str(group_id))
SESSION.merge(broadcast_group)
SESSION.commit()
GLOBALLIST_SQL_.GLOBALLIST_VALUES.setdefault(keywoard, set()).add(str(group_id))
def rm_from_list(keywoard, group_id):
with CATGLOBALLIST_INSERTION_LOCK:
if broadcast_group := SESSION.query(CatGloballist).get(
(keywoard, str(group_id))
):
if str(group_id) in GLOBALLIST_SQL_.GLOBALLIST_VALUES.get(keywoard, set()):
GLOBALLIST_SQL_.GLOBALLIST_VALUES.get(keywoard, set()).remove(
str(group_id)
)
SESSION.delete(broadcast_group)
SESSION.commit()
return True
SESSION.close()
return False
def METHOD_NAME(keywoard, group_id):
with CATGLOBALLIST_INSERTION_LOCK:
broadcast_group = SESSION.query(CatGloballist).get((keywoard, str(group_id)))
return bool(broadcast_group)
def del_keyword_list(keywoard):
with CATGLOBALLIST_INSERTION_LOCK:
broadcast_group = (
SESSION.query(CatGloballist.keywoard)
.filter(CatGloballist.keywoard == keywoard)
.delete()
)
GLOBALLIST_SQL_.GLOBALLIST_VALUES.pop(keywoard)
SESSION.commit()
def get_collection_list(keywoard):
return GLOBALLIST_SQL_.GLOBALLIST_VALUES.get(keywoard, set())
def get_list_keywords():
try:
chats = SESSION.query(CatGloballist.keywoard).distinct().all()
return [i[0] for i in chats]
finally:
SESSION.close()
def num_list():
try:
return SESSION.query(CatGloballist).count()
finally:
SESSION.close()
def num_list_keyword(keywoard):
try:
return (
SESSION.query(CatGloballist.keywoard)
.filter(CatGloballist.keywoard == keywoard)
.count()
)
finally:
SESSION.close()
def num_list_keywords():
try:
return SESSION.query(func.count(distinct(CatGloballist.keywoard))).scalar()
finally:
SESSION.close()
def __load_chat_lists():
try:
chats = SESSION.query(CatGloballist.keywoard).distinct().all()
for (keywoard,) in chats:
GLOBALLIST_SQL_.GLOBALLIST_VALUES[keywoard] = []
all_groups = SESSION.query(CatGloballist).all()
for x in all_groups:
GLOBALLIST_SQL_.GLOBALLIST_VALUES[x.keywoard] += [x.group_id]
GLOBALLIST_SQL_.GLOBALLIST_VALUES = {
x: set(y) for x, y in GLOBALLIST_SQL_.GLOBALLIST_VALUES.items()
}
finally:
SESSION.close()
__load_chat_lists()
|
1,177 |
draw buttons
|
import numpy as np
import bpy
from bpy.props import FloatProperty, EnumProperty, BoolProperty, IntProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level
from sverchok.utils.curve import SvCurve
from sverchok.utils.curve.nurbs import SvNurbsCurve
from sverchok.utils.curve.algorithms import concatenate_curves
from sverchok.utils.curve.nurbs_algorithms import concatenate_nurbs_curves
class SvConcatCurvesNode(SverchCustomTreeNode, bpy.types.Node):
"""
Triggers: Concatenate Curves
Tooltip: Concatenate several curves into one
"""
bl_idname = 'SvExConcatCurvesNode'
bl_label = 'Concatenate Curves'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_CONCAT_CURVES'
check : BoolProperty(
name = "Check coincidence",
description = "If enabled, then the node will check that the end points of curves being concatenated do actually coincide (within threshold). If they do not, the node will give an error (become red), and the processing will stop",
default = False,
update = updateNode)
max_rho : FloatProperty(
name = "Max. distance",
description = "Maximum distance between end points of the curves, which is allowable to decide that they actually coincide",
min = 0.0,
default = 0.001,
precision = 4,
update = updateNode)
all_nurbs : BoolProperty(
name = "All NURBS",
description = "Convert all input curves to NURBS, and output NURBS - or fail if it is not possible",
default = False,
update = updateNode)
def METHOD_NAME(self, context, layout):
layout.prop(self, 'check')
layout.prop(self, 'all_nurbs')
if self.check or self.all_nurbs:
layout.prop(self, 'max_rho')
def sv_init(self, context):
self.inputs.new('SvCurveSocket', "Curves")
self.outputs.new('SvCurveSocket', "Curve")
def run_check(self, curves):
for idx, (curve1, curve2) in enumerate(zip(curves, curves[1:])):
_, t_max_1 = curve1.get_u_bounds()
t_min_2, _ = curve2.get_u_bounds()
end1 = curve1.evaluate(t_max_1)
begin2 = curve2.evaluate(t_min_2)
distance = np.linalg.norm(begin2 - end1)
if distance > self.max_rho:
self.error("%s - %s", end1, begin2)
raise Exception("Distance between the end of {}'th curve and the start of {}'th curve is {} - too much".format(idx, idx+1, distance))
def to_nurbs(self, curves):
result = []
for i,c in enumerate(curves):
nurbs = SvNurbsCurve.to_nurbs(c)
if nurbs is None:
raise Exception(f"Curve #{i} - {c} - can not be converted to NURBS!")
result.append(nurbs)
return result
def process(self):
if not any(socket.is_linked for socket in self.outputs):
return
curve_s = self.inputs['Curves'].sv_get()
if isinstance(curve_s[0], SvCurve):
curve_s = [curve_s]
curves_out = []
for curves in curve_s:
if self.check:
self.run_check(curves)
if self.all_nurbs:
curves = self.to_nurbs(curves)
if self.all_nurbs:
new_curve = concatenate_nurbs_curves(curves, tolerance=self.max_rho)
else:
new_curve = concatenate_curves(curves)
curves_out.append(new_curve)
self.outputs['Curve'].sv_set(curves_out)
def register():
bpy.utils.register_class(SvConcatCurvesNode)
def unregister():
bpy.utils.unregister_class(SvConcatCurvesNode)
|
1,178 |
test threshold unstructured vtk multiblock output
|
# Copyright(C) 1999-2020 National Technology & Engineering Solutions
# of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
# NTESS, the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of NTESS nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
#from vtk import *
import vtk
from Operation.PhactoriVtkDataExportOperation import *
import paraview.simple
import subprocess
import os
class TestPhactoriVtkDataExportOperation(unittest.TestCase):
def test_SimpleImageDataVtkMultiblockOutput(self):
testWavelet = Wavelet()
testWavelet.UpdatePipeline()
#testGroup = GroupDatasets(input=[testWavelet, wavelet2])
testGroup = GroupDatasets(registrationName='GroupDatasets1', Input=testWavelet)
#testGroup = GroupDatasets()
#print(dir(testGroup))
testGroup.UpdatePipeline()
newOperationBlock = PhactoriOperationBlock()
newOperationBlock.mName = "test_vtkdataexport_simpleimagedata"
operationParams = {
"type":"vtkdataexport",
"input":"testgroup1",
"basedirectory":".",
"basename":"test_vtkdataexport_simpleimagedata"
}
ParseOneFilterTypeFromViewMapOperation(newOperationBlock,
'slicewithplane',
PhactoriVtkDataExportOperation,
operationParams)
newOperationBlock.mOperationSpecifics.myCopyOfInputFilter = testGroup
newOperationBlock.mOperationSpecifics.mFilterToWriteDataFrom = testGroup
#newOperationBlock.GetPvFilter().updatePipeline()
newOperationBlock.ExportOperationData(None)
vtmGeneratedFlag = os.path.exists("test_vtkdataexport_simpleimagedata_0000.vtm")
self.assertTrue(vtmGeneratedFlag)
subprocess.run(["rm", "-rf",
"test_vtkdataexport_simpleimagedata_0000.vtm",
"test_vtkdataexport_simpleimagedata_0000"])
def localSetThresholdRange(self, threshOp, lowerValue, upperValue):
global gParaViewCatalystVersionFlag
if gParaViewCatalystVersionFlag < 51000:
threshOp.ThresholdRange = [lowerValue, upperValue]
else:
threshOp.LowerThreshold = lowerValue
threshOp.UpperThreshold = upperValue
threshOp.ThresholdMethod = vtk.vtkThreshold.THRESHOLD_BETWEEN
def METHOD_NAME(self):
testWavelet = Wavelet()
testWavelet.UpdatePipeline()
testThresh1 = Threshold(registrationName='Threshold3', Input=testWavelet)
testThresh1.Scalars = ['POINTS', 'RTData']
self.localSetThresholdRange(testThresh1, 150.0, 300.0)
#testGroup = GroupDatasets(input=[testWavelet, wavelet2])
testGroup = GroupDatasets(registrationName='GroupDatasets4', Input=testThresh1)
#testGroup = GroupDatasets()
#print(dir(testGroup))
testGroup.UpdatePipeline()
newOperationBlock = PhactoriOperationBlock()
newOperationBlock.mName = "test_vtkdataexport_thresholdunstructured"
operationParams = {
"type":"vtkdataexport",
"input":"testgroup1",
"basedirectory":".",
"basename":"test_vtkdataexport_thresholdunstructured"
}
ParseOneFilterTypeFromViewMapOperation(newOperationBlock,
'slicewithplane',
PhactoriVtkDataExportOperation,
operationParams)
newOperationBlock.mOperationSpecifics.myCopyOfInputFilter = testGroup
newOperationBlock.mOperationSpecifics.mFilterToWriteDataFrom = testGroup
#newOperationBlock.GetPvFilter().updatePipeline()
newOperationBlock.ExportOperationData(None)
vtmGeneratedFlag = os.path.exists("test_vtkdataexport_thresholdunstructured_0000.vtm")
self.assertTrue(vtmGeneratedFlag)
subprocess.run(["rm", "-rf",
"test_vtkdataexport_thresholdunstructured_0000.vtm",
"test_vtkdataexport_thresholdunstructured_0000"])
def test_EmptyThresholdVtkMultiblockOutput(self):
testWavelet = Wavelet()
testWavelet.UpdatePipeline()
testThresh1 = Threshold(registrationName='Threshold3', Input=testWavelet)
testThresh1.Scalars = ['POINTS', 'RTData']
self.localSetThresholdRange(testThresh1, 300.0, 400.0)
#testGroup = GroupDatasets(input=[testWavelet, wavelet2])
testGroup = GroupDatasets(registrationName='GroupDatasets3', Input=testThresh1)
#testGroup = GroupDatasets()
#print(dir(testGroup))
testGroup.UpdatePipeline()
newOperationBlock = PhactoriOperationBlock()
newOperationBlock.mName = "test_vtkdataexport_emptythreshold"
operationParams = {
"type":"vtkdataexport",
"input":"testgroup1",
"basedirectory":".",
"basename":"test_vtkdataexport_emptythreshold"
}
ParseOneFilterTypeFromViewMapOperation(newOperationBlock,
'slicewithplane',
PhactoriVtkDataExportOperation,
operationParams)
newOperationBlock.mOperationSpecifics.myCopyOfInputFilter = testGroup
newOperationBlock.mOperationSpecifics.mFilterToWriteDataFrom = testGroup
#newOperationBlock.GetPvFilter().updatePipeline()
newOperationBlock.ExportOperationData(None)
vtmGeneratedFlag = os.path.exists("test_vtkdataexport_emptythreshold_0000.vtm")
self.assertTrue(vtmGeneratedFlag)
subprocess.run(["rm", "-rf",
"test_vtkdataexport_emptythreshold_0000.vtm",
"test_vtkdataexport_emptythreshold_0000"])
if __name__ == '__main__':
cc = Cone()
rr = Show()
unittest.main()
|
1,179 |
get ac status text
|
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2022 NV Access Limited, Rui Batista
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
"""
Tracking was introduced so that NVDA has a mechanism to announce changes to the power state.
When NVDA receives a power status change Window Message,
we notify the user of the power status.
The power status can also be reported using script_say_battery_status.
"""
import ctypes
from enum import (
Enum,
IntEnum,
IntFlag,
auto,
unique,
)
from typing import (
List,
Optional,
)
from logHandler import log
import ui
import winKernel
BATTERY_LIFE_TIME_UNKNOWN = 0xffffffff
class PowerBroadcast(IntEnum):
# https://docs.microsoft.com/en-us/windows/win32/power/wm-powerbroadcast
APM_POWER_STATUS_CHANGE = 0xA
"""
Notifies applications of a change in the power status of the computer,
such as a switch from battery power to A/C.
The system also broadcasts this event when remaining battery power
slips below the threshold specified by the user
or if the battery power changes by a specified percentage.
A window receives this event through the WM_POWERBROADCAST message.
https://docs.microsoft.com/en-us/windows/win32/power/pbt-apmpowerstatuschange
"""
APM_RESUME_AUTOMATIC = 0x12
"""
Operation is resuming automatically from a low-power state.
This message is sent every time the system resumes.
"""
APM_RESUME_SUSPEND = 0x7
"""
Operation is resuming from a low-power state.
This message is sent after APM_RESUME_AUTOMATIC if the resume is triggered by user input,
such as pressing a key.
"""
APM_SUSPEND = 0x4
"""
System is suspending operation.
"""
POWER_SETTING_CHANGE = 0x8013
"""
A power setting change event has been received.
"""
class BatteryFlag(IntFlag):
# https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-system_power_status
HIGH = 0x1
"""More than 66%"""
LOW = 0x2
"""Less than 33%"""
CRITICAL = 0x4
"""Less than 5%"""
NO_SYSTEM_BATTERY = 0x80
UNKNOWN = 0xFF
class PowerState(IntFlag):
# https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-system_power_status
AC_OFFLINE = 0x0
AC_ONLINE = 0x1
UNKNOWN = 0xFF
class SystemPowerStatus(ctypes.Structure):
# https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-system_power_status
_fields_ = [
("ACLineStatus", ctypes.c_byte),
("BatteryFlag", ctypes.c_byte),
("BatteryLifePercent", ctypes.c_byte),
("Reserved1", ctypes.c_byte),
("BatteryLifeTime", ctypes.wintypes.DWORD),
("BatteryFullLiveTime", ctypes.wintypes.DWORD)
]
BatteryFlag: BatteryFlag
ACLineStatus: PowerState
BatteryLifePercent: int
BatteryLifeTime: int
_powerState: PowerState = PowerState.UNKNOWN
def initialize():
"""
The NVDA message window only handles changes of state.
As such, to correctly ignore an initial power change event,
which does not change the power state (e.g. a battery level drop),
we fetch the initial power state manually.
"""
global _powerState
systemPowerStatus = SystemPowerStatus()
if (
not winKernel.GetSystemPowerStatus(systemPowerStatus)
or systemPowerStatus.BatteryFlag == BatteryFlag.UNKNOWN
):
log.error("Error retrieving system power status")
return
if systemPowerStatus.BatteryFlag & BatteryFlag.NO_SYSTEM_BATTERY:
return
_powerState = systemPowerStatus.ACLineStatus
return
@unique
class _ReportContext(Enum):
"""
Used to determine the order of information, based on relevance to the user,
when announcing power status information
"""
AC_STATUS_CHANGE = auto()
"""e.g. a charger is connected/disconnected"""
FETCH_STATUS = auto()
"""e.g. when a user presses nvda+shift+b to fetch the current battery status"""
def reportACStateChange() -> None:
_reportPowerStatus(_ReportContext.AC_STATUS_CHANGE)
def reportCurrentBatteryStatus() -> None:
_reportPowerStatus(_ReportContext.FETCH_STATUS)
def _reportPowerStatus(context: _ReportContext) -> None:
"""
@param context: the context is used to order the announcement.
When the context is AC_STATUS_CHANGE, this reports the current AC status first.
When the context is FETCH_STATUS, this reports the remaining battery life first.
"""
global _powerState
systemPowerStatus = _getPowerStatus()
speechSequence = _getSpeechForBatteryStatus(systemPowerStatus, context, _powerState)
if speechSequence:
ui.message(" ".join(speechSequence))
if systemPowerStatus is not None:
_powerState = systemPowerStatus.ACLineStatus
def _getPowerStatus() -> Optional[SystemPowerStatus]:
sps = SystemPowerStatus()
systemPowerStatusUpdateResult = winKernel.GetSystemPowerStatus(sps)
if not systemPowerStatusUpdateResult:
log.error(f"Error retrieving power status: {ctypes.GetLastError()}")
return None
return sps
def _getSpeechForBatteryStatus(
systemPowerStatus: Optional[SystemPowerStatus],
context: _ReportContext,
oldPowerState: PowerState,
) -> List[str]:
if not systemPowerStatus or systemPowerStatus.BatteryFlag == BatteryFlag.UNKNOWN:
# Translators: This is presented when there is an error retrieving the battery status.
return [_("Unknown power status")]
if systemPowerStatus.BatteryFlag & BatteryFlag.NO_SYSTEM_BATTERY:
# Translators: This is presented when there is no battery such as desktop computers
# and laptops with battery pack removed.
return [_("No system battery")]
if (
context == _ReportContext.AC_STATUS_CHANGE
and systemPowerStatus.ACLineStatus == oldPowerState
):
# Sometimes, the power change event double fires.
# The power change event also fires when the battery level decreases by 3%.
return []
text: List[str] = []
if context == _ReportContext.AC_STATUS_CHANGE:
# When the AC status changes, users want to be alerted to the new AC status first.
text.append(METHOD_NAME(systemPowerStatus))
text.extend(_getBatteryInformation(systemPowerStatus))
elif context == _ReportContext.FETCH_STATUS:
# When fetching the current battery status,
# users want to know the current battery status first,
# rather than the AC status which should be unchanged.
text.extend(_getBatteryInformation(systemPowerStatus))
text.append(METHOD_NAME(systemPowerStatus))
else:
raise NotImplementedError(f"Unexpected _ReportContext: {context}")
return text
def METHOD_NAME(systemPowerStatus: SystemPowerStatus) -> str:
if systemPowerStatus.ACLineStatus & PowerState.AC_ONLINE:
# Translators: Reported when the battery is plugged in, and now is charging.
return _("Plugged in")
else:
# Translators: Reported when the battery is no longer plugged in, and now is not charging.
return _("Unplugged")
def _getBatteryInformation(systemPowerStatus: SystemPowerStatus) -> List[str]:
text: List[str] = []
# Translators: This is presented to inform the user of the current battery percentage.
text.append(_("%d percent") % systemPowerStatus.BatteryLifePercent)
SECONDS_PER_HOUR = 3600
SECONDS_PER_MIN = 60
if systemPowerStatus.BatteryLifeTime != BATTERY_LIFE_TIME_UNKNOWN:
# Translators: This is the estimated remaining runtime of the laptop battery.
text.append(_("{hours:d} hours and {minutes:d} minutes remaining").format(
hours=systemPowerStatus.BatteryLifeTime // SECONDS_PER_HOUR,
minutes=(systemPowerStatus.BatteryLifeTime % SECONDS_PER_HOUR) // SECONDS_PER_MIN
))
return text
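# Hedged illustration (not part of the original module): how the announcement order
# described in _ReportContext and _reportPowerStatus depends on the report context.
# The field values below are arbitrary sample data; the raw battery flag value 1
# (high battery in the Windows API) is used because the individual BatteryFlag members
# are defined elsewhere in this module and are not shown here.
def _exampleAnnouncementOrder():
    status = SystemPowerStatus()
    status.ACLineStatus = PowerState.AC_ONLINE
    status.BatteryFlag = 1
    status.BatteryLifePercent = 80
    status.BatteryLifeTime = 2 * 3600
    # Charger just connected: "Plugged in" is spoken before the battery details.
    acFirst = _getSpeechForBatteryStatus(status, _ReportContext.AC_STATUS_CHANGE, PowerState.UNKNOWN)
    # Explicit fetch (NVDA+shift+b): the battery details are spoken before the AC status.
    batteryFirst = _getSpeechForBatteryStatus(status, _ReportContext.FETCH_STATUS, PowerState.AC_ONLINE)
    return acFirst, batteryFirst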
|
1,180 |
return 2
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras utilities to split v1 and v2 classes."""
import abc
import numpy as np
import tensorflow.compat.v2 as tf
import keras
from keras.engine import base_layer
from keras.engine import base_layer_v1
from keras.engine import training
from keras.engine import training_v1
from keras.testing_infra import test_combinations
@test_combinations.run_all_keras_modes
class SplitUtilsTest(test_combinations.TestCase):
def _check_model_class(self, model_class):
if tf.compat.v1.executing_eagerly_outside_functions():
self.assertEqual(model_class, training.Model)
else:
self.assertEqual(model_class, training_v1.Model)
def _check_layer_class(self, layer):
if tf.compat.v1.executing_eagerly_outside_functions():
self.assertIsInstance(layer, base_layer.Layer)
self.assertNotIsInstance(layer, base_layer_v1.Layer)
else:
self.assertIsInstance(layer, base_layer_v1.Layer)
def test_functional_model(self):
inputs = keras.Input(10)
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
self._check_model_class(model.__class__.__bases__[0])
self._check_layer_class(model)
def test_subclass_model_with_functional_init(self):
inputs = keras.Input(10)
outputs = keras.layers.Dense(1)(inputs)
class MyModel(keras.Model):
pass
model = MyModel(inputs, outputs)
model_class = model.__class__.__bases__[0].__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_subclass_model_with_functional_init_interleaved_v1_functional(
self,
):
with tf.Graph().as_default():
inputs = keras.Input(10)
outputs = keras.layers.Dense(1)(inputs)
_ = keras.Model(inputs, outputs)
inputs = keras.Input(10)
outputs = keras.layers.Dense(1)(inputs)
class MyModel(keras.Model):
pass
model = MyModel(inputs, outputs)
model_class = model.__class__.__bases__[0].__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_sequential_model(self):
model = keras.Sequential([keras.layers.Dense(1)])
model_class = model.__class__.__bases__[0].__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_subclass_model(self):
class MyModel(keras.Model):
def call(self, x):
return 2 * x
model = MyModel()
model_class = model.__class__.__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_layer(self):
class IdentityLayer(base_layer.Layer):
"""A layer that returns it's input.
Useful for testing a layer without a variable.
"""
def call(self, inputs):
return inputs
layer = IdentityLayer()
self._check_layer_class(layer)
def test_multiple_subclass_model(self):
class Model1(keras.Model):
pass
class Model2(Model1):
def call(self, x):
return 2 * x
model = Model2()
model_class = model.__class__.__bases__[0].__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_user_provided_metaclass(self):
class AbstractModel(keras.Model, metaclass=abc.ABCMeta):
@abc.abstractmethod
def call(self, inputs):
"""Calls the model."""
class MyModel(AbstractModel):
def call(self, inputs):
return 2 * inputs
with self.assertRaisesRegex(TypeError, "instantiate abstract class"):
AbstractModel()
model = MyModel()
model_class = model.__class__.__bases__[0].__bases__[0]
self._check_model_class(model_class)
self._check_layer_class(model)
def test_multiple_inheritance(self):
class Return2:
def METHOD_NAME(self):
return 2
class MyModel(keras.Model, Return2):
def call(self, x):
return self.METHOD_NAME() * x
model = MyModel()
bases = model.__class__.__bases__
self._check_model_class(bases[0])
self.assertEqual(bases[1], Return2)
self.assertEqual(model.METHOD_NAME(), 2)
self._check_layer_class(model)
def test_fit_error(self):
if not tf.compat.v1.executing_eagerly_outside_functions():
# Error only appears on the v2 class.
return
model = keras.Sequential([keras.layers.Dense(1)])
model.compile("sgd", "mse")
x, y = np.ones((10, 10)), np.ones((10, 1))
with tf.compat.v1.get_default_graph().as_default():
with self.assertRaisesRegex(
ValueError, "instance was constructed with eager mode enabled"
):
model.fit(x, y, batch_size=2)
if __name__ == "__main__":
tf.test.main()
|
1,181 |
to dict
|
from typing import List, Optional, Dict, Any, Union, BinaryIO, Literal, get_args, Sequence
import os
import json
import logging
from pathlib import Path
from haystack.preview.utils import request_with_retry
from haystack.preview import component, Document, default_to_dict, default_from_dict
logger = logging.getLogger(__name__)
OPENAI_TIMEOUT = float(os.environ.get("HAYSTACK_OPENAI_TIMEOUT_SEC", 600))
WhisperRemoteModel = Literal["whisper-1"]
@component
class RemoteWhisperTranscriber:
"""
    Transcribes audio files using OpenAI's Whisper API. Requires an API key. See the
    [OpenAI API reference](https://beta.openai.com/docs/api-reference/whisper) for more details.
You can get one by signing up for an [OpenAI account](https://beta.openai.com/).
For the supported audio formats, languages, and other parameters, see the
[Whisper API documentation](https://platform.openai.com/docs/guides/speech-to-text)
"""
def __init__(
self,
api_key: str,
model_name: WhisperRemoteModel = "whisper-1",
api_base: str = "https://api.openai.com/v1",
whisper_params: Optional[Dict[str, Any]] = None,
):
"""
Transcribes a list of audio files into a list of Documents.
:param api_key: OpenAI API key.
:param model_name: Name of the model to use. It now accepts only `whisper-1`.
:param api_base: OpenAI base URL, defaults to `"https://api.openai.com/v1"`.
"""
if model_name not in get_args(WhisperRemoteModel):
raise ValueError(
f"Model name not recognized. Choose one among: " f"{', '.join(get_args(WhisperRemoteModel))}."
)
if not api_key:
raise ValueError("API key is None.")
self.model_name = model_name
self.api_key = api_key
self.api_base = api_base
self.whisper_params = whisper_params or {}
def METHOD_NAME(self) -> Dict[str, Any]:
"""
Serialize this component to a dictionary.
"""
return default_to_dict(
self,
model_name=self.model_name,
api_key=self.api_key,
api_base=self.api_base,
whisper_params=self.whisper_params,
)
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "RemoteWhisperTranscriber":
"""
Deserialize this component from a dictionary.
"""
return default_from_dict(cls, data)
@component.output_types(documents=List[Document])
def run(self, audio_files: List[Path], whisper_params: Optional[Dict[str, Any]] = None):
"""
Transcribe the audio files into a list of Documents, one for each input file.
For the supported audio formats, languages, and other parameters, see the
[Whisper API documentation](https://platform.openai.com/docs/guides/speech-to-text) and the official Whisper
[github repo](https://github.com/openai/whisper).
:param audio_files: a list of paths or binary streams to transcribe
:returns: a list of Documents, one for each file. The content of the document is the transcription text,
while the document's metadata contains all the other values returned by the Whisper model, such as the
alignment data. Another key called `audio_file` contains the path to the audio file used for the
transcription.
"""
if whisper_params is None:
whisper_params = self.whisper_params
documents = self.transcribe(audio_files, **whisper_params)
return {"documents": documents}
def transcribe(self, audio_files: Sequence[Union[str, Path, BinaryIO]], **kwargs) -> List[Document]:
"""
Transcribe the audio files into a list of Documents, one for each input file.
For the supported audio formats, languages, and other parameters, see the
[Whisper API documentation](https://platform.openai.com/docs/guides/speech-to-text) and the official Whisper
[github repo](https://github.com/openai/whisper).
:param audio_files: a list of paths or binary streams to transcribe
        :returns: a list of Documents, one for each input file.
"""
transcriptions = self._raw_transcribe(audio_files=audio_files, **kwargs)
documents = []
for audio, transcript in zip(audio_files, transcriptions):
content = transcript.pop("text")
if not isinstance(audio, (str, Path)):
audio = "<<binary stream>>"
doc = Document(content=content, metadata={"audio_file": audio, **transcript})
documents.append(doc)
return documents
def _raw_transcribe(self, audio_files: Sequence[Union[str, Path, BinaryIO]], **kwargs) -> List[Dict[str, Any]]:
"""
        Transcribe the given audio files. Returns a list of transcription dictionaries.
For the supported audio formats, languages, and other parameters, see the
[Whisper API documentation](https://platform.openai.com/docs/guides/speech-to-text) and the official Whisper
[github repo](https://github.com/openai/whisper).
:param audio_files: a list of paths or binary streams to transcribe.
:param kwargs: any other parameters that Whisper API can understand.
:returns: a list of transcriptions as they are produced by the Whisper API (JSON).
"""
translate = kwargs.pop("translate", False)
url = f"{self.api_base}/audio/{'translations' if translate else 'transcriptions'}"
data = {"model": self.model_name, **kwargs}
headers = {"Authorization": f"Bearer {self.api_key}"}
transcriptions = []
for audio_file in audio_files:
if isinstance(audio_file, (str, Path)):
audio_file = open(audio_file, "rb")
request_files = ("file", (audio_file.name, audio_file, "application/octet-stream"))
response = request_with_retry(
method="post", url=url, data=data, headers=headers, files=[request_files], timeout=OPENAI_TIMEOUT
)
transcription = json.loads(response.content)
transcriptions.append(transcription)
return transcriptions
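# Hedged usage sketch (not part of the original module): constructs the component with an
# API key read from the environment and transcribes one file. The OPENAI_API_KEY variable
# and the "meeting.mp3" path are illustrative assumptions, not part of the Haystack API.
if __name__ == "__main__":
    transcriber = RemoteWhisperTranscriber(api_key=os.environ["OPENAI_API_KEY"])
    output = transcriber.run(audio_files=[Path("meeting.mp3")])
    for document in output["documents"]:
        # each Document carries the transcription text plus the source file in its metadata
        print(document.content)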
|
1,182 |
color
|
from ...shader_base import Nodes
from ..source1_shader_base import Source1ShaderBase
class UnlitGeneric(Source1ShaderBase):
SHADER: str = 'unlittwotexture'
@property
def basetexture(self):
texture_path = self._vmt.get_string('$basetexture', None)
if texture_path is not None:
return self.load_texture_or_default(texture_path, (0.3, 0, 0.3, 1.0))
return None
@property
def texture2(self):
texture_path = self._vmt.get_string('$texture2', None)
if texture_path is not None:
image = self.load_texture_or_default(texture_path, (0.0, 0.0, 0.0, 1.0))
image.colorspace_settings.is_data = True
image.colorspace_settings.name = 'Non-Color'
return image
return None
@property
def color2(self):
color_value, value_type = self._vmt.get_vector('$color2', [1, 1, 1])
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
elif len(color_value) > 3:
color_value = color_value[:3]
return color_value
@property
def METHOD_NAME(self):
color_value, value_type = self._vmt.get_vector('$color', [1, 1, 1])
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
elif len(color_value) > 3:
color_value = color_value[:3]
return color_value
@property
def additive(self):
return self._vmt.get_int('$additive', 0) == 1
def create_nodes(self, material_name):
if super().create_nodes(material_name) in ['UNKNOWN', 'LOADED']:
return
material_output = self.create_node(Nodes.ShaderNodeOutputMaterial)
shader = self.create_node(Nodes.ShaderNodeBsdfPrincipled, self.SHADER)
self.connect_nodes(shader.outputs['BSDF'], material_output.inputs['Surface'])
basetexture = self.basetexture
texture2 = self.texture2
if basetexture:
basetexture_node = self.create_and_connect_texture_node(basetexture, name='$basetexture')
if texture2:
                texture2_node = self.create_and_connect_texture_node(texture2, name='$texture2')
color_mix = self.create_node(Nodes.ShaderNodeMixRGB)
color_mix.blend_type = 'MULTIPLY'
color_mix.inputs['Fac'].default_value = 1.0
self.connect_nodes(basetexture_node.outputs['Color'], color_mix.inputs['Color1'])
self.connect_nodes(texture2_node.outputs['Color'], color_mix.inputs['Color2'])
texture_output = color_mix.outputs['Color']
else:
texture_output = basetexture_node.outputs['Color']
if self.METHOD_NAME or self.color2:
color_mix = self.create_node(Nodes.ShaderNodeMixRGB)
color_mix.blend_type = 'MULTIPLY'
self.connect_nodes(texture_output, color_mix.inputs['Color1'])
color_mix.inputs['Color2'].default_value = (*(self.METHOD_NAME or self.color2), 1.0)
color_mix.inputs['Fac'].default_value = 1.0
self.connect_nodes(color_mix.outputs['Color'], shader.inputs['Base Color'])
else:
self.connect_nodes(texture_output, shader.inputs['Base Color'])
            if self.additive:
basetexture_invert_node = self.create_node(Nodes.ShaderNodeInvert)
basetexture_additive_mix_node = self.create_node(Nodes.ShaderNodeMixRGB)
self.insert_node(texture_output, basetexture_additive_mix_node.inputs['Color1'],
basetexture_additive_mix_node.outputs['Color'])
basetexture_additive_mix_node.inputs['Color2'].default_value = (1.0, 1.0, 1.0, 1.0)
self.connect_nodes(texture_output, basetexture_invert_node.inputs['Color'])
self.connect_nodes(basetexture_invert_node.outputs['Color'], shader.inputs['Transmission'])
self.connect_nodes(basetexture_invert_node.outputs['Color'],
basetexture_additive_mix_node.inputs['Fac'])
|
1,183 |
test dataloader repeats
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import sys
import time
import unittest
import torch
from monai.data import DataLoader, Dataset, ThreadBuffer, ThreadDataLoader
from monai.transforms import Compose, SimulateDelayd
from monai.utils import PerfContext, set_determinism
from tests.utils import assert_allclose
class TestDataLoader(unittest.TestCase):
def setUp(self):
super().setUp()
self.datalist = [
{"image": "spleen_19.nii.gz", "label": "spleen_label_19.nii.gz"},
{"image": "spleen_31.nii.gz", "label": "spleen_label_31.nii.gz"},
]
self.transform = Compose([SimulateDelayd(keys=["image", "label"], delay_time=0.1)])
def test_values(self):
dataset = Dataset(data=self.datalist, transform=self.transform)
dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)
tbuffer = ThreadBuffer(dataloader)
for d in tbuffer:
self.assertEqual(d["image"][0], "spleen_19.nii.gz")
self.assertEqual(d["image"][1], "spleen_31.nii.gz")
self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
def test_dataloader(self):
dataset = Dataset(data=self.datalist, transform=self.transform)
dataloader = ThreadDataLoader(dataset=dataset, batch_size=2, num_workers=0)
for d in dataloader:
self.assertEqual(d["image"][0], "spleen_19.nii.gz")
self.assertEqual(d["image"][1], "spleen_31.nii.gz")
for d in dataloader:
self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
def test_deterministic(self):
set_determinism(0)
res_1 = list(ThreadDataLoader(torch.arange(5), batch_size=2, buffer_size=2, shuffle=True, num_workers=0))
set_determinism(0)
num_workers = 2 if sys.platform == "linux" else 1
res_2 = list(
ThreadDataLoader(torch.arange(5), batch_size=2, buffer_size=3, shuffle=True, num_workers=num_workers)
)
set_determinism(None)
assert_allclose(torch.cat(res_1), torch.cat(res_2), type_test=False)
def test_time(self):
dataset = Dataset(data=self.datalist * 2, transform=self.transform) # contains data for 2 batches
dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)
tbuffer = ThreadBuffer(dataloader)
with PerfContext() as pc:
for _ in dataloader:
time.sleep(0.5) # each batch takes 0.8 s to generate on top of this time
unbuffered_time = pc.total_time
with PerfContext() as pc:
for _ in tbuffer:
time.sleep(0.5) # while "computation" is happening the next batch is being generated, saving 0.4 s
buffered_time = pc.total_time
if sys.platform == "darwin": # skip macOS measure
print(f"darwin: Buffered time {buffered_time} vs unbuffered time {unbuffered_time}")
else:
self.assertTrue(
buffered_time < unbuffered_time,
f"Buffered time {buffered_time} should be less than unbuffered time {unbuffered_time}",
)
def METHOD_NAME(self):
dataset = Dataset(data=self.datalist, transform=self.transform)
dataloader = ThreadDataLoader(dataset=dataset, batch_size=2, num_workers=0, repeats=2)
previous_batch = None
for d in dataloader:
self.assertEqual(d["image"][0], "spleen_19.nii.gz")
self.assertEqual(d["image"][1], "spleen_31.nii.gz")
if previous_batch is None:
previous_batch = d
else:
self.assertTrue(previous_batch is d, "Batch object was not repeated")
previous_batch = None
def test_thread_workers(self):
dataset = Dataset(data=self.datalist, transform=self.transform)
dataloader = ThreadDataLoader(dataset=dataset, batch_size=2, num_workers=2, use_thread_workers=True)
for d in dataloader:
self.assertEqual(d["image"][0], "spleen_19.nii.gz")
self.assertEqual(d["image"][1], "spleen_31.nii.gz")
self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
if __name__ == "__main__":
unittest.main()
|
1,184 |
write history file
|
import os.path
import sys
from warnings import warn
try:
_console = sys._jy_console
_reader = _console.reader
except AttributeError:
raise ImportError("Cannot access JLine2 setup")
try:
# jarjar-ed version
from org.python.jline.console.history import MemoryHistory
except ImportError:
# dev version from extlibs
from jline.console.history import MemoryHistory
__all__ = ['add_history', 'clear_history', 'get_begidx', 'get_completer',
'get_completer_delims', 'get_current_history_length',
'get_endidx', 'get_history_item', 'get_history_length',
'get_line_buffer', 'insert_text', 'parse_and_bind',
'read_history_file', 'read_init_file', 'redisplay',
'remove_history_item', 'set_completer', 'set_completer_delims',
'set_history_length', 'set_pre_input_hook', 'set_startup_hook',
'write_history_file']
_history_list = None
# The need for the following warnings should go away once we update
# JLine. Choosing ImportWarning as the closest warning to what is
# going on here, namely this is functionality not yet available on
# Jython.
class NotImplementedWarning(ImportWarning):
"""Not yet implemented by Jython"""
class SecurityWarning(ImportWarning):
"""Security manager prevents access to private field"""
def parse_and_bind(string):
pass
def get_line_buffer():
return str(_reader.cursorBuffer.buffer)
def insert_text(string):
_reader.putString(string)
def read_init_file(filename=None):
warn("read_init_file: %s" % (filename,), NotImplementedWarning, "module", 2)
def read_history_file(filename="~/.history"):
expanded = os.path.expanduser(filename)
with open(expanded) as f:
_reader.history.load(f)
def METHOD_NAME(filename="~/.history"):
expanded = os.path.expanduser(filename)
with open(expanded, 'w') as f:
for line in _reader.history.entries():
f.write(line.value().encode("utf-8"))
f.write("\n")
def clear_history():
_reader.history.clear()
def add_history(line):
_reader.history.add(line)
def get_history_length():
return _reader.history.maxSize
def set_history_length(length):
_reader.history.maxSize = length
def get_current_history_length():
return _reader.history.size()
def get_history_item(index):
# JLine indexes from 0 while readline indexes from 1 (at least in test_readline)
if index>0:
return _reader.history.get(index-1)
else:
return None
def remove_history_item(pos):
_reader.history.remove(pos)
def replace_history_item(pos, line):
_reader.history.set(pos, line)
def redisplay():
_reader.redrawLine()
def set_startup_hook(function=None):
_console.startupHook = function
def set_pre_input_hook(function=None):
warn("set_pre_input_hook %s" % (function,), NotImplementedWarning, stacklevel=2)
_completer_function = None
def set_completer(function=None):
"""set_completer([function]) -> None
Set or remove the completer function.
The function is called as function(text, state),
for state in 0, 1, 2, ..., until it returns a non-string.
It should return the next possible completion starting with 'text'."""
global _completer_function
_completer_function = function
def complete_handler(buffer, cursor, candidates):
start = _get_delimited(buffer, cursor)[0]
delimited = buffer[start:cursor]
try:
sys.ps2
have_ps2 = True
except AttributeError:
have_ps2 = False
if (have_ps2 and _reader.prompt == sys.ps2) and (not delimited or delimited.isspace()):
            # Insert tab (as expanded to 4 spaces), but only if
            # preceding is whitespace/empty and in console
            # continuation; this is a planned feature for Python 3 per
# http://bugs.python.org/issue5845
#
# Ideally this would not expand tabs, in case of mixed
# copy&paste of tab-indented code, however JLine2 gets
            # confused about the cursor position after certain, but not
            # all, subsequent edits if the tab is backspaced
candidates.add(" " * 4)
return start
# TODO: if there are a reasonably large number of completions
# (need to get specific numbers), CPython 3.4 will show a
# message like so:
# >>>
# Display all 186 possibilities? (y or n)
# Currently Jython arbitrarily limits this to 100 and displays them
for state in xrange(100):
completion = None
try:
completion = function(delimited, state)
except:
pass
if completion:
candidates.add(completion)
else:
break
return start
_reader.addCompleter(complete_handler)
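# Hedged illustration (not part of this module): a completer following the protocol
# documented above - it is called as function(text, state) for state 0, 1, 2, ... until
# it returns a non-string. The word list is arbitrary sample data; it would be registered
# with set_completer(_example_keyword_completer).
def _example_keyword_completer(text, state):
    matches = [word for word in ("print", "pass", "property") if word.startswith(text)]
    if state < len(matches):
        return matches[state]
    return None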
def get_completer():
return _completer_function
def _get_delimited(buffer, cursor):
start = cursor
for i in xrange(cursor-1, -1, -1):
if buffer[i] in _completer_delims:
break
start = i
return start, cursor
def get_begidx():
return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[0]
def get_endidx():
return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[1]
def set_completer_delims(string):
global _completer_delims, _completer_delims_set
_completer_delims = string
_completer_delims_set = set(string)
def get_completer_delims():
return _completer_delims
set_completer_delims(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?')
|
1,185 |
test equal operator modifying operand
|
import sys
from test import list_tests
from test.support import cpython_only
import pickle
import unittest
class ListTest(list_tests.CommonTest):
type2test = list
def test_basic(self):
self.assertEqual(list([]), [])
l0_3 = [0, 1, 2, 3]
l0_3_bis = list(l0_3)
self.assertEqual(l0_3, l0_3_bis)
self.assertTrue(l0_3 is not l0_3_bis)
self.assertEqual(list(()), [])
self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
self.assertEqual(list(''), [])
self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])
self.assertEqual(list(x for x in range(10) if x % 2),
[1, 3, 5, 7, 9])
if sys.maxsize == 0x7fffffff:
# This test can currently only work on 32-bit machines.
# XXX If/when PySequence_Length() returns a ssize_t, it should be
# XXX re-enabled.
# Verify clearing of bug #556025.
# This assumes that the max data size (sys.maxint) == max
# address size this also assumes that the address size is at
# least 4 bytes with 8 byte addresses, the bug is not well
# tested
#
# Note: This test is expected to SEGV under Cygwin 1.3.12 or
# earlier due to a newlib bug. See the following mailing list
# thread for the details:
# http://sources.redhat.com/ml/newlib/2002/msg00369.html
self.assertRaises(MemoryError, list, range(sys.maxsize // 2))
# This code used to segfault in Py2.4a3
x = []
x.extend(-y for y in x)
self.assertEqual(x, [])
def test_keyword_args(self):
with self.assertRaisesRegex(TypeError, 'keyword argument'):
list(sequence=[])
def test_truth(self):
super().test_truth()
self.assertTrue(not [])
self.assertTrue([42])
def test_identity(self):
self.assertTrue([] is not [])
def test_len(self):
super().test_len()
self.assertEqual(len([]), 0)
self.assertEqual(len([0]), 1)
self.assertEqual(len([0, 1, 2]), 3)
def test_overflow(self):
lst = [4, 5, 6, 7]
n = int((sys.maxsize*2+2) // len(lst))
def mul(a, b): return a * b
def imul(a, b): a *= b
self.assertRaises((MemoryError, OverflowError), mul, lst, n)
self.assertRaises((MemoryError, OverflowError), imul, lst, n)
def test_repr_large(self):
# Check the repr of large list objects
def check(n):
l = [0] * n
s = repr(l)
self.assertEqual(s,
'[' + ', '.join(['0'] * n) + ']')
check(10) # check our checking code
check(1000000)
def test_iterator_pickle(self):
orig = self.type2test([4, 5, 6, 7])
data = [10, 11, 12, 13, 14, 15]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# initial iterator
itorig = iter(orig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data)
# running iterator
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[1:])
# empty iterator
for i in range(1, len(orig)):
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[len(orig):])
# exhausted iterator
self.assertRaises(StopIteration, next, itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(list(it), [])
def test_reversed_pickle(self):
orig = self.type2test([4, 5, 6, 7])
data = [10, 11, 12, 13, 14, 15]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# initial iterator
itorig = reversed(orig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[len(orig)-1::-1])
# running iterator
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[len(orig)-2::-1])
# empty iterator
for i in range(1, len(orig)):
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), [])
# exhausted iterator
self.assertRaises(StopIteration, next, itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(list(it), [])
def test_no_comdat_folding(self):
# Issue 8847: In the PGO build, the MSVC linker's COMDAT folding
# optimization causes failures in code that relies on distinct
# function addresses.
class L(list): pass
with self.assertRaises(TypeError):
(3,) + L([1,2])
def METHOD_NAME(self):
# test fix for seg fault reported in bpo-38588 part 2.
class X:
def __eq__(self,other) :
list2.clear()
return NotImplemented
class Y:
def __eq__(self, other):
list1.clear()
return NotImplemented
class Z:
def __eq__(self, other):
list3.clear()
return NotImplemented
list1 = [X()]
list2 = [Y()]
self.assertTrue(list1 == list2)
list3 = [Z()]
list4 = [1]
self.assertFalse(list3 == list4)
@cpython_only
def test_preallocation(self):
iterable = [0] * 10
iter_size = sys.getsizeof(iterable)
self.assertEqual(iter_size, sys.getsizeof(list([0] * 10)))
self.assertEqual(iter_size, sys.getsizeof(list(range(10))))
def test_count_index_remove_crashes(self):
# bpo-38610: The count(), index(), and remove() methods were not
# holding strong references to list elements while calling
# PyObject_RichCompareBool().
class X:
def __eq__(self, other):
lst.clear()
return NotImplemented
lst = [X()]
with self.assertRaises(ValueError):
lst.index(lst)
class L(list):
def __eq__(self, other):
str(other)
return NotImplemented
lst = L([X()])
lst.count(lst)
lst = L([X()])
with self.assertRaises(ValueError):
lst.remove(lst)
# bpo-39453: list.__contains__ was not holding strong references
# to list elements while calling PyObject_RichCompareBool().
lst = [X(), X()]
3 in lst
lst = [X(), X()]
X() in lst
if __name__ == "__main__":
unittest.main()
|
1,186 |
test descendant of
|
import pytest
from sqlalchemy_utils import Ltree
class TestLtree:
def test_init(self):
assert Ltree('path.path') == Ltree(Ltree('path.path'))
def test_constructor_with_wrong_type(self):
with pytest.raises(TypeError) as e:
Ltree(None)
assert str(e.value) == (
"Ltree() argument must be a string or an Ltree, not 'NoneType'"
)
def test_constructor_with_invalid_code(self):
with pytest.raises(ValueError) as e:
Ltree('..')
assert str(e.value) == "'..' is not a valid ltree path."
@pytest.mark.parametrize(
'code',
(
'path',
'path.path',
'1_.2',
'_._',
)
)
def test_validate_with_valid_codes(self, code):
Ltree.validate(code)
@pytest.mark.parametrize(
'path',
(
'',
'.',
'path.',
'path..path',
'path.path..path',
'path.path..',
'path.äö',
)
)
def test_validate_with_invalid_path(self, path):
with pytest.raises(ValueError) as e:
Ltree.validate(path)
assert str(e.value) == (
f"'{path}' is not a valid ltree path."
)
@pytest.mark.parametrize(
('path', 'length'),
(
('path', 1),
('1.1', 2),
('1.2.3', 3)
)
)
def test_length(self, path, length):
assert len(Ltree(path)) == length
@pytest.mark.parametrize(
('path', 'subpath', 'index'),
(
('path.path', 'path', 0),
('1.2.3', '2.3', 1),
('1.2.3.4', '2.3', 1),
('1.2.3.4', '3.4', 2)
)
)
def test_index(self, path, subpath, index):
assert Ltree(path).index(subpath) == index
@pytest.mark.parametrize(
('path', 'item_slice', 'result'),
(
('path.path', 0, 'path'),
('1.1.2.3', slice(1, 3), '1.2'),
('1.1.2.3', slice(1, None), '1.2.3'),
)
)
def test_getitem(self, path, item_slice, result):
assert Ltree(path)[item_slice] == result
@pytest.mark.parametrize(
('path', 'others', 'result'),
(
('1.2.3', ['1.2.3', '1.2'], '1'),
('1.2.3.4.5', ['1.2.3', '1.2.3.4'], '1.2'),
('1.2.3.4.5', ['3.4', '1.2.3.4'], None),
('1.2', ['1.2.3', '1.2.4'], '1')
)
)
def test_lca(self, path, others, result):
assert Ltree(path).lca(*others) == result
@pytest.mark.parametrize(
('path', 'other', 'result'),
(
('1.2.3', '4.5', '1.2.3.4.5'),
('1', '1', '1.1'),
)
)
def test_add(self, path, other, result):
assert Ltree(path) + other == result
@pytest.mark.parametrize(
('path', 'other', 'result'),
(
('1.2.3', '4.5', '4.5.1.2.3'),
('1', '1', '1.1'),
)
)
def test_radd(self, path, other, result):
assert other + Ltree(path) == result
@pytest.mark.parametrize(
('path', 'other', 'result'),
(
('1.2.3', '4.5', '1.2.3.4.5'),
('1', '1', '1.1'),
)
)
def test_iadd(self, path, other, result):
ltree = Ltree(path)
ltree += other
assert ltree == result
@pytest.mark.parametrize(
('path', 'other', 'result'),
(
('1.2.3', '2', True),
('1.2.3', '3', True),
('1', '1', True),
('1', '2', False),
)
)
def test_contains(self, path, other, result):
assert (other in Ltree(path)) == result
@pytest.mark.parametrize(
('path', 'other', 'result'),
(
('1', '1.2.3', True),
('1.2', '1.2.3', True),
('1.2.3', '1.2.3', True),
('1.2.3', '1', False),
('1.2.3', '1.2', False),
('1', '1', True),
('1', '2', False),
)
)
def test_ancestor_of(self, path, other, result):
assert Ltree(path).ancestor_of(other) == result
@pytest.mark.parametrize(
('path', 'other', 'result'),
(
('1', '1.2.3', False),
('1.2', '1.2.3', False),
('1.2', '1.2.3', False),
('1.2.3', '1', True),
('1.2.3', '1.2', True),
('1.2.3', '1.2.3', True),
('1', '1', True),
('1', '2', False),
)
)
def METHOD_NAME(self, path, other, result):
assert Ltree(path).descendant_of(other) == result
def test_getitem_with_other_than_slice_or_in(self):
with pytest.raises(TypeError):
Ltree('1.2')['something']
def test_index_raises_value_error_if_subpath_not_found(self):
with pytest.raises(ValueError):
Ltree('1.2').index('3')
def test_equality_operator(self):
assert Ltree('path.path') == 'path.path'
assert 'path.path' == Ltree('path.path')
assert Ltree('path.path') == Ltree('path.path')
def test_non_equality_operator(self):
assert Ltree('path.path') != 'path.'
assert not (Ltree('path.path') != 'path.path')
def test_hash(self):
assert hash(Ltree('path')) == hash('path')
def test_repr(self):
assert repr(Ltree('path')) == "Ltree('path')"
def test_str(self):
ltree = Ltree('path.path')
assert str(ltree) == 'path.path'
def test_lt(self):
assert Ltree('1') < Ltree('2')
assert Ltree('1.2.3') < Ltree('1.2.4')
assert Ltree('1.2.3') < Ltree('1.2.3.4')
def test_lte(self):
assert Ltree('1.2.3') <= Ltree('1.2.4')
assert Ltree('1') <= Ltree('1')
def test_gt(self):
assert Ltree('2') > Ltree('1')
assert Ltree('1.2.3') > Ltree('1.2.2')
assert Ltree('1.2.3.4') > Ltree('1.2.3')
def test_gte(self):
assert Ltree('1.2.3') >= Ltree('1.2.2')
assert Ltree('1') >= Ltree('1')
|
1,187 |
test check constraints no warnings given warn
|
# bluemira is an integrated inter-disciplinary design tool for future fusion
# reactors. It incorporates several modules, some of which rely on other
# codes, to carry out a range of typical conceptual fusion reactor design
# activities.
#
# Copyright (C) 2021-2023 M. Coleman, J. Cook, F. Franza, I.A. Maione, S. McIntosh,
# J. Morris, D. Short
#
# bluemira is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# bluemira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with bluemira; if not, see <https://www.gnu.org/licenses/>.
from typing import List, Tuple
import numpy as np
from bluemira.optimisation import OptimisationProblem
from bluemira.optimisation.typing import ConstraintT
class SimpleOptProblem(OptimisationProblem):
"""
Simple optimisation that expects solution (1/3, 8/27).
Minimises sqrt(x2) subject to constraints:
x2 > 0
x2 >= (2*x1)^3
x2 >= (-x1 + 1)^3
"""
df_call_count: int
"""
Number of times ``df_objective`` is called.
Useful for testing we're actually calling the given gradient and not
an approximation.
"""
def __init__(self) -> None:
self.df_call_count = 0
def objective(self, x: np.ndarray) -> float:
"""Objective to minimise (x - 1)^2 + x + 3."""
return np.sqrt(x[1])
def df_objective(self, x: np.ndarray) -> np.ndarray:
"""Gradient of the objective function."""
self.df_call_count += 1
return np.array([0.0, 0.5 / np.sqrt(x[1])])
def ineq_constraints(self) -> List[ConstraintT]:
return [
{
"f_constraint": self.constraint_1,
"df_constraint": self.df_constraint_1,
"tolerance": np.full(1, 1e-8),
},
{
"f_constraint": self.constraint_2,
"df_constraint": self.df_constraint_2,
"tolerance": np.full(1, 1e-8),
},
]
def bounds(self) -> Tuple[np.ndarray, np.ndarray]:
return np.array([-np.inf, 0]), np.array([np.inf, np.inf])
def constraint_1(self, x: np.ndarray) -> np.ndarray:
return (2 * x[0] + 0) ** 3 - x[1]
def df_constraint_1(self, x: np.ndarray) -> np.ndarray:
return np.array([3 * 2 * (2 * x[0]) * 2 * x[0], -1.0])
def constraint_2(self, x: np.ndarray) -> np.ndarray:
return (-1 * x[0] + 1) ** 3 - x[1]
def df_constraint_2(self, x: np.ndarray) -> np.ndarray:
return np.array([3 * -1 * (-1 * x[0] + 1) * (-1 * x[0] + 1), -1.0])
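# Hedged sanity check (not part of the original file): at the documented optimum
# x = (1/3, 8/27) both inequality constraints of SimpleOptProblem are exactly active,
# since (2 * 1/3)**3 == (1 - 1/3)**3 == 8/27, so the minimised objective is sqrt(8/27).
def _verify_simple_opt_solution():
    x = np.array([1 / 3, 8 / 27])
    problem = SimpleOptProblem()
    np.testing.assert_allclose(problem.constraint_1(x), 0, atol=1e-12)
    np.testing.assert_allclose(problem.constraint_2(x), 0, atol=1e-12)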
class OptProblemNoGrad(OptimisationProblem):
def objective(self, x: np.ndarray) -> float:
"""Objective to minimise (x - 1)^2 + x + 3."""
return np.sqrt(x[1])
def ineq_constraints(self) -> List[ConstraintT]:
return [
{
"f_constraint": self.constraint_1,
"tolerance": np.full(1, 1e-8),
},
{
"f_constraint": self.constraint_2,
"tolerance": np.full(1, 1e-8),
},
]
def bounds(self) -> Tuple[np.ndarray, np.ndarray]:
return np.array([-np.inf, 0]), np.array([np.inf, np.inf])
def constraint_1(self, x: np.ndarray) -> np.ndarray:
return (2 * x[0] + 0) ** 3 - x[1]
def constraint_2(self, x: np.ndarray) -> np.ndarray:
return (-1 * x[0] + 1) ** 3 - x[1]
class OptProblemEqConstraint(OptimisationProblem):
"""
Optimisation problem using an equality constraint.
Maximise x*y*z such that:
x^2 + y^2 = 4
x + z = 2
Example adapted from https://youtu.be/-t7EawoZPn8.
"""
def objective(self, x: np.ndarray) -> float:
return -np.prod(x)
def eq_constraints(self) -> List[ConstraintT]:
return [
{
"f_constraint": self.eq_constraint_1,
"df_constraint": self.df_eq_constraint_1,
"tolerance": np.array([1e-8]),
},
# no derivative for this constraint, to test approximation
{"f_constraint": self.eq_constraint_2, "tolerance": np.array([1e-8])},
]
def eq_constraint_1(self, x: np.ndarray) -> np.ndarray:
"""Equality constraint: x^2 + y^2 = 4."""
return x[0] ** 2 + x[1] ** 2 - 4
def df_eq_constraint_1(self, x: np.ndarray) -> np.ndarray:
"""Derivative of equality constraint: x^2 + y^2 = 4."""
return np.array([2 * x[0], 2 * x[1], 0])
def eq_constraint_2(self, x: np.ndarray) -> np.ndarray:
"""Equality constraint: x + z = 2."""
return x[0] + x[2] - 2
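# Hedged derivation note (not part of the original file) for the optimum asserted in
# test_opt_problem_with_eq_constraint below: substituting y = sqrt(4 - x^2) and z = 2 - x
# into x*y*z and setting the derivative to zero gives (x - 2)(3x^2 + 2x - 4) = 0, whose
# admissible root is x = (sqrt(13) - 1) / 3.
def _verify_eq_constraint_optimum():
    x = (np.sqrt(13) - 1) / 3
    np.testing.assert_allclose(3 * x**2 + 2 * x - 4, 0, atol=1e-12)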
class TestOptimisationProblem:
def test_simple_optimisation_returns_correct_result(self):
op = SimpleOptProblem()
conditions = {"xtol_rel": 1e-6, "max_eval": 100}
result = op.optimise(np.array([1, 1]), opt_conditions=conditions)
np.testing.assert_allclose(result.x, [1 / 3, 8 / 27], rtol=1e-4)
assert op.df_call_count > 0
def test_check_constraints_prints_warnings_if_violated(self, caplog):
op = SimpleOptProblem()
constraints_ok = op.check_constraints(np.array([20, 30]))
assert not constraints_ok
messages = "\n".join(caplog.messages)
assert all(m in messages for m in ["constraints", "not", "satisfied"])
def METHOD_NAME(self, caplog):
op = SimpleOptProblem()
constraints_ok = op.check_constraints(np.array([20, 30]), warn=False)
assert not constraints_ok
assert not caplog.messages
def test_check_constraints_no_warnings_given_no_violation(self, caplog):
op = SimpleOptProblem()
constraints_ok = op.check_constraints(np.array([1 / 3, 8 / 27]))
assert constraints_ok
assert not caplog.messages
def test_opt_problem_with_no_gradient_defined(self):
# We should still get a good solution, as we should be
# approximating the gradient automatically.
op = OptProblemNoGrad()
conditions = {"xtol_rel": 1e-6, "max_eval": 100}
result = op.optimise(
np.array([1, 1]), algorithm="SLSQP", opt_conditions=conditions
)
np.testing.assert_allclose(result.x, [1 / 3, 8 / 27], rtol=1e-4)
def test_opt_problem_with_eq_constraint(self):
op = OptProblemEqConstraint()
conditions = {"xtol_rel": 1e-6, "max_eval": 200}
result = op.optimise(
np.array([1, 1, 1]), algorithm="SLSQP", opt_conditions=conditions
)
x = (np.sqrt(13) - 1) / 3
y = np.sqrt(4 - x**2)
z = 2 - x
np.testing.assert_allclose(result.x, [x, y, z])
|
1,188 |
test nonce
|
#!/usr/bin/python3
import pytest
from brownie import compile_source
from brownie.exceptions import VirtualMachineError
from brownie.network.transaction import TransactionReceipt
code = """
pragma solidity ^0.6.0;
contract Foo {
fallback () external payable {}
}
"""
def test_to_string(accounts):
"""Can send to a string"""
tx = accounts[0].transfer("0x14b0Ed2a7C4cC60DD8F676AE44D0831d3c9b2a9E", 10000)
assert tx.receiver == "0x14b0Ed2a7C4cC60DD8F676AE44D0831d3c9b2a9E"
def test_to_string_without_checksum(accounts):
to = "0x14b0Ed2a7C4cC60DD8F676AE44D0831d3c9b2a9E".lower()
tx = accounts[0].transfer(to, 10000)
assert tx.receiver.lower() == to
def test_to_account(accounts):
"""Can send to an Account object"""
tx = accounts[0].transfer(accounts[1], 10000)
assert str(tx.receiver) == accounts[1].address
def test_to_contract(accounts, tester):
"""Can send to a Contract object"""
tx = accounts[0].transfer(tester, 0, data=tester.signatures["doNothing"])
assert str(tx.receiver) == tester.address
assert tx.gas_used > 21000
def test_to_contract_fallback(accounts, tester):
tx = accounts[0].transfer(tester, "1 ether")
assert str(tx.receiver) == tester.address
assert tx.gas_used > 21000
def test_returns_tx_on_success(accounts):
"""returns a TransactionReceipt on success"""
tx = accounts[0].transfer(accounts[1], 1000)
assert type(tx) == TransactionReceipt
def test_raises_on_revert(accounts, tester):
"""raises on revert"""
with pytest.raises(VirtualMachineError):
accounts[0].transfer(tester, 0)
def test_returns_tx_on_revert_in_console(accounts, tester, console_mode):
"""returns a tx on revert in console"""
tx = accounts[0].transfer(tester, 0)
assert type(tx) == TransactionReceipt
assert tx.status == 0
def test_allow_revert(accounts, tester, config):
with pytest.raises(VirtualMachineError):
accounts[1].transfer(tester, 0)
assert accounts[1].nonce == 1
with pytest.raises(ValueError):
accounts[1].transfer(tester, 0, allow_revert=False)
assert accounts[1].nonce == 1
def METHOD_NAME(accounts):
"""nonces increment properly"""
assert accounts[1].nonce == 0
accounts[1].transfer(accounts[2], 1000)
assert accounts[2].nonce == 0
assert accounts[1].nonce == 1
def test_balance_int(accounts, web3, chain):
"""transfers use the correct balance"""
balance = accounts[0].balance()
assert web3.eth.get_balance(accounts[0].address) == balance
accounts[1].transfer(accounts[0], 1000)
assert accounts[0].balance() == balance + 1000
chain.reset()
assert web3.eth.get_balance(accounts[0].address) == balance
def test_balance_wei(accounts, web3, chain):
"""transfer balances are converted using wei"""
balance = accounts[0].balance()
assert web3.eth.get_balance(accounts[0].address) == balance
accounts[1].transfer(accounts[0], "1 ether")
assert accounts[0].balance() == balance + 1000000000000000000
chain.reset()
assert web3.eth.get_balance(accounts[0].address) == balance
def test_gas_price_manual(accounts):
"""gas price is set correctly when specified in the call"""
balance = accounts[0].balance()
tx = accounts[0].transfer(accounts[1], 0, gas_price=100)
assert tx.gas_price == 100
assert accounts[0].balance() == balance - (100 * 21000)
@pytest.mark.parametrize("auto", (True, False, None, "auto"))
def test_gas_price_automatic(accounts, config, web3, auto):
"""gas price is set correctly using web3.eth.gas_price"""
config.active_network["settings"]["gas_price"] = auto
balance = accounts[0].balance()
tx = accounts[0].transfer(accounts[1], 0)
assert tx.gas_price == web3.eth.gas_price
assert accounts[0].balance() == balance - (tx.gas_price * 21000)
def test_gas_price_config(accounts, config):
"""gas price is set correctly from the config"""
config.active_network["settings"]["gas_price"] = 50
balance = accounts[0].balance()
tx = accounts[0].transfer(accounts[1], 0)
assert tx.gas_price == 50
assert accounts[0].balance() == balance - (50 * 21000)
def test_gas_price_zero(accounts, config):
config.active_network["settings"]["gas_price"] = 0
balance = accounts[0].balance()
tx = accounts[0].transfer(accounts[1], 1337)
assert tx.gas_price == 0
assert accounts[0].balance() == balance - 1337
def test_gas_limit_manual(accounts):
"""gas limit is set correctly when specified in the call"""
tx = accounts[0].transfer(accounts[1], 1000, gas_limit=100000)
assert tx.gas_limit == 100000
assert tx.gas_used == 21000
def test_gas_buffer_manual(accounts, config):
"""gas limit is set correctly when specified in the call"""
config.active_network["settings"]["gas_limit"] = None
foo = compile_source(code).Foo.deploy({"from": accounts[0]})
tx = accounts[0].transfer(foo, 1000, gas_buffer=1.337)
assert int(tx.gas_used * 1.337) == tx.gas_limit
def test_gas_buffer_send_to_eoa(accounts, config):
"""gas limit is set correctly when specified in the call"""
config.active_network["settings"]["gas_limit"] = None
tx = accounts[0].transfer(accounts[1], 1000, gas_buffer=1.337)
assert tx.gas_limit == 21000
@pytest.mark.parametrize("gas_limit", (True, False, None, "auto"))
@pytest.mark.parametrize("gas_buffer", (1, 1.25))
def test_gas_limit_automatic(accounts, config, gas_limit, gas_buffer):
"""gas limit is set correctly using web3.eth.estimate_gas"""
config.active_network["settings"]["gas_limit"] = gas_limit
config.active_network["settings"]["gas_buffer"] = gas_buffer
foo = compile_source(code).Foo.deploy({"from": accounts[0]})
tx = accounts[0].transfer(foo, 1000)
assert int(tx.gas_used * gas_buffer) == tx.gas_limit
def test_gas_limit_config(accounts, config):
"""gas limit is set correctly from the config"""
config.active_network["settings"]["gas_limit"] = 50000
tx = accounts[0].transfer(accounts[1], 1000)
assert tx.gas_limit == 50000
assert tx.gas_used == 21000
config.active_network["settings"]["gas_limit"] = False
def test_nonce_manual(accounts):
"""returns a Contract instance on successful deployment with the correct nonce"""
assert accounts[0].nonce == 0
tx = accounts[0].transfer(accounts[1], 1000, nonce=0)
assert tx.nonce == 0
assert accounts[0].nonce == 1
tx = accounts[0].transfer(accounts[1], 1000, nonce=1)
assert tx.nonce == 1
# this behaviour changed in ganache7, if the test suite is updated to work
# in hardhat we should still include it
# @pytest.mark.parametrize("nonce", (1, -1, 15))
# def test_raises_on_wrong_nonce(accounts, nonce):
# """raises if invalid manual nonce is provided"""
# assert accounts[0].nonce == 0
# with pytest.raises(ValueError):
# accounts[0].transfer(accounts[1], 1000, nonce=nonce)
def test_data(accounts):
"""transaction data is set correctly"""
tx = accounts[0].transfer(accounts[1], 1000)
assert tx.input == "0x"
tx = accounts[0].transfer(accounts[1], 1000, data="0x1234")
assert tx.input == "0x1234"
def test_localaccount(accounts):
local = accounts.add()
assert local.balance() == 0
accounts[0].transfer(local, "10 ether")
assert local.balance() == "10 ether"
local.transfer(accounts[1], "1 ether")
assert accounts[1].balance() == "1001 ether"
assert local.nonce == 1
def test_deploy_via_transfer(accounts, web3):
bytecode = "0x3660006000376110006000366000732157a7894439191e520825fe9399ab8655e0f7085af41558576110006000f3" # NOQA: E501
tx = accounts[0].transfer(data=bytecode)
assert tx.contract_name == "UnknownContract"
assert web3.eth.get_code(tx.contract_address)
def test_gas_limit_and_buffer(accounts):
with pytest.raises(ValueError):
accounts[0].transfer(accounts[1], 1000, gas_limit=21000, gas_buffer=1.3)
|
1,189 |
saveconfig
|
from Components.config import config, configfile, ConfigSelection, ConfigSubList, ConfigSubsection
from Screens.MessageBox import MessageBox
from Screens.Setup import Setup
from Screens.Standby import TryQuitMainloop, QUIT_RESTART
from enigma import getPeerStreamingBoxes
try:
from urllib.parse import urlparse
except:
from urlparse import urlparse
class ClientModeScreen(Setup):
def __init__(self, session):
self.createConfig()
Setup.__init__(self, session=session, setup="clientmode")
def createConfig(self):
peerStreamingBoxes = []
for url in getPeerStreamingBoxes():
parsed = urlparse(url)
peerStreamingBoxes.append(parsed.hostname)
self.peerExist = len(peerStreamingBoxes) != 0
peerDefault = None
self.peer = None
if self.peerExist:
if config.clientmode.serverAddressType.value == "domain" and config.clientmode.serverDomain.value in peerStreamingBoxes:
peerDefault = config.clientmode.serverDomain.value
self.peer = ConfigSelection(default=peerDefault, choices=[(x, x) for x in peerStreamingBoxes])
addressChoices = [("ip", _("IP")), ("domain", _("URL"))]
if self.peerExist:
addressChoices.append(("peer", _("Network peer")))
addressTypeDefault = config.clientmode.serverAddressType.value
if addressTypeDefault != "ip" and (peerDefault or self.peerExist and config.clientmode.serverDomain.value == ""):
addressTypeDefault = "peer"
self.addressType = ConfigSelection(default=addressTypeDefault, choices=addressChoices)
def run(self): # for start wizard
self.METHOD_NAME()
def keySave(self):
if config.clientmode.enabled.value and not self.checkFTPconnection():
mbox = self.session.open(MessageBox, _("Connection using the supplied FTP parameters failed. Please recheck the details and try again."), MessageBox.TYPE_ERROR)
mbox.setTitle(_("FTP connection failure"))
return
if config.clientmode.enabled.isChanged():
restartbox = self.session.openWithCallback(self.restartGUI, MessageBox, _("GUI needs a restart to switch modes\nDo you want to restart the GUI now?"), MessageBox.TYPE_YESNO)
restartbox.setTitle(_("Restart GUI now?"))
else:
self.METHOD_NAME()
self.close()
def METHOD_NAME(self):
nim_config_list = []
remoteAddress = self.getRemoteAddress(update=True)
if config.clientmode.enabled.isChanged() and config.clientmode.enabled.value: # switching to client mode
            # save normal mode config so it can be reinstated when returning to normal mode
nim_config_list = []
for x in config.Nims:
nim_config_list.append(x.getSavedValue())
import json
config.clientmode.nim_cache.value = json.dumps(nim_config_list)
config.clientmode.remote_fallback_enabled_cache.value = config.usage.remote_fallback_enabled.value
config.clientmode.remote_fallback_cache.value = config.usage.remote_fallback.value
# normal mode config values saved
if config.clientmode.enabled.value:
config.usage.remote_fallback_enabled.value = True
config.usage.remote_fallback.value = "http://%s:%d" % (remoteAddress, config.clientmode.serverStreamingPort.value)
elif config.clientmode.enabled.isChanged(): # switching back to normal mode
# load nim config from config.clientmode.nimcache
import json
nim_config_list = json.loads(config.clientmode.nim_cache.value)
config.clientmode.nim_cache.value = ""
config.Nims = ConfigSubList()
for x in nim_config_list:
tuner = ConfigSubsection()
tuner.setSavedValue(x)
config.Nims.append(tuner)
config.Nims.save()
# nim config loaded... but needs restart
# reinstate normal mode values
config.usage.remote_fallback_enabled.value = config.clientmode.remote_fallback_enabled_cache.value
config.usage.remote_fallback.value = config.clientmode.remote_fallback_cache.value
# reset some client mode settings
config.clientmode.remote_fallback_enabled_cache.value = False
config.clientmode.remote_fallback_cache.value = ""
config.usage.save()
config.clientmode.save()
configfile.save()
def getRemoteAddress(self, update=False):
if update:
if self.addressType.value == "peer":
config.clientmode.serverDomain.value = self.peer.value
config.clientmode.serverAddressType.value = "domain"
else:
config.clientmode.serverAddressType.value = self.addressType.value
if self.addressType.value == "peer":
return self.peer.value
elif self.addressType.value == "ip":
return '%d.%d.%d.%d' % (config.clientmode.serverIP.value[0], config.clientmode.serverIP.value[1], config.clientmode.serverIP.value[2], config.clientmode.serverIP.value[3])
else:
return config.clientmode.serverDomain.value
def checkFTPconnection(self):
print("[ClientMode][checkFTPconnection] Testing FTP connection...")
try:
from ftplib import FTP
ftp = FTP()
ftp.set_pasv(config.clientmode.passive.value)
ftp.connect(host=self.getRemoteAddress(), port=config.clientmode.serverFTPPort.value, timeout=5)
result = ftp.login(user=config.clientmode.serverFTPusername.value, passwd=config.clientmode.serverFTPpassword.value)
ftp.quit()
if result.startswith("230"):
print("[ClientMode][checkFTPconnection] FTP connection success:", result)
return True
print("[ClientMode][checkFTPconnection] FTP connection failure:", result)
return False
except Exception as err:
print("[ChannelsImporter][checkFTPconnection] Error:", err)
return False
def restartGUI(self, answer):
if answer is True:
self.METHOD_NAME()
self.session.open(TryQuitMainloop, QUIT_RESTART)
|
1,190 |
post action log
|
from datetime import datetime
from typing import TYPE_CHECKING
from discord import Member, User
from discord.utils import format_dt
from .managerbase import BaseManager
if TYPE_CHECKING:
from typing import Optional
from . import OptionalMember
action_messages = {
'warn': ('\N{WARNING SIGN}', 'Warn', 'warned {}'),
'ban': ('\N{NO ENTRY}', 'Ban', 'banned {}'),
'timeban': ('\N{NO ENTRY}', 'Time ban', 'banned {}'),
'silentban': ('\N{NO ENTRY}', 'Silent ban', 'banned {}'),
'softban': ('\N{NO ENTRY}', 'Soft-ban', 'soft-banned {}'),
'unban': ('\N{WARNING SIGN}', 'Unban', 'unbanned {}'),
'kick': ('\N{WOMANS BOOTS}', 'Kick', 'kicked {}'),
'timeout': ('\N{SPEAKER WITH CANCELLATION STROKE}', 'Timeout', 'timed out {}'),
'no-timeout': ('\N{SPEAKER}', 'Timeout Removed', 'removed a timeout from {}'),
# specific role changes
'mute': ('\N{SPEAKER WITH CANCELLATION STROKE}', 'Mute', 'muted {}'),
'unmute': ('\N{SPEAKER}', 'Unmute', 'unmuted {}'),
'time-mute': ('\N{SPEAKER WITH CANCELLATION STROKE}', 'Time Mute', 'muted {}'),
'take-help': ('\N{NO ENTRY SIGN}', 'Help access taken', 'took help access from {}'),
'give-help': ('\N{HEAVY LARGE CIRCLE}', 'Help access restored', 'restored help access for {}'),
'meta-mute': ('\N{SPEAKER WITH CANCELLATION STROKE}', 'Meta muted', 'meta muted {}'),
'meta-unmute': ('\N{SPEAKER}', 'Meta unmute', 'meta unmuted {}'),
'appeals-mute': ('\N{SPEAKER WITH CANCELLATION STROKE}', 'Appeals muted', 'appeals muted {}'),
'appeals-unmute': ('\N{SPEAKER}', 'Appeals unmute', 'appeals unmuted {}'),
'help-mute': ('\N{SPEAKER WITH CANCELLATION STROKE}', 'Help mute', 'removed speak access in help channels from {}'),
'help-unmute': ('\N{SPEAKER}', 'Help unmute', 'help unmuted {}'),
'give-art': ('\N{HEAVY LARGE CIRCLE}', 'Art access restore', 'restored art access for {}'),
'take-art': ('\N{NO ENTRY SIGN}', 'Art access taken', 'took art access from {}'),
'take-animals': ('\N{NO ENTRY SIGN}', 'Animal access taken', 'took animals access from {}'),
'give-tech': ('\N{HEAVY LARGE CIRCLE}', 'Tech access restore', 'restored tech access for {}'),
'take-tech': ('\N{NO ENTRY SIGN}', 'Tech access taken', 'took tech access from {}'),
'give-elsewhere': ('\N{HEAVY LARGE CIRCLE}', 'Elsewhere access restored', 'restored elsewhere access for {}'),
'take-elsewhere': ('\N{NO ENTRY SIGN}', 'Elsewhere access taken', 'took elsewhere access from {}'),
'no-embed': ('\N{NO ENTRY SIGN}', 'Permission Taken', 'removed embed permissions from {}'),
'embed': ('\N{HEAVY LARGE CIRCLE}', 'Permission Restored', 'restored embed permissions for {}'),
'probate': ('\N{NO ENTRY SIGN}', 'Probated', 'probated {}'),
'unprobate': ('\N{HEAVY LARGE CIRCLE}', 'Un-probated', 'un-probated {}'),
'tempstream': ('\N{HEAVY LARGE CIRCLE}', 'Permission Granted', 'granted streaming permissions to {}'),
'no-tempstream': ('\N{NO ENTRY SIGN}', 'Permission Revoked', 'revoked streaming permissions from {}'),
'take-memes': ('\N{NO ENTRY SIGN}', 'Permission Revoked', 'revoked meme permissions from {}'),
'nou': ('\N{NO ENTRY SIGN}', 'Sent to the void', 'sent {} to the void'),
'unnou': ('\N{HEAVY LARGE CIRCLE}', 'Retrieved from the void', 'retrieved {} from the void'),
# non-specific role changes
'watch': ('\N{EYES}', 'Watch', 'put {} on watch.'),
'unwatch': ('\N{CROSS MARK}', 'Unwatch', 'removed {} from watch.'),
'add-perm-role': ('\N{BLACK QUESTION MARK ORNAMENT}', 'Add role', 'added a permanent role to {}'),
'add-temp-role': ('\N{BLACK QUESTION MARK ORNAMENT}', 'Add role', 'added a temporary role to {}'),
'remove-role': ('\N{BLACK QUESTION MARK ORNAMENT}', 'Remove role', 'removed a role from {}'),
}
class UserLogManager(BaseManager):
"""Manages posting logs."""
async def METHOD_NAME(self, author: 'Member | User | OptionalMember',
target: 'Member | User | OptionalMember', kind: str, *, reason: 'Optional[str]' = None,
until: 'Optional[datetime]' = None):
member = target if isinstance(target, (Member, User)) else target.member
emoji, action, action_description = action_messages[kind]
target_str = f'<@!{target.id}>'
if member:
target_str += ' | ' + str(member)
msg = [f'{emoji} **{action}**: <@!{author.id}> {action_description.format(target_str)}']
if until:
now = datetime.now(self.bot.tz)
msg[0] += f' for {until - now}, until {format_dt(until)}'
msg.append(f'🏷 __User ID__: {target.id}')
if reason:
msg.append(f'\N{PENCIL} __Reason__: {reason}')
else:
msg.append('\N{PENCIL} __Reason__: No reason provided')
msg_final = '\n'.join(msg)
await self.bot.channels['mod-logs'].send(msg_final)
if 'ban' in kind or 'kick' in kind:
await self.bot.channels['server-logs'].send(msg_final)
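# --- Illustrative sketch (not part of the original cog) ---
# A framework-free demonstration of how one action_messages entry is expanded into a
# mod-log line; the user ids and tag below are made-up example values.
demo_emoji, demo_action, demo_description = action_messages['warn']
demo_target = '<@!123456789>' + ' | ' + 'ExampleUser#0001'
print(f'{demo_emoji} **{demo_action}**: <@!987654321> {demo_description.format(demo_target)}')
# -> "⚠ **Warn**: <@!987654321> warned <@!123456789> | ExampleUser#0001"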
|
1,191 |
validate premiumsku capacity
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=unused-variable
import re
from datetime import timedelta
from isodate import parse_duration
from knack.util import CLIError
# PARAMETER VALIDATORS
# Type ISO 8061 duration
iso8601pattern = re.compile("^P(?!$)(\\d+Y)?(\\d+M)?(\\d+W)?(\\d+D)?(T(?=\\d)(\\d+H)?(\\d+M)?(\\d+.)?(\\d+S)?)?$")
timedeltapattern = re.compile("^\\d+:\\d+:\\d+$")
def _validate_lock_duration(namespace):
if namespace.lock_duration:
if iso8601pattern.match(namespace.lock_duration):
if parse_duration(namespace.lock_duration) > timedelta(days=0, minutes=6, seconds=0):
raise CLIError(
'--lock-duration Value Error : {0} value, The maximum value for LockDuration is 5 minutes; the default value is 1 minute.'.format(
namespace.lock_duration))
elif timedeltapattern.match(namespace.lock_duration):
day, minute, seconds = namespace.lock_duration.split(":")
if int(day) > 0 or int(minute) > 6:
raise CLIError(
'--lock-duration Value Error : {0} value, The maximum value for LockDuration is 5 minutes; the default value is 1 minute.'.format(
namespace.lock_duration))
else:
raise CLIError('--lock-duration Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g.'
' PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.lock_duration))
def _validate_default_message_time_to_live(namespace):
if namespace.default_message_time_to_live:
if not iso8601pattern.match(namespace.default_message_time_to_live) and not timedeltapattern.match(namespace.default_message_time_to_live):
raise CLIError('--default-message-time-to-live Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.default_message_time_to_live))
def _validate_duplicate_detection_history_time_window(namespace):
if namespace.duplicate_detection_history_time_window:
if iso8601pattern.match(namespace.duplicate_detection_history_time_window):
pass
elif timedeltapattern.match(namespace.duplicate_detection_history_time_window):
pass
else:
raise CLIError('--duplicate-detection-history-time-window Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.duplicate_detection_history_time_window))
def _validate_auto_delete_on_idle(namespace):
if namespace.auto_delete_on_idle:
if iso8601pattern.match(namespace.auto_delete_on_idle):
pass
elif timedeltapattern.match(namespace.auto_delete_on_idle):
pass
else:
raise CLIError('--auto-delete-on-idle Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.auto_delete_on_idle))
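# --- Illustrative sketch (not part of the original validators) ---
# Shows, with hard-coded sample values, which strings the two patterns above accept:
# ISO 8601 durations such as "PT10M" and day:minute:second timespans such as "00:10:00".
# parse_duration comes from the isodate package already imported at the top of this module.
print(bool(iso8601pattern.match("PT10M"))) # True - ISO 8601 duration
print(bool(iso8601pattern.match("00:10:00"))) # False - not an ISO 8601 duration
print(bool(timedeltapattern.match("00:10:00"))) # True - d:m:s style timespan
print(parse_duration("PT5M") > timedelta(minutes=6)) # False - within the lock-duration limit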
def validate_partner_namespace(cmd, namespace):
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import is_valid_resource_id, resource_id
if namespace.partner_namespace:
if not is_valid_resource_id(namespace.partner_namespace):
namespace.partner_namespace = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.ServiceBus',
type='namespaces',
name=namespace.partner_namespace)
def validate_target_namespace(cmd, namespace):
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import is_valid_resource_id, resource_id
if namespace.target_namespace:
if not is_valid_resource_id(namespace.target_namespace):
namespace.target_namespace = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.ServiceBus',
type='namespaces',
name=namespace.target_namespace)
def METHOD_NAME(namespace):
if namespace.sku and namespace.sku != 'Premium' and namespace.capacity:
raise CLIError('--capacity - This property is only applicable to namespaces of Premium SKU')
# Validates if a subnet id or name have been given by the user. If subnet id is given, vnet-name should not be provided.
def validate_subnet(cmd, namespace):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
subnet = namespace.subnet
subnet_is_id = is_valid_resource_id(subnet)
vnet = namespace.vnet_name
if (subnet_is_id and not vnet) or (not subnet and not vnet):
pass
elif subnet and not subnet_is_id and vnet:
namespace.subnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet,
child_type_1='subnets',
child_name_1=subnet)
else:
raise CLIError('incorrect usage: [--subnet ID | --subnet NAME --vnet-name NAME]')
delattr(namespace, 'vnet_name')
def validate_rights(namespace):
if 'Manage' in namespace.rights:
if 'Listen' not in namespace.rights or 'Send' not in namespace.rights:
raise CLIError('Error : Assigning \'Manage\' to --rights requires \'Listen\' and \'Send\' to be included with. e.g. --rights Manage Send Listen')
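# --- Illustrative sketch (not part of the original validators) ---
# The rule enforced above: 'Manage' is only valid together with both 'Listen' and 'Send'.
# types.SimpleNamespace stands in for the parsed-arguments namespace the CLI would pass.
from types import SimpleNamespace
validate_rights(SimpleNamespace(rights=['Manage', 'Send', 'Listen'])) # passes silently
# validate_rights(SimpleNamespace(rights=['Manage', 'Send'])) # would raise CLIError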
def validate_private_endpoint_connection_id(namespace):
from azure.cli.core.azclierror import RequiredArgumentMissingError
if namespace.connection_id:
from azure.cli.core.util import parse_proxy_resource_id
result = parse_proxy_resource_id(namespace.connection_id)
namespace.resource_group_name = result['resource_group']
namespace.namespace_name = result['name']
namespace.private_endpoint_connection_name = result.get('child_name_1')
# if namespace.account_name and not namespace.resource_group_name:
# namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
if not all([namespace.namespace_name, namespace.resource_group_name, namespace.private_endpoint_connection_name]):
raise RequiredArgumentMissingError("Please provide either the `--id` value, or the `-g`, `--namespace-name` and `--name` values")
del namespace.connection_id
|
1,192 |
finish appoint
|
from datetime import datetime, timedelta
from Appointment.config import appointment_config as CONFIG
from Appointment.models import Appoint
from Appointment.appoint.judge import appoint_violate
from Appointment.utils.log import logger, get_user_logger
from Appointment.extern.wechat import MessageType, notify_appoint
def _adjusted_rate(original_rate: float, appoint: Appoint) -> float:
'''Return the qualification-rate requirement adjusted for different situations.'''
rate = original_rate
if appoint.Room.Rid in {'B109A', 'B207'}: # public areas
return 0
elif appoint.Room.Rid.startswith('R'): # Russian-language building
return 0
elif appoint.Room.Rid == 'B214': # lying postures cannot be recognized yet
rate -= 0.15 # suggested range 0.1-0.2; the former is strictest, the latter most lenient
elif appoint.Room.Rid == 'B107B': # the spot directly under the camera cannot be monitored
rate -= 0.05 # suggested range 0-0.1, since the issue is mainly the detected head count
elif appoint.Room.Rid == 'B217':
if appoint.Astart.hour >= 20: # lights turned off for movies make recognition inaccurate
rate -= 0.05 # suggested range 0-0.1, since the issue is mainly the detected head count
MIN31 = timedelta(minutes=31)
if appoint.Atype == Appoint.Type.TEMPORARY: # temporary appointments skip the camera check
return 0
if appoint.Atype == Appoint.Type.LONGTERM: # long-term appointments skip the camera check
return 0
if appoint.Afinish - appoint.Astart < MIN31: # leaving early or arriving late hurts short appointments more
rate -= 0.01 # suggested range 0-0.1; essentially disabled
if appoint.Areason == Appoint.Reason.R_LATE: # latecomers must guarantee extra usage
rate += 0.05 # suggested range 0.2-0.4, up to 0.5 in extreme cases; currently only being tested
return rate
def start_appoint(appoint_id: int):
'''The appointment starts: switch its status.'''
try:
appoint = Appoint.objects.get(Aid=appoint_id)
except:
return logger.exception(f"Appointment {appoint_id} unexpectedly disappeared")
if appoint.Astatus == Appoint.Status.APPOINTED: # starts normally
appoint.Astatus = Appoint.Status.PROCESSING
appoint.save()
logger.info(f"Appointment {appoint_id} started successfully: status changed to processing")
elif appoint.Astatus == Appoint.Status.PROCESSING: # already started
logger.info(f"Appointment {appoint_id} had already started at check time")
elif appoint.Astatus != Appoint.Status.CANCELED: # abnormal status, this task should not exist
logger.error(f"Appointment {appoint_id} has an abnormal status: {appoint.get_status()}")
def _terminate_handler(appoint: Appoint):
if appoint.Astatus == Appoint.Status.CONFIRMED: # may already be judged as passed, e.g. public areas and the Russian building
rid = appoint.Room.Rid
if rid[:1] != 'R' and rid not in {'B109A', 'B207'}:
logger.warning(f"Appointment {appoint.pk} qualified early: room {rid}")
elif appoint.Astatus != Appoint.Status.CANCELED: # abnormal status, most likely already judged
logger.warning(f"Appointment {appoint.pk} terminated early: {appoint.get_status()}")
def METHOD_NAME(appoint_id: int):
'''
Finish an appointment.
- Accepts a single appointment id
- Can handle an appointment in any status
- For non-terminal statuses, checks whether the head count qualifies and moves the appointment to a terminal status
Note that, because the scheduled task may run more than once, the appointment may already be terminated on the second run.
'''
try:
appoint: Appoint = Appoint.objects.get(Aid=appoint_id)
except:
return logger.exception(f"Appointment {appoint_id} unexpectedly disappeared")
# If already in a terminal status, only the head count needs to be checked for qualification
if appoint.Astatus in Appoint.Status.Terminals():
return _terminate_handler(appoint)
# The only expected non-terminal status is PROCESSING, but other statuses are judged the same way
if appoint.Astatus != Appoint.Status.PROCESSING:
get_user_logger(appoint).error(
f"Appointment {appoint_id} had status {appoint.get_status()} at finish time: checking qualification as usual")
# The camera has timed out, pass directly
if datetime.now() - appoint.Room.Rlatest_time > timedelta(minutes=15):
appoint.Astatus = Appoint.Status.CONFIRMED # waiting
appoint.save()
get_user_logger(appoint).info(f"Appointment {appoint_id} confirmed: finished successfully")
return
# Check whether the head count is sufficient
adjusted_rate = _adjusted_rate(CONFIG.camera_qualify_rate, appoint)
need_num = appoint.Acamera_check_num * adjusted_rate - 0.01
check_failed = appoint.Acamera_ok_num < need_num
if check_failed:
# Notifications for late appointments are handled here. If being late should not cost points,
# just delete the body of this if and let the camera-check if below decide the violation.
if appoint.Areason == Appoint.Reason.R_LATE:
reason = Appoint.Reason.R_LATE
else:
reason = Appoint.Reason.R_TOOLITTLE
if appoint_violate(appoint, reason):
appoint.refresh_from_db()
notify_appoint(appoint, MessageType.VIOLATED, appoint.get_status(),
students_id=[appoint.get_major_id()])
else: # passed
appoint.Astatus = Appoint.Status.CONFIRMED
appoint.save()
logger.info(f"Appointment {appoint_id} passed the head-count check")
|
1,193 |
main
|
#!/usr/bin/env python
"""
This is a convenience script for converting MTpy-style time series data files to miniSeed.
It needs the location of a folder with TS data. The folder structure is mirrored under the destination directory and the TS files are converted into miniSeed.
The destination can be given as an argument; if not, a local directory 'miniSeed' is created in the current directory.
1 mandatory argument:
- path to TS data files
3 optional arguments (in this order, matching the usage message below):
- name of the output directory
- 'network' code (2 characters)
- 'location' code (2 characters)
"""
import numpy as np
import re
import sys
import os
import glob
import os.path as op
import fnmatch
import mtpy.utils.exceptions as MTex
import mtpy.utils.mseed as MTms
import mtpy.utils.filehandling as MTfh
#reload(MTfh)
#reload(MTex)
#reload(MTms)
def METHOD_NAME():
if len(sys.argv) < 2:
sys.exit(
'\n\tNeed at least 1 argument: <path to files> [<output dir>] [<network code>] [<location code>]\n')
outdir = None
location = ''
network = ''
if len(sys.argv) > 2:
outdir = sys.argv[2]
if len(sys.argv) > 3:
network = sys.argv[3]
if len(sys.argv) > 4:
location = sys.argv[4]
pathname_raw = sys.argv[1]
# we need relative paths here!!!
indir = pathname_raw # op.abspath(op.realpath(pathname_raw))
if not op.isdir(indir):
raise MTex.MTpyError_inputarguments(
'Data file(s) path not existing: {0}'.format(indir))
# define output directory for storing miniSeed files
#outpath = op.join(os.curdir,'miniSeed')
if outdir is not None:
try:
outpath = op.abspath(op.join(os.curdir, outdir))
if not op.exists(outpath):
try:
os.makedirs(outpath)
except:
raise
if not os.access(outpath, os.W_OK):
raise
except:
print('Cannot generate writable output directory {0} - using generic location "miniSeed" instead'.format(outpath))
outdir = None
if outdir is None:
outpath = op.join(os.curdir, 'miniSeed')
try:
if not op.exists(outpath):
try:
os.makedirs(outpath)
except:
raise
if not os.access(outpath, os.W_OK):
raise
except:
sys.exit(
'Error ! - Cannot generate writable output directory "miniSeed" - abort...')
outdir = op.abspath(outpath)
lo_dirs = []
for i, j, k in os.walk(indir):
lofolders = [op.join(i, f) for f in j]
lo_dirs.extend(lofolders)
lo_dirs.append(indir)
pathname = list(set(lo_dirs))
if len(pathname) == 0:
pathname = [indir]
lo_indirs = pathname
lo_outdirs = []
#'pathname' is a list of relative pathnames. to be reconstructed under the given 'outdir'
try:
for i in lo_indirs:
outpath = op.abspath(op.join(outdir, i))
if not op.isdir(outpath):
os.makedirs(outpath)
lo_outdirs.append(outpath)
except:
raise MTex.MTpyError_inputarguments(
'ERROR - Cannot set up output directory {0}'.format(outpath))
for idx_ipath, inpath in enumerate(lo_indirs):
lo_infiles = [
i for i in os.listdir(inpath) if op.isfile(
op.abspath(
op.join(
inpath,
i)))]
lo_outfiles = [
op.abspath(
op.join(
lo_outdirs[idx_ipath],
i)) for i in lo_infiles]
lo_infiles = [op.abspath(op.join(inpath, i)) for i in lo_infiles]
for idx_fn, fn in enumerate(lo_infiles):
if MTfh.validate_ts_file(fn) is False:
print('Warning - MT ts data file {0} is not valid (check header)!!!'.format(fn))
# continue
print('reading file {0}'.format(fn))
outfn = MTms.convertfile_ts2miniseed(
fn, lo_outfiles[idx_fn], location=location, network=network)
print('wrote file {0}'.format(outfn))
if __name__ == '__main__':
METHOD_NAME()
|
1,194 |
test on modified state
|
import pytest
from numpy.testing import assert_allclose
from iminuit import Minuit
from iminuit.testing import rosenbrock, rosenbrock_grad
import numpy as np
scopt = pytest.importorskip("scipy.optimize")
def fcn(a, b):
return a**2 + ((b - 1) / 2.0) ** 2 + 3
def grad(a, b):
return 2 * a, b - 1
def hess(a, b):
return [[2, 0], [0, 0.5]]
def hessp(a, b, v):
return np.dot(hess(a, b), v)
@pytest.mark.parametrize("array_call", (False, True))
@pytest.mark.parametrize("fixed", (False, True))
@pytest.mark.parametrize(
"method",
(
"Nelder-Mead",
"Powell",
"CG",
"BFGS",
"Newton-CG",
"L-BFGS-B",
"TNC",
"COBYLA",
"SLSQP",
"trust-constr",
"dogleg",
"trust-ncg",
"trust-exact",
"trust-krylov",
),
)
def test_scipy_method(array_call, fixed, method):
fn = (lambda par: fcn(*par)) if array_call else fcn
gr = None
he = None
hep = None
if method in (
"Newton-CG",
"trust-constr",
"dogleg",
"trust-ncg",
"trust-exact",
"trust-krylov",
):
gr = (lambda par: grad(*par)) if array_call else grad
if method in ("Newton-CG", "dogleg", "trust-ncg", "trust-exact", "trust-krylov"):
he = (lambda par: hess(*par)) if array_call else hess
if method in ("Newton-CG", "trust-ncg", "trust-krylov", "trust-constr"):
hep = (lambda par, v: hessp(*par, v)) if array_call else hessp
if array_call:
m = Minuit(fn, (1, 2), grad=gr)
else:
m = Minuit(fn, a=1, b=2, grad=gr)
m.fixed[0] = fixed
m.scipy(method=method, hess=he)
assert m.valid
if fixed:
assert_allclose(m.values, [1, 1], atol=1e-3)
assert_allclose(m.errors[1], 2, rtol=1e-2)
else:
assert_allclose(m.values, [0, 1], atol=1e-3)
assert_allclose(m.errors, [1, 2], rtol=1e-2)
if hep:
m.scipy(method=method, hessp=hep)
assert m.valid
if fixed:
assert_allclose(m.values, [1, 1], atol=1e-3)
assert_allclose(m.errors[1], 2, rtol=1e-2)
else:
assert_allclose(m.values, [0, 1], atol=1e-3)
assert_allclose(m.errors, [1, 2], rtol=1e-2)
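# --- Illustrative sketch (not part of the test suite) ---
# The minimal call pattern these tests exercise: build a Minuit instance and let
# scipy.optimize drive the minimization through Minuit.scipy(). Needs iminuit and scipy.
m_demo = Minuit(fcn, a=1, b=2)
m_demo.scipy(method="BFGS")
print(m_demo.valid, list(m_demo.values)) # expected: True and values close to [0, 1]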
@pytest.mark.parametrize("stra", (0, 1))
@pytest.mark.parametrize("grad", (None, grad))
def test_scipy_unbounded(stra, grad):
m = Minuit(fcn, a=1, b=2, grad=grad)
m.strategy = stra
m.scipy()
assert m.valid
assert m.accurate == (stra == 1)
assert_allclose(m.values, [0, 1], atol=1e-3)
if stra == 1:
assert_allclose(m.errors, [1, 2], atol=3e-2)
if grad:
assert m.fmin.ngrad > 0
else:
assert m.fmin.ngrad == 0
@pytest.mark.parametrize("stra", (0, 1))
@pytest.mark.parametrize("grad", (None, grad))
@pytest.mark.parametrize(
"lower,upper",
(
(-0.1, None),
(0, None),
(0.1, None),
(None, -0.1),
(None, 0),
(None, 0.1),
(-0.1, 0.1),
),
)
def test_scipy_bounded(stra, grad, lower, upper):
m = Minuit(fcn, a=1, b=2, grad=grad)
m.limits["a"] = (lower, upper)
m.strategy = stra
m.scipy()
if stra == 1:
assert m.valid
assert m.accurate
lower = -np.inf if lower is None else lower
upper = np.inf if upper is None else upper
assert_allclose(m.values, [np.clip(0, lower, upper), 1], atol=1e-3)
if stra == 1:
assert_allclose(m.errors[1], 2, atol=3e-2)
if grad:
assert m.fmin.ngrad > 0
else:
assert m.fmin.ngrad == 0
@pytest.mark.parametrize("grad", (None, grad))
def test_scipy_fixed(grad):
m = Minuit(fcn, a=1, b=2, grad=grad)
m.fixed["a"] = True
m.scipy()
assert m.valid
assert_allclose(m.values, [1, 1], atol=1e-3)
assert_allclose(m.errors, [0.01, 2], atol=3e-2)
if grad:
assert m.fmin.ngrad > 0
else:
assert m.fmin.ngrad == 0
@pytest.mark.parametrize("stra", (0, 1))
@pytest.mark.parametrize("grad", (None, grad))
def test_scipy_errordef(stra, grad):
m = Minuit(fcn, a=1, b=2, grad=grad)
m.errordef = 4
m.strategy = stra
m.scipy()
assert m.valid
assert_allclose(m.values, [0, 1], atol=1e-3)
assert_allclose(m.errors, [2, 4], rtol=0.3)
if grad:
assert m.fmin.ngrad > 0
else:
assert m.fmin.ngrad == 0
@pytest.mark.parametrize("stra", (0, 1))
@pytest.mark.parametrize("grad", (None, rosenbrock_grad))
def test_scipy_ncall(stra, grad):
m = Minuit(rosenbrock, x=2, y=2, grad=grad)
m.strategy = stra
m.scipy()
assert m.valid
nfcn = m.fmin.nfcn
m.reset()
m.scipy(ncall=1)
assert m.fmin.nfcn < nfcn
assert not m.valid
@pytest.mark.parametrize("lb", (0, 0.1))
@pytest.mark.parametrize("fixed", (False, True))
def test_scipy_constraints_1(lb, fixed):
def fcn(a, x, y):
return a + x**2 + y**2
m = Minuit(fcn, a=3, x=1, y=2)
m.fixed["a"] = fixed
con_a = scopt.NonlinearConstraint(lambda a, x, y: a, lb, np.inf)
con_x = scopt.NonlinearConstraint(lambda a, x, y: x, lb, np.inf)
con_y = scopt.NonlinearConstraint(lambda a, x, y: y, lb, np.inf)
m.scipy(constraints=[con_a, con_x, con_y])
assert m.valid == (lb == 0 and fixed)
if fixed:
assert_allclose(m.values, [3, lb, lb], atol=1e-3)
else:
assert_allclose(m.values, [lb, lb, lb], atol=1e-3)
assert m.accurate
@pytest.mark.parametrize("fixed", (False, True))
def test_scipy_constraints_2(fixed):
def fcn(x, y):
return x**2 + y**2
m = Minuit(fcn, x=1, y=2)
m.fixed["x"] = fixed
con = scopt.LinearConstraint([1, 1], 0.1, np.inf)
m.scipy(method="COBYLA", constraints=con)
assert m.valid == fixed
if fixed:
assert_allclose(m.values, [1, 0.0], atol=1e-3)
assert_allclose(m.errors[1], 1, atol=1e-3)
else:
assert_allclose(m.values, [0.05, 0.05], atol=1e-3)
assert_allclose(m.errors, [1, 1], atol=1e-3)
def test_bad_constraint():
m = Minuit(fcn, a=1, b=2)
with pytest.raises(ValueError):
m.scipy(constraints={})
with pytest.raises(ValueError):
m.scipy(constraints=[{}])
def test_high_print_level(capsys):
m = Minuit(fcn, a=1, b=2)
m.scipy()
assert capsys.readouterr()[0] == ""
m.reset()
m.print_level = 1
m.scipy()
m.print_level = 0
assert capsys.readouterr()[0] != ""
def METHOD_NAME():
m = Minuit(fcn, a=1, b=2)
m.scipy()
assert m.valid
assert_allclose(m.values, [0, 1], atol=1e-3)
m.fixed[1] = True # modify latest state
m.values = 1, 2
m.scipy() # used to fail
assert m.valid
assert_allclose(m.values, [0, 2], atol=1e-3)
|
1,195 |
process notification data
|
# Copyright 2016-2017 Tecnativa - Sergio Teruel
# Copyright 2019 Ignacio Ibeas <[email protected]>
# Copyright 2020 Tecnativa - João Marques
# Copyright 2023 Planesnet - Luis Planes, Laia Espinosa, Raul Solana
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import base64
import json
import logging
import urllib
from odoo import _, api, fields, models
from odoo.tools import config
from odoo.addons.payment.models.payment_provider import ValidationError
_logger = logging.getLogger(__name__)
class TxRedsys(models.Model):
_inherit = "payment.transaction"
redsys_txnid = fields.Char("Transaction ID")
def merchant_params_json2dict(self, data):
parameters = data.get("Ds_MerchantParameters", "")
return json.loads(base64.b64decode(parameters).decode())
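# --- Illustrative standalone sketch (not a member of this class) ---
# What merchant_params_json2dict undoes: Redsys sends Ds_MerchantParameters as
# base64-encoded JSON. The order number below is a made-up example value.
demo_params = {"Ds_Order": "000000123", "Ds_Response": "0000"}
demo_encoded = base64.b64encode(json.dumps(demo_params).encode()).decode()
print(json.loads(base64.b64decode(demo_encoded).decode())) # round-trips back to the dict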
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
@api.model
def _get_tx_from_notification_data(self, provider, data):
"""Given a data dict coming from redsys, verify it and
find the related transaction record."""
tx = super()._get_tx_from_notification_data(provider, data)
if provider != "redsys":
return tx
parameters = data.get("Ds_MerchantParameters", "")
parameters_dic = json.loads(base64.b64decode(parameters).decode())
reference = urllib.parse.unquote(parameters_dic.get("Ds_Order", ""))
pay_id = parameters_dic.get("Ds_AuthorisationCode")
shasign = data.get("Ds_Signature", "").replace("_", "/").replace("-", "+")
test_env = config["test_enable"]
if not reference or not pay_id or not shasign:
error_msg = (
"Redsys: received data with missing reference"
" (%s) or pay_id (%s) or shashign (%s)" % (reference, pay_id, shasign)
)
if not test_env:
_logger.info(error_msg)
raise ValidationError(error_msg)
tx = self.search([("reference", "=", reference)])
if not tx or len(tx) > 1:
error_msg = "Redsys: received data for reference %s" % (reference)
if not tx:
error_msg += "; no order found"
else:
error_msg += "; multiple order found"
_logger.info(error_msg)
raise ValidationError(error_msg)
if tx and not test_env:
# verify shasign
shasign_check = tx.provider_id.sign_parameters(
tx.provider_id.redsys_secret_key, parameters
)
if shasign_check != shasign:
error_msg = (
"Redsys: invalid shasign, received %s, computed %s, "
"for data %s" % (shasign, shasign_check, data)
)
_logger.info(error_msg)
raise ValidationError(error_msg)
return tx
@api.model
def _get_redsys_state(self, status_code):
if 0 <= status_code <= 100:
return "done"
elif status_code <= 203:
return "pending"
elif 912 <= status_code <= 9912:
return "cancel"
else:
return "error"
def METHOD_NAME(self, data):
super().METHOD_NAME(data)
if self.provider_code != "redsys":
return
params = self.merchant_params_json2dict(data)
status_code = int(params.get("Ds_Response", "29999"))
state = self._get_redsys_state(status_code)
vals = {
"state": state,
"redsys_txnid": params.get("Ds_AuthorisationCode"),
"create_date": fields.Datetime.now(),
}
state_message = ""
feedback_error = False
if state == "done":
vals["state_message"] = _("Ok: %s") % params.get("Ds_Response")
self._set_done()
self._finalize_post_processing()
elif state == "pending": # 'Payment error: code: %s.'
state_message = _("Error: %(status_code)s (%(error_code)s)")
self._set_pending()
elif state == "cancel": # 'Payment error: bank unavailable.'
state_message = _("Bank Error: %(status_code)s (%(error_code)s)")
self._set_canceled()
else:
state_message = _(
"Redsys: feedback error: %(status_code)s (%(error_code)s)"
)
self._set_error(state_message)
feedback_error = True
if state_message:
vals["state_message"] = state_message % {
"status_code": params.get("Ds_Response"),
"error_code": params.get("Ds_ErrorCode"),
}
if state == "error":
_logger.warning(vals["state_message"])
if feedback_error:
self._set_error(state_message)
self.write(vals)
def _get_specific_rendering_values(self, processing_values):
res = super()._get_specific_rendering_values(processing_values)
if self.provider_code != "redsys":
return res
redsys_values = dict(processing_values)
merchant_parameters = self.provider_id._prepare_merchant_parameters(
processing_values
)
redsys_values.update(
{
"api_url": self.provider_id._redsys_get_api_url(),
"Ds_SignatureVersion": str(self.provider_id.redsys_signature_version),
"Ds_MerchantParameters": merchant_parameters,
"Ds_Signature": self.provider_id.sign_parameters(
self.provider_id.redsys_secret_key, merchant_parameters
),
}
)
return redsys_values
|
1,196 |
draw
|
#!/usr/bin/python
'''
This is a pseudo-server that sends predefined pattern to any connected client.
It is used to test transport behaviour and throughput.
If you want to use it with a sketch, connect your PC and Blynk-enabled device
into the same network and configure Blynk to connect to this pseudo-server:
IPAddress serv(192,168,0,105); // IP address of your PC
Blynk.begin(auth, serv, 8888);
Author: Volodymyr Shymanskyy
License: The MIT license
'''
import select, socket, struct
import os, sys, time, getopt
from threading import Thread
# Configuration options
# Parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:],
"hb:p:",
["help", "bind=", "port=", "sndbuf=", "rcvbuf=", "nodelay", "sleep=", "qty=", "freq=", "pin=", "dump"])
except getopt.GetoptError:
print >>sys.stderr, __doc__
sys.exit(2)
# Default options
HOST = '' # Bind to all interfaces
PORT = 8888 # Bind to port 8888
NODELAY = 0 # No TCP_NODELAY
SNDBUF = 0 # No SNDBUF override
RCVBUF = 0 # No RCVBUF override
MSG_QTY = 10 # Amount of messages
SLEEP = 1.0 # Wait some time between IO
HW_PIN = 14 # Pin #
DUMP = 0
for o, v in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit()
elif o in ("-b", "--bind"):
HOST = v
elif o in ("-p", "--port"):
PORT = int(v)
elif o in ("--sndbuf",):
SNDBUF = int(v)
elif o in ("--rcvbuf",):
RCVBUF = int(v)
elif o in ("--nodelay",):
NODELAY = 1
elif o in ("--sleep",):
SLEEP = float(v)
elif o in ("--freq",):
SLEEP = 1.0/float(v)
elif o in ("--qty",):
MSG_QTY = int(v)
elif o in ("--pin",):
HW_PIN = int(v)
elif o in ("--dump",):
DUMP = 1
# Blynk protocol helpers
hdr = struct.Struct("!BHH")
class MsgType:
RSP = 0
LOGIN = 2
PING = 6
HW = 20
class MsgStatus:
OK = 200
def hw(*args):
# Convert params to string and join using \0
data = "\0".join(map(str, args))
dump("< " + " ".join(map(str, args)))
# Prepend HW command header
return hdr.pack(MsgType.HW, 1, len(data)) + data
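# --- Illustrative sketch (not part of the original script) ---
# The wire format used above: a 5-byte "!BHH" header (type, id, length) followed by the
# payload, where hardware-command arguments are joined with NUL bytes. Shown with
# Python 3 bytes for clarity; the surrounding script itself targets Python 2.
import struct
demo_hdr = struct.Struct("!BHH")
demo_payload = b"\0".join([b"dw", b"14", b"1"]) # "digital write: pin 14 -> 1"
demo_frame = demo_hdr.pack(20, 1, len(demo_payload)) + demo_payload # 20 == MsgType.HW
print(demo_hdr.unpack(demo_frame[:demo_hdr.size]), demo_frame[demo_hdr.size:])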
# Print utilities
start_time = time.time()
def log(msg):
print "[{:7.3f}] {:}".format(float(time.time() - start_time), msg)
draw_col = 0
def METHOD_NAME(c):
global draw_col
if not DUMP:
sys.stdout.write(c)
draw_col = (draw_col + 1) % 120
if draw_col:
sys.stdout.flush()
else:
sys.stdout.write("\n")
def dump(msg):
if DUMP:
log(msg)
def receive(sock, length):
d = []
l = 0
while l < length:
r = sock.recv(length-l)
if not r:
return ''
d.append(r)
l += len(r)
return ''.join(d)
# Threads
def readthread(conn, addr):
global msgs_in, authenticated
while(msgs_in < MSG_QTY):
data = receive(conn, hdr.size)
if not data:
break
msg_type, msg_id, msg_len = hdr.unpack(data)
#dump("Got {0}, {1}, {2}".format(msg_type, msg_id, msg_len))
if msg_type == MsgType.RSP:
pass
elif msg_type == MsgType.LOGIN:
auth = receive(conn, msg_len)
log("Auth {0}".format(auth))
# Send auth OK and pin modes
conn.sendall(hdr.pack(MsgType.RSP, msg_id, MsgStatus.OK))
conn.sendall(hw("pm", HW_PIN, "out"))
authenticated = True
elif msg_type == MsgType.PING:
log("Ping")
# Send Pong
conn.sendall(hdr.pack(MsgType.RSP, msg_id, MsgStatus.OK))
elif msg_type == MsgType.HW:
data = receive(conn, msg_len)
# Print HW messages (just for fun :)
METHOD_NAME('v')
dump("> " + " ".join(data.split("\0")))
msgs_in += 1
else:
log("Unknown msg type")
break
def writethread(conn, addr):
global msgs_out, authenticated
val = 0
while (msgs_out < MSG_QTY):
if authenticated:
conn.sendall(hw("dw", HW_PIN, val))
val = 0 if val else 1
METHOD_NAME('.')
msgs_out += 1
time.sleep(SLEEP)
# Main code
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Set SO_REUSEADDR, this is needed to ignore WAIT state on next run
serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv.bind((HOST, PORT))
except socket.error as msg:
log('Bind failed. Error Code: {0}, Msg: {1}'.format(str(msg[0]), msg[1]))
sys.exit()
serv.listen(1)
log('Listening on port %d' % PORT)
# Wait for clients
#while True:
conn, addr = serv.accept()
log('Connection from {0}:{1}'.format(addr[0], str(addr[1])))
if NODELAY != 0:
conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if SNDBUF != 0:
sndbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
log('Default SNDBUF %s changed to %s' % (sndbuf, SNDBUF))
conn.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, SNDBUF)
if RCVBUF != 0:
rcvbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
log('Default RCVBUF %s changed to %s' % (rcvbuf, RCVBUF))
conn.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, RCVBUF)
proc_start = time.time()
msgs_in = 0
msgs_out = 0
authenticated = False
rt = Thread(target=readthread, args=(conn, addr))
wt = Thread(target=writethread, args=(conn, addr))
rt.start()
wt.start()
rt.join()
#wt.join()
conn.close()
METHOD_NAME("\n")
log("Time %3.4f" % (time.time() - proc_start))
log("Sent {0} messages".format(msgs_out))
log("Recv {0} messages".format(msgs_in))
|
1,197 |
get static default value
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
"""
Create a new document with defaults set
"""
import copy
import frappe
import frappe.defaults
from frappe.core.doctype.user_permission.user_permission import get_user_permissions
from frappe.model import data_fieldtypes
from frappe.permissions import filter_allowed_docs_for_doctype
from frappe.utils import cstr, now_datetime, nowdate, nowtime
def get_new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
if doctype not in frappe.local.new_doc_templates:
# cache a copy of new doc as it is called
# frequently for inserts
frappe.local.new_doc_templates[doctype] = make_new_doc(doctype)
doc = copy.deepcopy(frappe.local.new_doc_templates[doctype])
set_dynamic_default_values(doc, parent_doc, parentfield)
if as_dict:
return doc
else:
return frappe.get_doc(doc)
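# --- Illustrative sketch (not part of the original module) ---
# How this helper is usually reached from application code: frappe.new_doc() wraps
# get_new_doc() and returns a Document pre-filled with user and static defaults.
# Requires a running Frappe site context; "ToDo" is just an example doctype.
# todo = frappe.new_doc("ToDo")
# todo.description = "Example task"
# todo.insert()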
def make_new_doc(doctype):
doc = frappe.get_doc(
{"doctype": doctype, "__islocal": 1, "owner": frappe.session.user, "docstatus": 0}
)
set_user_and_static_default_values(doc)
doc._fix_numeric_types()
doc = doc.get_valid_dict(sanitize=False)
doc["doctype"] = doctype
doc["__islocal"] = 1
if not frappe.model.meta.is_single(doctype):
doc["__unsaved"] = 1
return doc
def set_user_and_static_default_values(doc):
user_permissions = get_user_permissions()
defaults = frappe.defaults.get_defaults()
for df in doc.meta.get("fields"):
if df.fieldtype in data_fieldtypes:
# user permissions for link options
doctype_user_permissions = user_permissions.get(df.options, [])
# Allowed records for the reference doctype (link field) along with default doc
allowed_records, default_doc = filter_allowed_docs_for_doctype(
doctype_user_permissions, df.parent, with_default_doc=True
)
user_default_value = get_user_default_value(
df, defaults, doctype_user_permissions, allowed_records, default_doc
)
if user_default_value is not None:
# if fieldtype is link check if doc exists
if not df.fieldtype == "Link" or frappe.db.exists(df.options, user_default_value):
doc.set(df.fieldname, user_default_value)
else:
if df.fieldname != doc.meta.title_field:
static_default_value = METHOD_NAME(df, doctype_user_permissions, allowed_records)
if static_default_value is not None:
doc.set(df.fieldname, static_default_value)
def get_user_default_value(df, defaults, doctype_user_permissions, allowed_records, default_doc):
# don't set defaults for "User" link field using User Permissions!
if df.fieldtype == "Link" and df.options != "User":
# If a user permission has "Is Default" enabled, or a single user permission exists for the respective doctype.
if not df.ignore_user_permissions and default_doc:
return default_doc
# 2 - Look in user defaults
user_default = defaults.get(df.fieldname)
allowed_by_user_permission = validate_value_via_user_permissions(
df, doctype_user_permissions, allowed_records, user_default=user_default
)
# is this user default also allowed as per user permissions?
if user_default and allowed_by_user_permission:
return user_default
def METHOD_NAME(df, doctype_user_permissions, allowed_records):
# 3 - look in default of docfield
if df.get("default"):
if df.default == "__user":
return frappe.session.user
elif df.default == "Today":
return nowdate()
elif not cstr(df.default).startswith(":"):
# a simple default value
is_allowed_default_value = validate_value_via_user_permissions(
df, doctype_user_permissions, allowed_records
)
if df.fieldtype != "Link" or df.options == "User" or is_allowed_default_value:
return df.default
elif df.fieldtype == "Select" and df.options and df.options not in ("[Select]", "Loading..."):
return df.options.split("\n", 1)[0]
def validate_value_via_user_permissions(
df, doctype_user_permissions, allowed_records, user_default=None
):
is_valid = True
# If User Permission exists and allowed records is empty,
# that means there are User Perms, but none applicable to this new doctype.
if user_permissions_exist(df, doctype_user_permissions) and allowed_records:
# If allowed records is not empty,
# check if this field value is allowed via User Permissions applied to this doctype.
value = user_default if user_default else df.default
is_valid = value in allowed_records
return is_valid
def set_dynamic_default_values(doc, parent_doc, parentfield):
# these values should not be cached
user_permissions = get_user_permissions()
for df in frappe.get_meta(doc["doctype"]).get("fields"):
if df.get("default"):
if cstr(df.default).startswith(":"):
default_value = get_default_based_on_another_field(df, user_permissions, parent_doc)
if default_value is not None and not doc.get(df.fieldname):
doc[df.fieldname] = default_value
elif df.fieldtype == "Datetime" and df.default.lower() == "now":
doc[df.fieldname] = now_datetime()
if df.fieldtype == "Time":
doc[df.fieldname] = nowtime()
if parent_doc:
doc["parent"] = parent_doc.name
doc["parenttype"] = parent_doc.doctype
if parentfield:
doc["parentfield"] = parentfield
def user_permissions_exist(df, doctype_user_permissions):
return (
df.fieldtype == "Link"
and not getattr(df, "ignore_user_permissions", False)
and doctype_user_permissions
)
def get_default_based_on_another_field(df, user_permissions, parent_doc):
# default value based on another document
from frappe.permissions import get_allowed_docs_for_doctype
ref_doctype = df.default[1:]
ref_fieldname = ref_doctype.lower().replace(" ", "_")
reference_name = (
parent_doc.get(ref_fieldname) if parent_doc else frappe.db.get_default(ref_fieldname)
)
default_value = frappe.db.get_value(ref_doctype, reference_name, df.fieldname)
is_allowed_default_value = not user_permissions_exist(df, user_permissions.get(df.options)) or (
default_value in get_allowed_docs_for_doctype(user_permissions[df.options], df.parent)
)
# is this allowed as per user permissions
if is_allowed_default_value:
return default_value
|
1,198 |
test audit login failed should set properties
|
from contextlib import contextmanager
from unittest.mock import patch
from django.conf import settings
from django.contrib.auth.models import User
from django.test import RequestFactory
from django.test.utils import override_settings
from testil import Config, eq
import corehq.apps.auditcare.models as mod
from corehq.apps.auditcare.models import AccessAudit, NavigationEventAudit
from .test_middleware import make_view
from .testutils import AuditcareTest
from ..utils import to_django_header
TRACE_HEADER = "X-Test-1354321354-Trace-Id"
class TestAccessAudit(AuditcareTest):
def test_audit_login_should_set_properties(self):
with intercept_save(AccessAudit) as cfg:
AccessAudit.audit_login(make_request("/a/block/login"), make_user())
event = cfg.obj
self.assertEqual(event.user, "[email protected]")
self.assertEqual(event.path, "/a/block/login")
self.assertEqual(event.domain, "block")
self.assertEqual(event.ip_address, "127.0.0.1")
self.assertEqual(event.http_accept, "html")
self.assertEqual(event.user_agent, "Mozilla")
self.assertEqual(event.access_type, mod.ACCESS_LOGIN)
self.assertEqual(event.session_key, "abc")
self.assertEqual(event.description, "Login: [email protected]")
def METHOD_NAME(self):
request = make_request("/a/block/login", session_key=None)
with intercept_save(AccessAudit) as cfg:
AccessAudit.audit_login_failed(request, "[email protected]")
event = cfg.obj
self.assertEqual(event.user, "[email protected]")
self.assertEqual(event.path, "/a/block/login")
self.assertEqual(event.domain, "block")
self.assertEqual(event.ip_address, "127.0.0.1")
self.assertEqual(event.http_accept, "html")
self.assertEqual(event.user_agent, "Mozilla")
self.assertEqual(event.access_type, mod.ACCESS_FAILED)
self.assertEqual(event.session_key, None)
self.assertEqual(event.description, "Login failed: [email protected]")
def test_audit_logout_should_set_properties(self):
with intercept_save(AccessAudit) as cfg:
AccessAudit.audit_logout(make_request("/accounts/logout"), make_user())
event = cfg.obj
self.assertEqual(event.user, "[email protected]")
self.assertEqual(event.path, "/accounts/logout")
self.assertEqual(event.domain, None)
self.assertEqual(event.ip_address, "127.0.0.1")
self.assertEqual(event.http_accept, "html")
self.assertEqual(event.user_agent, "Mozilla")
self.assertEqual(event.access_type, mod.ACCESS_LOGOUT)
self.assertEqual(event.session_key, "abc")
self.assertEqual(event.description, "Logout: [email protected]")
def test_audit_logout_anonymous_should_set_properties(self):
with intercept_save(AccessAudit) as cfg:
AccessAudit.audit_logout(make_request("/accounts/logout"), None)
event = cfg.obj
self.assertEqual(event.user, None)
self.assertEqual(event.description, "Logout: ")
@override_settings(AUDIT_TRACE_ID_HEADER=TRACE_HEADER)
def test_audit_trace_id_header(self):
trace_id = "Root=1-67891233-abcdef012345678912345678"
headers = {to_django_header(TRACE_HEADER): trace_id}
request = make_request("/a/block/login", **headers)
# HACK verify that the header was set correctly
assert TRACE_HEADER in request.headers, request.headers
with intercept_save(AccessAudit) as cfg, patch_trace_id_header():
AccessAudit.audit_login(request, None)
event = cfg.obj
self.assertEqual(event.trace_id, trace_id)
class TestNavigationEventAudit(AuditcareTest):
def test_audit_view_should_set_properties(self):
path = "/a/block/path"
view = make_view(path)
request = make_request(path)
event = NavigationEventAudit.audit_view(request, "[email protected]", view, {})
self.assertEqual(event.path, path)
self.assertEqual(event.domain, "block")
self.assertEqual(event.request_path, f"{path}?key=value")
self.assertEqual(event.description, "[email protected]")
self.assertNotIn(to_django_header(TRACE_HEADER), event.headers)
event.save()
def test_audit_view_should_truncate_params(self):
path = "/path"
view = make_view(path)
request = make_request(path, params={f"a{x}": "b" for x in range(1000)})
event = NavigationEventAudit.audit_view(request, "[email protected]", view, {})
event.save()
event.refresh_from_db()
self.assertEqual(len(event.params), 4096)
@override_settings(AUDIT_TRACE_ID_HEADER=TRACE_HEADER)
def test_audit_trace_id_header(self):
trace_id = "Root=1-67891233-abcdef012345678912345678"
with patch_trace_id_header():
view = make_view()
request = make_request(**{to_django_header(TRACE_HEADER): trace_id})
event = NavigationEventAudit.audit_view(request, "[email protected]", view, {})
self.assertEqual(event.headers[to_django_header(TRACE_HEADER)], trace_id)
event.save()
def test_audit_view_should_not_save(self):
view = make_view()
event = NavigationEventAudit.audit_view(make_request(), "[email protected]", view, {})
self.assertIsNone(event.id)
def test_get_domain():
def test(cfg):
request = make_request(cfg.path)
if "request_domain" in cfg:
request.domain = cfg.request_domain
eq(mod.get_domain(request), cfg.expect)
cfg = Config(expect="block")
yield test, cfg(path="/path", expect=None)
yield test, cfg(path="/a/block/path")
yield test, cfg(path="/path", request_domain="block")
yield test, cfg(path="/a/block/path", request_domain="xx")
def make_request(path="/path", session_key="abc", params=None, **headers):
headers.setdefault("HTTP_ACCEPT", "html")
headers.setdefault("HTTP_USER_AGENT", "Mozilla")
request = RequestFactory().get(path, params or {"key": "value"}, **headers)
request.session = Config(session_key=session_key)
return request
def make_user():
return User(username="[email protected]", first_name="Melvin", last_name="Block")
@contextmanager
def intercept_save(cls):
def save(self):
real_save(self)
config.obj = self
config = Config()
real_save = cls.save
with patch.object(cls, "save", save):
yield config
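# --- Illustrative sketch (not part of the test module) ---
# intercept_save patches cls.save so the instance being saved is captured on cfg.obj,
# while the real save() still runs. A hedged usage outline mirroring the tests above:
# with intercept_save(AccessAudit) as cfg:
#     AccessAudit.audit_login(make_request("/a/demo/login"), make_user())
# print(cfg.obj.path) # -> "/a/demo/login"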
@contextmanager
def patch_trace_id_header():
def assert_not_installed():
assert AccessAudit.trace_id_header != settings.AUDIT_TRACE_ID_HEADER, \
AccessAudit.trace_id_header
assert django_header not in mod.STANDARD_HEADER_KEYS, \
(django_header, mod.STANDARD_HEADER_KEYS)
from .. import install_trace_id_header
django_header = to_django_header(settings.AUDIT_TRACE_ID_HEADER)
assert_not_installed()
install_trace_id_header()
try:
yield
finally:
AccessAudit.trace_id_header = None
mod.STANDARD_HEADER_KEYS.remove(django_header)
assert_not_installed()
|
1,199 |
get biz dimension
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from api import BKMonitorClient
from gcloud.conf import settings
from pipeline.core.flow.io import StringItemSchema, ObjectItemSchema
from pipeline.component_framework.component import Component
from pipeline_plugins.base.utils.inject import supplier_account_for_business
from pipeline_plugins.components.collections.sites.open.monitor.base import MonitorBaseService
from pipeline_plugins.components.utils.sites.open.utils import get_module_id_list_by_name
from pipeline_plugins.variables.utils import (
get_set_list,
get_list_by_selected_names,
get_service_template_list,
get_service_template_list_by_names,
)
SCOPE = {"business": "bk_alarm_shield_business", "IP": "bk_alarm_shield_IP", "node": "bk_alarm_shield_node"}
ALL_SELECTED_STR = "all"
__group_name__ = _("监控平台(Monitor)")
class MonitorAlarmShieldService(MonitorBaseService):
def inputs_format(self):
return [
self.InputItem(
name=_("屏蔽范围类型"),
key="bk_alarm_shield_info",
type="object",
schema=ObjectItemSchema(description=_(u"屏蔽范围类型"), property_schemas={}),
),
self.InputItem(
name=_("策略 ID"),
key="bk_alarm_shield_target",
type="string",
schema=StringItemSchema(description=_("需要执行屏蔽的指标")),
),
self.InputItem(
name=_("屏蔽开始时间"),
key="bk_alarm_shield_begin_time",
type="string",
schema=StringItemSchema(description=_("开始屏蔽的时间")),
),
self.InputItem(
name=_("屏蔽结束时间"),
key="bk_alarm_shield_end_time",
type="string",
schema=StringItemSchema(description=_("结束屏蔽的时间")),
),
]
def execute(self, data, parent_data):
bk_biz_id = parent_data.get_one_of_inputs("biz_cc_id")
executor = parent_data.get_one_of_inputs("executor")
client = BKMonitorClient(username=executor)
combine = data.get_one_of_inputs("bk_alarm_shield_info")
scope_type = combine.get("bk_alarm_shield_scope")
scope_value = combine.get(SCOPE[scope_type])
target = data.get_one_of_inputs("bk_alarm_shield_target")
begin_time = data.get_one_of_inputs("bk_alarm_shield_begin_time")
end_time = data.get_one_of_inputs("bk_alarm_shield_end_time")
if parent_data.get_one_of_inputs("language"):
setattr(client, "language", parent_data.get_one_of_inputs("language"))
translation.activate(parent_data.get_one_of_inputs("language"))
supplier_account = supplier_account_for_business(bk_biz_id)
request_body = self.get_request_body(
bk_biz_id, begin_time, end_time, scope_type, scope_value, executor, supplier_account
)
if "all" not in target:
request_body["dimension_config"].update({"metric_id": target})
result_flag = self.send_request(request_body, data, client)
return result_flag
def get_dimension_config(self, shied_type, shied_value, bk_biz_id, username, bk_supplier_account):
dimension_map = {
"business": self.METHOD_NAME,
"IP": self.get_ip_dimension,
"node": self.get_node_dimension,
}
return dimension_map[shied_type](shied_value, bk_biz_id, username, bk_supplier_account)
def get_request_body(self, bk_biz_id, begin_time, end_time, shied_type, shied_value, username, bk_supplier_account):
dimension_config = self.get_dimension_config(shied_type, shied_value, bk_biz_id, username, bk_supplier_account)
request_body = self.build_request_body(
begin_time=begin_time,
bk_biz_id=bk_biz_id,
shied_type=shied_type,
dimension_config=dimension_config,
end_time=end_time,
)
return request_body
def get_ip_dimension(self, scope_value, bk_biz_id, username, bk_supplier_account):
ip_dimension = super(MonitorAlarmShieldService, self).get_ip_dimension_config(scope_value, bk_biz_id, username)
return ip_dimension
@staticmethod
def METHOD_NAME(scope_value, bk_biz_id, username, bk_supplier_account):
return {"scope_type": "biz"}
@staticmethod
def get_node_dimension(scope_value, bk_biz_id, username, bk_supplier_account):
bk_set_method = scope_value["bk_set_method"]
if bk_set_method == "select":
bk_set_value = scope_value["bk_set_select"]
else:
bk_set_value = scope_value["bk_set_text"]
bk_module_method = scope_value["bk_module_method"]
if bk_module_method == "select":
bk_module_value = scope_value["bk_module_select"]
else:
bk_module_value = scope_value["bk_module_text"]
# Fetch the full list of sets (clusters)
set_list = get_set_list(username, bk_biz_id, bk_supplier_account)
# When "all" is not selected for sets, narrow the list down to the chosen names
if ALL_SELECTED_STR not in bk_set_value:
selected_set_names = bk_set_value
# Get the selected sets by the chosen set names
set_list = get_list_by_selected_names(selected_set_names, set_list)
# Fetch the full list of service templates
service_template_list = get_service_template_list(username, bk_biz_id, bk_supplier_account)
# When "all" is not selected for service templates, narrow the list down to the chosen names
if ALL_SELECTED_STR not in bk_module_value:
selected_service_template_names = bk_module_value
# Get the service template list from the selected or entered template names
service_template_list = get_service_template_list_by_names(
selected_service_template_names, service_template_list
)
# Get the list of module ids
module_ids = get_module_id_list_by_name(bk_biz_id, username, set_list, service_template_list)
target = [{"bk_obj_id": "module", "bk_inst_id": module_id["bk_module_id"]} for module_id in module_ids]
return {"scope_type": "node", "target": target}
class MonitorAlarmShieldComponent(Component):
name = _("蓝鲸监控告警屏蔽(按范围)")
code = "monitor_alarm_shield"
bound_service = MonitorAlarmShieldService
form = "{static_url}components/atoms/monitor/alarm_shield/v1_0.js".format(static_url=settings.STATIC_URL)
version = "1.0"
desc = _('注意: 1.屏蔽方案选择"自定义监控"时,屏蔽范围CC大区和集群必须选择"all"')
|