id (int64, 0–300k) | label (string, 1–74 chars, ⌀ = null) | text (string, 4k–8k chars) |
---|---|---|
299,500 |
test is googler googler
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testing_config # Must be imported first
import datetime
from unittest import mock
import flask
import werkzeug.exceptions
# from google.appengine.api import users
from framework import users
from framework import rediscache
from api import metricsdata
from internals import metrics_models
test_app = flask.Flask(__name__)
class MetricsFunctionTests(testing_config.CustomTestCase):
def setUp(self):
self.datapoint = metrics_models.StableInstance(
day_percentage=0.0123456789, date=datetime.date.today(),
bucket_id=1, property_name='prop')
def test_is_googler__anon(self):
testing_config.sign_out()
user = users.get_current_user()
self.assertFalse(metricsdata._is_googler(user))
def test_is_googler__nongoogler(self):
testing_config.sign_in('[email protected]', 111)
user = users.get_current_user()
self.assertFalse(metricsdata._is_googler(user))
def METHOD_NAME(self):
testing_config.sign_in('[email protected]', 111)
user = users.get_current_user()
self.assertTrue(metricsdata._is_googler(user))
def test_datapoints_to_json_dicts__googler(self):
testing_config.sign_in('[email protected]', 111)
datapoints = [self.datapoint]
actual = metricsdata._datapoints_to_json_dicts(datapoints)
expected = [{
'bucket_id': 1,
'date': str(datetime.date.today()),
'day_percentage': 0.0123456789,
'property_name': 'prop',
}]
self.assertEqual(expected, actual)
def test_datapoints_to_json_dicts__nongoogler(self):
testing_config.sign_in('[email protected]', 222)
datapoints = [self.datapoint]
actual = metricsdata._datapoints_to_json_dicts(datapoints)
expected = [{
'bucket_id': 1,
'date': str(datetime.date.today()),
'day_percentage': 0.01234568, # rounded
'property_name': 'prop',
}]
self.assertEqual(expected, actual)
class PopularityTimelineHandlerTests(testing_config.CustomTestCase):
def setUp(self):
self.handler = metricsdata.PopularityTimelineHandler()
self.datapoint = metrics_models.StableInstance(
day_percentage=0.0123456789, date=datetime.date.today(),
bucket_id=1, property_name='prop')
self.datapoint.put()
def tearDown(self):
self.datapoint.key.delete()
def test_make_query(self):
actual_query = self.handler.make_query(1)
self.assertEqual(actual_query.kind, metrics_models.StableInstance._get_kind())
@mock.patch('flask.abort')
def test_get_template_data__bad_bucket(self, mock_abort):
url = '/data/timeline/csspopularity?bucket_id=not-a-number'
mock_abort.side_effect = werkzeug.exceptions.BadRequest
with test_app.test_request_context(url):
with self.assertRaises(werkzeug.exceptions.BadRequest):
actual = self.handler.get_template_data()
mock_abort.assert_called_once_with(
400, description="Request parameter 'bucket_id' was not an int")
def test_get_template_data__normal(self):
testing_config.sign_out()
url = '/data/timeline/csspopularity?bucket_id=1'
with test_app.test_request_context(url):
actual_datapoints = self.handler.get_template_data()
self.assertEqual(1, len(actual_datapoints))
self.assertEqual(0.01234568, actual_datapoints[0]['day_percentage'])
class CSSPopularityHandlerTests(testing_config.CustomTestCase):
def setUp(self):
self.handler = metricsdata.CSSPopularityHandler()
# Set up StableInstance data.
self.datapoint = metrics_models.StableInstance(
day_percentage=0.0123456789, date=datetime.date.today(),
bucket_id=1, property_name='b prop')
self.datapoint.put()
# Set up CssPropertyHistogram data.
self.prop_1 = metrics_models.CssPropertyHistogram(
bucket_id=1, property_name='b prop')
self.prop_1.put()
self.prop_2 = metrics_models.CssPropertyHistogram(
bucket_id=2, property_name='a prop')
self.prop_2.put()
self.prop_3 = metrics_models.FeatureObserverHistogram(
bucket_id=3, property_name='b feat')
self.prop_3.put()
self.prop_4 = metrics_models.FeatureObserverHistogram(
bucket_id=4, property_name='a feat')
self.prop_4.put()
def tearDown(self):
self.datapoint.key.delete()
self.prop_1.key.delete()
self.prop_2.key.delete()
self.prop_3.key.delete()
self.prop_4.key.delete()
rediscache.flushall()
def test_get_top_num_cache_key(self):
actual = self.handler.get_top_num_cache_key(30)
self.assertEqual('metrics|css_popularity_30', actual)
def test_get_template_data(self):
url = '/data/csspopularity'
with test_app.test_request_context(url):
actual_datapoints = self.handler.get_template_data()
self.assertEqual(1, len(actual_datapoints))
self.assertEqual(0.01234568, actual_datapoints[0]['day_percentage'])
def test_get_template_data_from_cache(self):
url = '/data/csspopularity'
with test_app.test_request_context(url):
self.handler.get_template_data()
actual_datapoints = rediscache.get('metrics|css_popularity')
self.assertEqual(1, len(actual_datapoints))
self.assertEqual(0.0123456789, actual_datapoints[0].day_percentage)
def test_should_refresh(self):
url = '/data/csspopularity?'
with test_app.test_request_context(url):
self.assertEqual(False, self.handler.should_refresh())
def test_get_template_data_with_num(self):
self.assertEqual(None, rediscache.get('metrics|css_popularity_30'))
url = '/data/csspopularity?num=30'
with test_app.test_request_context(url):
self.handler.get_template_data()
actual_datapoints = rediscache.get('metrics|css_popularity_30')
self.assertEqual(1, len(actual_datapoints))
self.assertEqual(0.0123456789, actual_datapoints[0].day_percentage)
class FeatureBucketsHandlerTest(testing_config.CustomTestCase):
def setUp(self):
self.handler = metricsdata.FeatureBucketsHandler()
self.prop_1 = metrics_models.CssPropertyHistogram(
bucket_id=1, property_name='b prop')
self.prop_1.put()
self.prop_2 = metrics_models.CssPropertyHistogram(
bucket_id=2, property_name='a prop')
self.prop_2.put()
self.prop_3 = metrics_models.FeatureObserverHistogram(
bucket_id=3, property_name='b feat')
self.prop_3.put()
self.prop_4 = metrics_models.FeatureObserverHistogram(
bucket_id=4, property_name='a feat')
self.prop_4.put()
def tearDown(self):
self.prop_1.key.delete()
self.prop_2.key.delete()
self.prop_3.key.delete()
self.prop_4.key.delete()
def test_get_template_data__css(self):
with test_app.test_request_context('/data/blink/cssprops'):
actual_buckets = self.handler.get_template_data(prop_type='cssprops')
self.assertEqual(
[(2, 'a prop'), (1, 'b prop')],
actual_buckets)
def test_get_template_data__js(self):
with test_app.test_request_context('/data/blink/features'):
actual_buckets = self.handler.get_template_data(prop_type='features')
self.assertEqual(
[(4, 'a feat'), (3, 'b feat')],
actual_buckets)
|
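A minimal sketch of the rounding behavior these tests assert; the real `metricsdata._datapoints_to_json_dicts` is not shown here, so the helper below is hypothetical and only mirrors what the assertions imply (full precision for googlers, values rounded to 8 decimal places otherwise):

```python
# Hypothetical helper mirroring the assertions above; not the actual
# chromestatus metricsdata implementation.
def datapoints_to_json_dicts(datapoints, is_googler):
    return [{
        'bucket_id': dp.bucket_id,
        'date': str(dp.date),
        # Non-googlers only get a value rounded to 8 decimal places.
        'day_percentage': dp.day_percentage if is_googler else round(dp.day_percentage, 8),
        'property_name': dp.property_name,
    } for dp in datapoints]

assert round(0.0123456789, 8) == 0.01234568  # matches the expected test value
```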
299,501 |
pre exec
|
# Sample Gunicorn configuration file.
#
# Server socket
#
# bind - The socket to bind.
#
# A string of the form: 'HOST', 'HOST:PORT', 'unix:PATH'.
# An IP is a valid HOST.
#
# backlog - The number of pending connections. This refers
# to the number of clients that can be waiting to be
# served. Exceeding this number results in the client
# getting an error when attempting to connect. It should
# only affect servers under significant load.
#
# Must be a positive integer. Generally set in the 64-2048
# range.
#
bind = '127.0.0.1:8000'
backlog = 2048
#
# Worker processes
#
# workers - The number of worker processes that this server
# should keep alive for handling requests.
#
# A positive integer generally in the 2-4 x $(NUM_CORES)
# range. You'll want to vary this a bit to find the best
# for your particular application's work load.
#
# worker_class - The type of workers to use. The default
# sync class should handle most 'normal' types of work
# loads. You'll want to read
# http://docs.gunicorn.org/en/latest/design.html#choosing-a-worker-type
# for information on when you might want to choose one
# of the other worker classes.
#
# A string referring to a Python path to a subclass of
# gunicorn.workers.base.Worker. The default provided values
# can be seen at
# http://docs.gunicorn.org/en/latest/settings.html#worker-class
#
# worker_connections - For the eventlet and gevent worker classes
# this limits the maximum number of simultaneous clients that
# a single process can handle.
#
# A positive integer generally set to around 1000.
#
# timeout - If a worker does not notify the master process in this
# number of seconds it is killed and a new worker is spawned
# to replace it.
#
# Generally set to thirty seconds. Only set this noticeably
# higher if you're sure of the repercussions for sync workers.
# For the non sync workers it just means that the worker
# process is still communicating and is not tied to the length
# of time required to handle a single request.
#
# keepalive - The number of seconds to wait for the next request
# on a Keep-Alive HTTP connection.
#
# A positive integer. Generally set in the 1-5 seconds range.
#
workers = 1
worker_class = 'sync'
worker_connections = 1000
timeout = 30
keepalive = 2
#
# spew - Install a trace function that spews every line of Python
# that is executed when running the server. This is the
# nuclear option.
#
# True or False
#
spew = False
#
# Server mechanics
#
# daemon - Detach the main Gunicorn process from the controlling
# terminal with a standard fork/fork sequence.
#
# True or False
#
# raw_env - Pass environment variables to the execution environment.
#
# pidfile - The path to a pid file to write
#
# A path string or None to not write a pid file.
#
# user - Switch worker processes to run as this user.
#
# A valid user id (as an integer) or the name of a user that
# can be retrieved with a call to pwd.getpwnam(value) or None
# to not change the worker process user.
#
# group - Switch worker process to run as this group.
#
# A valid group id (as an integer) or the name of a group that
# can be retrieved with a call to pwd.getgrnam(value) or None
# to not change the worker processes group.
#
# umask - A mask for file permissions written by Gunicorn. Note that
# this affects unix socket permissions.
#
# A valid value for the os.umask(mode) call or a string
# compatible with int(value, 0) (0 means Python guesses
# the base, so values like "0", "0xFF", "0022" are valid
# for decimal, hex, and octal representations)
#
# tmp_upload_dir - A directory to store temporary request data when
# requests are read. This will most likely be disappearing soon.
#
# A path to a directory where the process owner can write. Or
# None to signal that Python should choose one on its own.
#
daemon = False
raw_env = [
'DJANGO_SECRET_KEY=something',
'SPAM=eggs',
]
pidfile = None
umask = 0
user = None
group = None
tmp_upload_dir = None
#
# Logging
#
# logfile - The path to a log file to write to.
#
# A path string. "-" means log to stdout.
#
# loglevel - The granularity of log output
#
# A string of "debug", "info", "warning", "error", "critical"
#
errorlog = '-'
loglevel = 'info'
accesslog = '-'
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
#
# Process naming
#
# proc_name - A base to use with setproctitle to change the way
# that Gunicorn processes are reported in the system process
# table. This affects things like 'ps' and 'top'. If you're
# going to be running more than one instance of Gunicorn you'll
# probably want to set a name to tell them apart. This requires
# that you install the setproctitle module.
#
# A string or None to choose a default of something like 'gunicorn'.
#
proc_name = None
#
# Server hooks
#
# post_fork - Called just after a worker has been forked.
#
# A callable that takes a server and worker instance
# as arguments.
#
# pre_fork - Called just prior to forking the worker subprocess.
#
# A callable that accepts the same arguments as after_fork
#
# pre_exec - Called just prior to forking off a secondary
# master process during things like config reloading.
#
# A callable that takes a server instance as the sole argument.
#
def post_fork(server, worker):
server.log.info("Worker spawned (pid: %s)", worker.pid)
def pre_fork(server, worker):
pass
def METHOD_NAME(server):
server.log.info("Forked child, re-executing.")
def when_ready(server):
server.log.info("Server is ready. Spawning workers")
def worker_int(worker):
worker.log.info("worker received INT or QUIT signal")
## get traceback info
import threading, sys, traceback
id2name = {th.ident: th.name for th in threading.enumerate()}
code = []
for threadId, stack in sys._current_frames().items():
code.append("\n# Thread: %s(%d)" % (id2name.get(threadId,""),
threadId))
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename,
lineno, name))
if line:
code.append(" %s" % (line.strip()))
worker.log.debug("\n".join(code))
def worker_abort(worker):
worker.log.info("worker received SIGABRT signal")
def ssl_context(conf, default_ssl_context_factory):
import ssl
# The default SSLContext returned by the factory function is initialized
# with the TLS parameters from config, including TLS certificates and other
# parameters.
context = default_ssl_context_factory()
# The SSLContext can be further customized, for example by enforcing
# minimum TLS version.
context.minimum_version = ssl.TLSVersion.TLSv1_3
# The server can also return a different certificate depending on which
# hostname the client uses. Requires Python 3.7 or later.
def sni_callback(socket, server_hostname, context):
if server_hostname == "foo.127.0.0.1.nip.io":
new_context = default_ssl_context_factory()
new_context.load_cert_chain(certfile="foo.pem", keyfile="foo-key.pem")
socket.context = new_context
context.sni_callback = sni_callback
return context
|
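For context, a config file like the one above is typically passed to Gunicorn on the command line (roughly `gunicorn -c gunicorn_conf.py wsgi:app`). The module and app names below are illustrative placeholders, not taken from the sample; this is just a minimal WSGI callable a sync worker could serve:

```python
# wsgi.py -- minimal WSGI app for illustration only; names are hypothetical.
def app(environ, start_response):
    body = b"Hello from a sync worker\n"
    start_response("200 OK", [
        ("Content-Type", "text/plain"),
        ("Content-Length", str(len(body))),
    ])
    return [body]
```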
299,502 |
can create from shape impl
|
from . import *
from pya import *
class ebeam_dc_halfring_straight(pya.PCellDeclarationHelper):
"""
The PCell declaration for the ebeam_dc_halfring_straight.
Consists of a half-ring with 1 waveguide.
"""
def __init__(self):
# Important: initialize the super class
super(ebeam_dc_halfring_straight, self).__init__()
TECHNOLOGY = get_technology_by_name('EBeam')
# declare the parameters
self.param("silayer", self.TypeLayer, "Si Layer", default = TECHNOLOGY['Si'])
self.param("r", self.TypeDouble, "Radius", default = 10)
self.param("w", self.TypeDouble, "Waveguide Width", default = 0.5)
self.param("g", self.TypeDouble, "Gap", default = 0.2)
self.param("Lc", self.TypeDouble, "Coupler Length", default = 0.0)
self.param("orthogonal_identifier", self.TypeInt, "Orthogonal identifier (1=TE, 2=TM)", default = 1)
self.param("pinrec", self.TypeLayer, "PinRec Layer", default = TECHNOLOGY['PinRec'])
self.param("devrec", self.TypeLayer, "DevRec Layer", default = TECHNOLOGY['DevRec'])
self.param("textl", self.TypeLayer, "Text Layer", default = TECHNOLOGY['Text'])
def display_text_impl(self):
# Provide a descriptive text for the cell
return "ebeam_dc_halfring_straight(R=" + ('%.3f' % self.r) + ",g=" + ('%g' % (1000*self.g)) + ",Lc=" + ('%g' % (1000*self.Lc)) + ",orthogonal_identifier=" + ('%s' % self.orthogonal_identifier) + ")"
def METHOD_NAME(self):
return False
def produce_impl(self):
# This is the main part of the implementation: create the layout
from math import pi, cos, sin
from SiEPIC.utils import arc_wg, arc_wg_xy
from SiEPIC._globals import PIN_LENGTH
# fetch the parameters
dbu = self.layout.dbu
ly = self.layout
shapes = self.cell.shapes
LayerSiN = ly.layer(self.silayer)
LayerPinRecN = ly.layer(self.pinrec)
LayerDevRecN = ly.layer(self.devrec)
TextLayerN = ly.layer(self.textl)
w = int(round(self.w/dbu))
r = int(round(self.r/dbu))
g = int(round(self.g/dbu))
Lc = int(round(self.Lc/dbu))
# draw the half-circle
x = 0
y = r+w+g
self.cell.shapes(LayerSiN).insert(arc_wg_xy(x-Lc/2, y, r, w, 180, 270))
self.cell.shapes(LayerSiN).insert(arc_wg_xy(x+Lc/2, y, r, w, 270, 360))
# Pins on the top side:
pin = Path([Point(-r-Lc/2, y-PIN_LENGTH/2), Point(-r-Lc/2, y+PIN_LENGTH/2)], w)
shapes(LayerPinRecN).insert(pin)
t = Trans(Trans.R0, -r-Lc/2, y)
text = Text ("pin2", t)
shape = shapes(LayerPinRecN).insert(text)
shape.text_size = 0.4/dbu
pin = Path([Point(r+Lc/2, y-PIN_LENGTH/2), Point(r+Lc/2, y+PIN_LENGTH/2)], w)
shapes(LayerPinRecN).insert(pin)
t = Trans(Trans.R0, r+Lc/2, y)
text = Text ("pin4", t)
shape = shapes(LayerPinRecN).insert(text)
shape.text_size = 0.4/dbu
if Lc > 0:
wg1 = Box(-Lc/2, -w/2+w+g, Lc/2, w/2+w+g)
shapes(LayerSiN).insert(wg1)
# Create the waveguide
wg1 = Box(-r-w/2-w-Lc/2, -w/2, r+w/2+w+Lc/2, w/2)
shapes(LayerSiN).insert(wg1)
# Pins on the bus waveguide side:
pin = Path([Point(-r-w/2-w+PIN_LENGTH/2-Lc/2, 0), Point(-r-w/2-w-PIN_LENGTH/2-Lc/2, 0)], w)
shapes(LayerPinRecN).insert(pin)
t = Trans(Trans.R0, -r-w/2-w-Lc/2, 0)
text = Text ("pin1", t)
shape = shapes(LayerPinRecN).insert(text)
shape.text_size = 0.4/dbu
pin = Path([Point(r+w/2+w-PIN_LENGTH/2+Lc/2, 0), Point(r+w/2+w+PIN_LENGTH/2+Lc/2, 0)], w)
shapes(LayerPinRecN).insert(pin)
t = Trans(Trans.R0, r+w/2+w+Lc/2, 0)
text = Text ("pin3", t)
shape = shapes(LayerPinRecN).insert(text)
shape.text_size = 0.4/dbu
# Merge all the waveguide shapes, to avoid any small gaps
layer_temp = self.layout.layer(LayerInfo(913, 0))
shapes_temp = self.cell.shapes(layer_temp)
ShapeProcessor().merge(self.layout,self.cell,LayerSiN,shapes_temp,True,0,True,True)
self.cell.shapes(LayerSiN).clear()
shapes_SiN = self.cell.shapes(LayerSiN)
ShapeProcessor().merge(self.layout,self.cell,layer_temp, shapes_SiN,True,0,True,True)
self.cell.shapes(layer_temp).clear()
# Create the device recognition layer -- make it 1 * wg_width away from the waveguides.
dev = Box(-r-w/2-w-Lc/2, -w/2-w, r+w/2+w+Lc/2, y )
shapes(LayerDevRecN).insert(dev)
# Compact model information
t = Trans(Trans.R0, -r, 0)
text = Text ("Lumerical_INTERCONNECT_library=Design kits/ebeam", t)
shape = shapes(LayerDevRecN).insert(text)
shape.text_size = self.r*0.03/dbu
t = Trans(Trans.R0, -r, r/4)
text = Text ('Component=ebeam_dc_halfring_straight', t)
shape = shapes(LayerDevRecN).insert(text)
shape.text_size = self.r*0.03/dbu
t = Trans(Trans.R0, -r, r/2)
text = Text ('Spice_param:wg_width=%.3g gap=%.3g radius=%.3g Lc=%.3g orthogonal_identifier=%s'% (self.w*1e-6,self.g*1e-6,self.r*1e-6,self.Lc*1e-6, self.orthogonal_identifier), t)
shape = shapes(LayerDevRecN).insert(text)
shape.text_size = self.r*0.03/dbu
print("Done drawing the layout for - ebeam_dc_halfring_straight: %.3f-%g" % ( self.r, self.g) )
|
299,503 |
test ebas file index al l variables
|
from __future__ import annotations
from pathlib import Path
import pytest
from pyaerocom.io.ebas_file_index import EbasFileIndex, EbasSQLRequest
from tests.fixtures.ebas import EBAS_FILEDIR
def test_EbasSQLRequest___init__():
EbasSQLRequest()
@pytest.mark.parametrize(
"var,output",
[
("bla", "('bla')"),
(("bla", "blub"), "('bla', 'blub')"),
(["bla", "blub"], "('bla', 'blub')"),
],
)
def test_EbasSQLRequest__var2sql(var: str | tuple[str] | list[str], output: str):
req = EbasSQLRequest()
assert req._var2sql(var) == output
def test_EbasSQLRequest__var2sql_error():
with pytest.raises(ValueError) as e:
EbasSQLRequest()._var2sql({})
assert str(e.value) == "Invalid value..."
@pytest.mark.parametrize(
"kwargs,output",
[
(
{},
"select distinct filename from variable join station on station.station_code=variable.station_code and not exists (select * from characteristic where var_id=variable.var_id and ct_type='Fraction');",
),
(
{"distinct": False},
"select filename from variable join station on station.station_code=variable.station_code and not exists (select * from characteristic where var_id=variable.var_id and ct_type='Fraction');",
),
],
)
def test_EbasSQLRequest_make_file_query_str(kwargs: dict, output: str):
req = EbasSQLRequest()
assert req.make_file_query_str(**kwargs) == output
@pytest.mark.parametrize(
"kwargs,output",
[
(
{
"altitude_range": [10, 21],
"lon_range": [10, 20],
"lat_range": [10, 20],
"start_date": "2010-01-21",
"stop_date": "2010-01-24",
"statistics": ["arithmetic_mean", "median"],
"datalevel": 2,
},
(
"select distinct filename from variable join station on "
"station.station_code=variable.station_code where "
"station_altitude>10 and station_altitude<21 and "
"station_longitude>10 and station_longitude<20 and "
"station_latitude>10 and station_latitude<20 and "
"first_end < '2010-01-24' and last_start > '2010-01-21' and "
"statistics in ('arithmetic_mean', 'median') and datalevel=2;"
),
),
(
{},
"select distinct filename from variable join station on station.station_code=variable.station_code;",
),
(
{"distinct": False},
"select filename from variable join station on station.station_code=variable.station_code;",
),
(
{"what": ("filename", "station_code", "bla")},
"select distinct filename,station_code,bla from variable join station on station.station_code=variable.station_code;",
),
],
)
def test_EbasSQLRequest_make_query_str(kwargs: dict, output: str):
req = EbasSQLRequest()
assert req.make_query_str(**kwargs) == output
def test_EbasSQLRequest___str__():
assert isinstance(str(EbasSQLRequest()), str)
def test_EbasFileIndex___init__():
EbasFileIndex()
def test_EbasFileIndex_database_getter():
with pytest.raises(AttributeError) as e:
EbasFileIndex().database
error = "EBAS SQLite database file could not be located but is needed in EbasFileIndex class"
assert str(e.value) == error
def test_EbasFileIndex_ALL_STATION_NAMES(ebas: EbasFileIndex):
assert isinstance(ebas.ALL_STATION_NAMES, list)
def test_EbasFileIndex_ALL_STATION_CODES(ebas: EbasFileIndex):
assert isinstance(ebas.ALL_STATION_CODES, list)
def test_EbasFileIndex_ALL_STATISTICS_PARAMS(ebas: EbasFileIndex):
assert isinstance(ebas.ALL_STATISTICS_PARAMS, list)
def METHOD_NAME(ebas: EbasFileIndex):
assert isinstance(ebas.ALL_VARIABLES, list)
def test_EbasFileIndex_ALL_MATRICES(ebas: EbasFileIndex):
assert isinstance(ebas.ALL_MATRICES, list)
def test_EbasFileIndex_ALL_INSTRUMENTS(ebas: EbasFileIndex):
assert isinstance(ebas.ALL_INSTRUMENTS, list)
def test_EbasFileIndex_get_table_names(ebas: EbasFileIndex):
# assert ebas.get_table_names() == ["station", "variable"]
assert ebas.get_table_names() == ["station", "variable", "characteristic"]
table_columns = dict(
station=[
"station_code",
"platform_code",
"station_name",
"station_wdca_id",
"station_gaw_name",
"station_gaw_id",
"station_airs_id",
"station_other_ids",
"station_state_code",
"station_landuse",
"station_setting",
"station_gaw_type",
"station_wmo_region",
"station_latitude",
"station_longitude",
"station_altitude",
],
variable=[
"var_id",
"station_code",
"matrix",
"comp_name",
"statistics",
"instr_type",
"instr_ref",
"method",
"first_start",
"first_end",
"last_start",
"last_end",
"revdate",
"period",
"resolution",
"datalevel",
"filename",
"vnum",
],
characteristic=[
"var_id",
"ct_type",
"datatype",
"val_int",
"val_dbl",
"val_chr",
],
)
@pytest.mark.parametrize("table,column_names", table_columns.items())
def test_EbasFileIndex_get_column_names(ebas: EbasFileIndex, table: str, column_names: list[str]):
assert ebas.get_table_columns(table) == column_names
|
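Rough usage sketch (not part of the test file above): the SQL strings that `EbasSQLRequest` builds can be executed directly against a local copy of the EBAS SQLite index. The database filename below is a hypothetical placeholder:

```python
import sqlite3

from pyaerocom.io.ebas_file_index import EbasSQLRequest

# Build a query using a subset of the keyword arguments exercised in the tests.
query = EbasSQLRequest().make_query_str(lat_range=[10, 20], lon_range=[10, 20], datalevel=2)

# "ebas_file_index.sqlite3" is a placeholder path to a local EBAS index file.
with sqlite3.connect("ebas_file_index.sqlite3") as conn:
    filenames = [row[0] for row in conn.execute(query)]
print(f"{len(filenames)} matching files")
```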
299,504 |
command acknowledged
|
import logging
from zentral.contrib.mdm.inventory import update_inventory_tree
from zentral.contrib.mdm.models import Channel, Platform, TargetArtifact
from .base import register_command, Command
logger = logging.getLogger("zentral.contrib.mdm.commands.installed_application_list")
class InstalledApplicationList(Command):
request_type = "InstalledApplicationList"
reschedule_notnow = True
@staticmethod
def verify_channel_and_device(channel, enrolled_device):
return (
(
channel == Channel.DEVICE
or enrolled_device.platform == Platform.MACOS
) and (
not enrolled_device.user_enrollment
or enrolled_device.platform == Platform.IOS
)
)
def load_kwargs(self):
self.managed_only = self.db_command.kwargs.get("managed_only", False)
self.retries = self.db_command.kwargs.get("retries", 0)
self.update_inventory = self.db_command.kwargs.get("update_inventory", False)
self.apps_to_check = self.db_command.kwargs.get("apps_to_check", [])
self.store_result = not self.artifact_version and not self.apps_to_check
def build_command(self):
command = {"ManagedAppsOnly": self.managed_only,
"Items": ["AdHocCodeSigned",
"AppStoreVendable",
"BetaApp",
"BundleSize",
"DeviceBasedVPP",
"DynamicSize",
"ExternalVersionIdentifier",
"HasUpdateAvailable",
"Identifier",
"Installing",
"IsAppClip",
"IsValidated",
"Name",
"ShortVersion",
"Version"]}
if self.apps_to_check:
command["Identifiers"] = [app["Identifier"] for app in self.apps_to_check]
return command
def _update_device_artifact(self):
# this command was sent to check on an install
found = False
error = False
extra_info = {}
installed = True
app_key_attrs = list(self.apps_to_check[0].keys())
for app in self.response.get("InstalledApplicationList", []):
app_key = {aka: app[aka] for aka in app_key_attrs}
if app_key not in self.apps_to_check:
continue
found = True
for attr in ("DownloadCancelled", "DownloadFailed",
"DownloadPaused", "DownloadWaiting",
"Installing"):
extra_info[attr] = app.get(attr)
if any(app.get(attr) for attr in ("DownloadWaiting", "DownloadPaused", "Installing")):
installed = False
elif any(app.get(attr) for attr in ("DownloadCancelled", "DownloadFailed")):
error = True
break
if found and installed:
self.target.update_target_artifact(
self.artifact_version,
TargetArtifact.Status.INSTALLED,
allow_reinstall=True,
)
elif error:
self.target.update_target_artifact(
self.enrolled_device,
self.artifact_version,
TargetArtifact.Status.FAILED,
extra_info=extra_info
)
else:
if not found:
logger.warning("Artifact version %s was not found.", self.artifact_version.pk)
if self.retries >= 10: # TODO hardcoded
logger.warning("Stop rescheduling %s command for artifact version %s",
self.request_type,
self.artifact_version.pk)
return
# queue a new installed application list command
first_delay_seconds = 15 # TODO hardcoded
self.create_for_target(
self.artifact_version,
kwargs={"apps_to_check": self.apps_to_check,
"retries": self.retries + 1},
queue=True, delay=first_delay_seconds
)
def get_inventory_partial_tree(self):
osx_app_instances = []
for item in self.response.get("InstalledApplicationList", []):
if any(item.get(k, False) for k in ("DownloadCancelled",
"DownloadFailed",
"DownloadPaused",
"DownloadWaiting",
"Installing")):
continue
osx_app_instance_tree = {
"app": {
"bundle_id": item.get("Identifier") or None,
"bundle_name": item.get("Name"),
"bundle_version": item.get("Version"),
"bundle_version_str": item.get("ShortVersion")
}
}
if osx_app_instance_tree not in osx_app_instances:
osx_app_instances.append(osx_app_instance_tree)
return {"osx_app_instances": osx_app_instances}
def METHOD_NAME(self):
if self.artifact_version and self.apps_to_check:
self._update_device_artifact()
elif self.update_inventory:
update_inventory_tree(self)
register_command(InstalledApplicationList)
|
299,505 |
build basic headers
|
"""Synchronous HTTP Client for split API."""
from collections import namedtuple
import requests
import logging
_LOGGER = logging.getLogger(__name__)
HttpResponse = namedtuple('HttpResponse', ['status_code', 'body'])
class HttpClientException(Exception):
"""HTTP Client exception."""
def __init__(self, message):
"""
Class constructor.
:param message: Information on why this exception happened.
:type message: str
"""
Exception.__init__(self, message)
class HttpClient(object):
"""HttpClient wrapper."""
SDK_URL = 'https://sdk.split.io/api'
EVENTS_URL = 'https://events.split.io/api'
AUTH_URL = 'https://auth.split.io/api'
TELEMETRY_URL = 'https://telemetry.split.io/api'
def __init__(self, timeout=None, sdk_url=None, events_url=None, auth_url=None, telemetry_url=None):
"""
Class constructor.
:param timeout: How many milliseconds to wait until the server responds.
:type timeout: int
:param sdk_url: Optional alternative sdk URL.
:type sdk_url: str
:param events_url: Optional alternative events URL.
:type events_url: str
:param auth_url: Optional alternative auth URL.
:type auth_url: str
:param telemetry_url: Optional alternative telemetry URL.
:type telemetry_url: str
"""
self._timeout = timeout/1000 if timeout else None # Convert ms to seconds.
self._urls = {
'sdk': sdk_url if sdk_url is not None else self.SDK_URL,
'events': events_url if events_url is not None else self.EVENTS_URL,
'auth': auth_url if auth_url is not None else self.AUTH_URL,
'telemetry': telemetry_url if telemetry_url is not None else self.TELEMETRY_URL,
}
def _build_url(self, server, path):
"""
Build URL according to server specified.
:param server: Server for which the request is being made.
:type server: str
:param path: URL path to be appended to base host.
:type path: str
:return: A fully qualified URL.
:rtype: str
"""
return self._urls[server] + path
@staticmethod
def METHOD_NAME(sdk_key):
"""
Build basic headers with auth.
:param sdk_key: API token used to identify backend calls.
:type sdk_key: str
"""
return {
'Content-Type': 'application/json',
'Authorization': "Bearer %s" % sdk_key
}
def get(self, server, path, sdk_key, query=None, extra_headers=None): # pylint: disable=too-many-arguments
"""
Issue a get request.
:param server: Whether the request is for SDK server, Events server or Auth server.
:type server: str
:param path: path to append to the host url.
:type path: str
:param sdk_key: sdk key.
:type sdk_key: str
:param query: Query string passed as dictionary.
:type query: dict
:param extra_headers: key/value pairs of possible extra headers.
:type extra_headers: dict
:return: Tuple of status_code & response text
:rtype: HttpResponse
"""
headers = self.METHOD_NAME(sdk_key)
if extra_headers is not None:
headers.update(extra_headers)
try:
response = requests.get(
self._build_url(server, path),
params=query,
headers=headers,
timeout=self._timeout
)
return HttpResponse(response.status_code, response.text)
except Exception as exc: # pylint: disable=broad-except
raise HttpClientException('requests library is throwing exceptions') from exc
def post(self, server, path, sdk_key, body, query=None, extra_headers=None): # pylint: disable=too-many-arguments
"""
Issue a POST request.
:param server: Whether the request is for SDK server or Events server.
:type server: str
:param path: path to append to the host url.
:type path: str
:param sdk_key: sdk key.
:type sdk_key: str
:param body: body sent in the request.
:type body: str
:param query: Query string passed as dictionary.
:type query: dict
:param extra_headers: key/value pairs of possible extra headers.
:type extra_headers: dict
:return: Tuple of status_code & response text
:rtype: HttpResponse
"""
headers = self.METHOD_NAME(sdk_key)
if extra_headers is not None:
headers.update(extra_headers)
try:
response = requests.post(
self._build_url(server, path),
json=body,
params=query,
headers=headers,
timeout=self._timeout
)
return HttpResponse(response.status_code, response.text)
except Exception as exc: # pylint: disable=broad-except
raise HttpClientException('requests library is throwing exceptions') from exc
|
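A hypothetical usage sketch of the client above; the path, SDK key, and query parameters are illustrative placeholders rather than real Split API details:

```python
client = HttpClient(timeout=1500)  # timeout is given in milliseconds
try:
    # 'sdk' selects the SDK base URL; path and key below are placeholders.
    resp = client.get('sdk', '/splitChanges', 'fake-sdk-key', query={'since': -1})
    if resp.status_code == 200:
        print(resp.body)
except HttpClientException as exc:
    print('request failed:', exc)
```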
299,506 |
test instructions
|
# mypy: allow-untyped-defs
from recipe_scrapers.wellplated import WellPlated
from tests import ScraperTest
class TestWellPlatedScraper(ScraperTest):
scraper_class = WellPlated
def test_host(self):
self.assertEqual("wellplated.com", self.harvester_class.host())
def test_title(self):
self.assertEqual("Egg Fried Rice", self.harvester_class.title())
def test_category(self):
self.assertEqual("Main Course", self.harvester_class.category())
def test_author(self):
self.assertEqual("Erin Clarke", self.harvester_class.author())
def test_total_time(self):
self.assertEqual(25, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("6 servings", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://www.wellplated.com/wp-content/uploads/2023/07/Egg-Fried-Rice-Recipe.jpg",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertEqual(
[
"1/4 cup oyster sauce*",
"1 tablespoon soy sauce (I use low-sodium) (plus additional to taste)",
"2 tablespoons unsalted butter (divided, or butter with canola oil spread)",
"3 large eggs (lightly beaten)",
"1 tablespoon canola oil",
"1 large red, yellow, or orange bell pepper (cut into 1/4-inch dice (about 1 1/4 cups))",
"1 bag frozen peas and carrots (thawed (12 ounces))",
"1 cup frozen shelled edamame (thawed (optional, but great for extra protein))",
"2 cloves garlic (minced)",
"2 1/2 cups COLD cooked brown rice (break up large clumps with your fingers)",
"1/2 cup chopped green onions (about 3 medium)",
"Red pepper flakes (Sriracha, or hot sauce of choice (optional))",
],
self.harvester_class.ingredients(),
)
def test_ratings(self):
self.assertEqual(4.96, self.harvester_class.ratings())
def test_cuisine(self):
self.assertEqual("Asian, Chinese", self.harvester_class.cuisine())
def test_description(self):
self.assertEqual(
"How to make easy egg fried rice from scratch that's better than takeout! Simple, healthy ingredients and ready in 15 minutes!",
self.harvester_class.description(),
)
def METHOD_NAME(self):
expected_instructions = "Stir\nIn a small bowl, stir together the oyster sauce and soy sauce. Set aside. Grab a small bowl and a large, flexible rubber spatula, and keep both handy.\nHeat a 12-inch nonstick skillet over medium heat until hot, about 2 minutes. Add 1/2 tablespoon butter and swirl to coat the bottom of the pan. Add the eggs, and cook without stirring until they barely start to set, about 20 seconds. With your spatula, scramble and break the eggs into little, bite-sized pieces. Continue to cook, stirring constantly, until eggs are just cooked through but not yet browned, about 1 additional minute. Transfer eggs to the small bowl and set aside.\nAdd\nReturn the skillet to the heat, and increase the heat to high. Let the skillet warm until it is nice and hot, about 1 minute. Add the canola oil, and swirl to coat. Add the diced bell pepper, and cook until it is crisp-tender, about 4 minutes.\nBrown\nAdd the remaining 1 1/2 tablespoons butter, peas and carrots, and edamame. Cook, stirring constantly, for 30 seconds. Stir in the garlic and cook until fragrant, about 30 seconds (do not let the garlic burn!).\nCook\nAdd the brown rice and the oyster sauce mixture. Continue cooking, stirring constantly and breaking up any remaining rice clumps, until the mixture is heated through, about 3 minutes.\nAdd reserved eggs and green onions. Cook and stir until the mixture is completely heated through, about 1 minute more. Enjoy immediately with a sprinkle of red pepper flakes or dash of hot sauce and additional soy sauce as desired."
self.assertEqual(expected_instructions, self.harvester_class.instructions())
|
299,507 |
get unsupported devices
|
from polychromatic.backends._backend import Backend as Backend
import os
class DummyMatrix(Backend.DeviceItem.Matrix):
"""
A simulated matrix that does absolutely nothing.
"""
def __init__(self, *args):
pass
def set(self, x, y, r, g, b):
pass
def draw(self):
pass
def clear(self):
pass
class DummyDPI(Backend.DeviceItem.DPI):
"""
Simulate DPI tracked using a text file.
"""
def __init__(self, serial):
super().__init__()
self.x = 800
self.y = 1800
self.min = 100
self.max = 16000
self.path = os.path.expanduser(f"~/.cache/polychromatic/test_dpi_{serial}")
# Write initial DPI files
dpi_dir = os.path.dirname(self.path)
if not os.path.exists(dpi_dir):
os.makedirs(dpi_dir)
self.set(self.x, self.y)
def refresh(self):
with open(self.path, "r") as f:
self.x = int(f.readline())
self.y = int(f.readline())
def set(self, x, y):
# Store the new values so refresh() and the file stay in sync.
self.x = x
self.y = y
with open(self.path, "w") as f:
f.write(str(self.x) + "\n")
f.write(str(self.y) + "\n")
class DummyBackend(Backend):
"""
Simulates an imaginary backend with a few devices for testing logic.
Refer to the docstrings for the specifications for each device. There are
some 'unknown devices' too, for a scenario where the device is not supported,
or not set up properly.
"""
def __init__(self, *args):
super().__init__(*args)
self.backend_id = "dummy"
self.logo = "polychromatic.svg"
self.version = "9.9.9"
self.project_url = "https://polychromatic.app"
self.bug_url = "https://github.com/polychromatic/polychromatic/issues"
self.releases_url = "https://github.com/polychromatic/polychromatic/releases"
self.license = "GPLv3"
def init(self):
return True
def METHOD_NAME(self):
unknown_devices = []
for i in range(0, 3):
device = Backend.UnknownDeviceItem()
device.name = "Unknown Device " + str(i)
device.form_factor = self.get_form_factor()
unknown_devices.append(device)
return unknown_devices
def _get_keyboard_device(self):
"""
- 1 Zone
- RGB Support
- Implements every option type/combo:
- None EffectOption
- Static EffectOption with colour only
- Wave EffectOption with parameters only <<<
"""
device = Backend.DeviceItem()
device.name = "Dummy Keyboard"
device.form_factor = self.get_form_factor("keyboard")
device.serial = "DUMMY0001"
device.keyboard_layout = "en_GB"
device.matrix = DummyMatrix()
zone = Backend.DeviceItem.Zone()
zone.zone_id = "main"
zone.label = "Main Zone"
device.zones.append(zone)
class Nothing(Backend.EffectOption):
def __init__(self):
super().__init__()
self.uid = "none"
self.label = "None"
def apply(self):
pass
class Static(Backend.EffectOption):
def __init__(self):
super().__init__()
self.uid = "static"
self.label = "Static"
self.active = True
self.colours_required = 1
self.colours = ["#00FF00"]
def apply(self):
pass
class Wave(Backend.EffectOption):
def __init__(self):
super().__init__()
self.uid = "wave"
self.label = "Wave"
param_1 = Backend.EffectOption.Parameter()
param_1.data = 1
param_1.label = "Left"
param_2 = Backend.EffectOption.Parameter()
param_2.data = 2
param_2.label = "Right"
param_2.active = True
self.parameters = [param_1, param_2]
def apply(self, data):
pass
for option in [Nothing, Static, Wave]:
zone.options.append(option())
return device
def _get_mouse_device(self):
device = Backend.DeviceItem()
device.name = "Dummy Mouse"
device.form_factor = self.get_form_factor("mouse")
device.serial = "DUMMY0002"
device.dpi = DummyDPI(device.serial)
return device
def _get_headset_device(self):
device = Backend.DeviceItem()
device.name = "Dummy Headset"
device.form_factor = self.get_form_factor("headset")
device.serial = "DUMMY0003"
return device
def get_devices(self):
return [
self._get_keyboard_device(),
self._get_mouse_device(),
self._get_headset_device(),
]
def get_device_by_name(self, name):
names = {
"Dummy Keyboard": self._get_keyboard_device,
"Dummy Mouse": self._get_mouse_device,
"Dummy Headset": self._get_headset_device,
}
return names[name]()
def get_device_by_serial(self, serial):
serials = {
"DUMMY0001": self._get_keyboard_device,
"DUMMY0002": self._get_mouse_device,
"DUMMY0003": self._get_headset_device,
}
return serials[serial]()
def troubleshoot(self):
return None
def restart(self):
return True
|
299,508 |
test ensure source per variable multiple sources
|
from unittest import mock
import numpy as np
import pandas as pd
from owid.catalog import DatasetMeta, Origin, Source, Table, TableMeta, VariableMeta
from etl import grapher_helpers as gh
def test_yield_wide_table():
df = pd.DataFrame(
{
"year": [2019, 2020, 2021],
"entity_id": [1, 2, 3],
"_1": [1, 2, 3],
"a__pct": [1, 2, 3],
}
)
table = Table(df.set_index(["entity_id", "year"]))
table._1.metadata.unit = "kg"
table.a__pct.metadata.unit = "pct"
tables = list(gh._yield_wide_table(table))
assert tables[0].reset_index().to_dict(orient="list") == {
"_1": [1, 2, 3],
"entity_id": [1, 2, 3],
"year": [2019, 2020, 2021],
}
assert tables[0].metadata.short_name == "_1"
assert tables[0]["_1"].metadata.unit == "kg"
assert tables[1].reset_index().to_dict(orient="list") == {
"a__pct": [1, 2, 3],
"entity_id": [1, 2, 3],
"year": [2019, 2020, 2021],
}
assert tables[1].metadata.short_name == "a__pct"
assert tables[1]["a__pct"].metadata.unit == "pct"
def test_yield_wide_table_with_dimensions():
df = pd.DataFrame(
{
"year": [2019, 2019, 2019],
"entity_id": [1, 1, 1],
"age": ["10-18", "19-25", "19-25"],
"deaths": [1, 2, 3],
}
)
table = Table(df.set_index(["entity_id", "year", "age"]))
table.deaths.metadata.unit = "people"
table.deaths.metadata.title = "Deaths"
grapher_tables = list(gh._yield_wide_table(table, dim_titles=["Age group"]))
t = grapher_tables[0]
assert t.columns[0] == "deaths__age_10_18"
assert t[t.columns[0]].metadata.title == "Deaths - Age group: 10-18"
t = grapher_tables[1]
assert t.columns[0] == "deaths__age_19_25"
assert t[t.columns[0]].metadata.title == "Deaths - Age group: 19-25"
def test_long_to_wide_tables():
deaths_meta = VariableMeta(title="Deaths", unit="people")
births_meta = VariableMeta(title="Births", unit="people")
long = pd.DataFrame(
{
"year": [2019, 2019, 2019, 2019],
"entity_id": [1, 1, 1, 1],
"variable": ["deaths", "deaths", "births", "births"],
"meta": [deaths_meta, deaths_meta, births_meta, births_meta],
"value": [1, 2, 3, 4],
"sex": ["male", "female", "male", "female"],
}
).set_index(["year", "entity_id", "sex"])
table = Table(long, metadata=TableMeta(dataset=DatasetMeta(sources=[Source()])))
grapher_tables = list(gh.long_to_wide_tables(table))
t = grapher_tables[0]
assert t.index.names == ["year", "entity_id", "sex"]
assert t.columns[0] == "births"
assert t[t.columns[0]].metadata.title == "Births"
t = grapher_tables[1]
assert t.index.names == ["year", "entity_id", "sex"]
assert t.columns[0] == "deaths"
assert t[t.columns[0]].metadata.title == "Deaths"
def test_contains_inf():
assert gh.contains_inf(pd.Series([1, np.inf]))
assert not gh.contains_inf(pd.Series([1, 2]))
assert not gh.contains_inf(pd.Series(["a", 2]))
assert not gh.contains_inf(pd.Series(["a", "b"]))
assert not gh.contains_inf(pd.Series(["a", "b"]).astype("category"))
def METHOD_NAME():
table = Table(
pd.DataFrame(
{
"deaths": [0, 1],
}
)
)
table.metadata.dataset = DatasetMeta(
description="Dataset description", sources=[Source(name="s3", description="s3 description")]
)
table.metadata.description = "Table description"
# multiple sources
table.deaths.metadata.sources = [
Source(name="s1", description="s1 description"),
Source(name="s2", description="s2 description"),
]
new_table = gh._ensure_source_per_variable(table)
assert len(new_table.deaths.metadata.sources) == 1
source = new_table.deaths.metadata.sources[0]
assert source.name == "s1; s2"
assert source.description == "s1 description\ns2 description"
# no sources
table.deaths.metadata.sources = []
new_table = gh._ensure_source_per_variable(table)
assert len(new_table.deaths.metadata.sources) == 1
source = new_table.deaths.metadata.sources[0]
assert source.name == "s3"
assert source.description == "Dataset description\ns3 description"
# sources have no description, but table has
table.deaths.metadata.sources = [Source(name="s1")]
new_table = gh._ensure_source_per_variable(table)
assert len(new_table.deaths.metadata.sources) == 1
source = new_table.deaths.metadata.sources[0]
assert source.name == "s1"
assert source.description == "Table description"
def test_combine_metadata_sources():
sources = [
Source(name="s1", description="s1 description"),
Source(name="s2", description="s2 description"),
]
source = gh.combine_metadata_sources(sources)
assert source.name == "s1; s2"
assert source.description == "s1 description\ns2 description"
# make sure we haven't modified original sources
assert sources[0].name == "s1"
def _sample_table() -> Table:
table = Table(
pd.DataFrame(
{
"deaths": [0, 1],
"year": [2019, 2020],
"country": ["France", "Poland"],
"sex": ["female", "male"],
}
)
)
table.metadata.dataset = DatasetMeta(
description="Dataset description", sources=[Source(name="s3", description="s3 description")]
)
table.metadata.description = "Table description"
return table
def test_adapt_table_for_grapher_multiindex():
with mock.patch("etl.grapher_helpers._get_entities_from_db") as mock_get_entities_from_db:
mock_get_entities_from_db.return_value = {"Poland": 1, "France": 2}
table = _sample_table()
out_table = gh._adapt_table_for_grapher(table)
assert out_table.index.names == ["entity_id", "year"]
assert out_table.columns.tolist() == ["deaths", "sex"]
table = _sample_table().set_index(["country", "year", "sex"])
out_table = gh._adapt_table_for_grapher(table)
assert out_table.index.names == ["entity_id", "year", "sex"]
assert out_table.columns.tolist() == ["deaths"]
table = _sample_table().set_index(["sex"])
out_table = gh._adapt_table_for_grapher(table)
assert out_table.index.names == ["entity_id", "year", "sex"]
assert out_table.columns.tolist() == ["deaths"]
def test_add_dataset_origins_to_variables():
table = _sample_table()
assert table.deaths.metadata.origins == []
origins = [Origin(dataset_title_owid="A")]
table.metadata.dataset = DatasetMeta(origins=origins)
table = gh._add_dataset_origins_to_variables(table)
assert table.deaths.metadata.origins == origins
|
299,509 |
test plus definition
|
import textwrap
import yaml
from collections import OrderedDict
import unittest
from dbt.config.selectors import SelectorDict
from dbt.exceptions import DbtSelectorsError
def get_selector_dict(txt: str) -> OrderedDict:
txt = textwrap.dedent(txt)
dct = OrderedDict(yaml.safe_load(txt))
return dct
class SelectorUnitTest(unittest.TestCase):
def test_compare_cli_non_cli(self):
dct = get_selector_dict(
"""\
selectors:
- name: nightly_diet_snowplow
description: "This uses more CLI-style syntax"
definition:
union:
- intersection:
- '@source:snowplow'
- 'tag:nightly'
- 'models/export'
- exclude:
- intersection:
- 'package:snowplow'
- 'config.materialized:incremental'
- export_performance_timing
- name: nightly_diet_snowplow_full
description: "This is a fuller YAML specification"
definition:
union:
- intersection:
- method: source
value: snowplow
childrens_parents: true
- method: tag
value: nightly
- method: path
value: models/export
- exclude:
- intersection:
- method: package
value: snowplow
- method: config.materialized
value: incremental
- method: fqn
value: export_performance_timing
"""
)
sel_dict = SelectorDict.parse_from_selectors_list(dct["selectors"])
assert sel_dict
with_strings = sel_dict["nightly_diet_snowplow"]["definition"]
no_strings = sel_dict["nightly_diet_snowplow_full"]["definition"]
self.assertEqual(with_strings, no_strings)
def test_single_string_definition(self):
dct = get_selector_dict(
"""\
selectors:
- name: nightly_selector
definition:
'tag:nightly'
"""
)
sel_dict = SelectorDict.parse_from_selectors_list(dct["selectors"])
assert sel_dict
expected = {"method": "tag", "value": "nightly"}
definition = sel_dict["nightly_selector"]["definition"]
self.assertEqual(expected, definition)
def test_single_key_value_definition(self):
dct = get_selector_dict(
"""\
selectors:
- name: nightly_selector
definition:
tag: nightly
"""
)
sel_dict = SelectorDict.parse_from_selectors_list(dct["selectors"])
assert sel_dict
expected = {"method": "tag", "value": "nightly"}
definition = sel_dict["nightly_selector"]["definition"]
self.assertEqual(expected, definition)
def test_parent_definition(self):
dct = get_selector_dict(
"""\
selectors:
- name: kpi_nightly_selector
definition:
'+exposure:kpi_nightly'
"""
)
sel_dict = SelectorDict.parse_from_selectors_list(dct["selectors"])
assert sel_dict
expected = {"method": "exposure", "value": "kpi_nightly", "parents": True}
definition = sel_dict["kpi_nightly_selector"]["definition"]
self.assertEqual(expected, definition)
def METHOD_NAME(self):
dct = get_selector_dict(
"""\
selectors:
- name: my_model_children_selector
definition:
'my_model+2'
"""
)
sel_dict = SelectorDict.parse_from_selectors_list(dct["selectors"])
assert sel_dict
expected = {"method": "fqn", "value": "my_model", "children": True, "children_depth": "2"}
definition = sel_dict["my_model_children_selector"]["definition"]
self.assertEqual(expected, definition)
def test_selector_definition(self):
dct = get_selector_dict(
"""\
selectors:
- name: default
definition:
union:
- intersection:
- tag: foo
- tag: bar
- name: inherited
definition:
method: selector
value: default
"""
)
sel_dict = SelectorDict.parse_from_selectors_list(dct["selectors"])
assert sel_dict
definition = sel_dict["default"]["definition"]
expected = sel_dict["inherited"]["definition"]
self.assertEqual(expected, definition)
def test_selector_definition_with_exclusion(self):
dct = get_selector_dict(
"""\
selectors:
- name: default
definition:
union:
- intersection:
- tag: foo
- tag: bar
- name: inherited
definition:
union:
- method: selector
value: default
- exclude:
- tag: bar
- name: comparison
definition:
union:
- union:
- intersection:
- tag: foo
- tag: bar
- exclude:
- tag: bar
"""
)
sel_dict = SelectorDict.parse_from_selectors_list((dct["selectors"]))
assert sel_dict
definition = sel_dict["inherited"]["definition"]
expected = sel_dict["comparison"]["definition"]
self.assertEqual(expected, definition)
def test_missing_selector(self):
dct = get_selector_dict(
"""\
selectors:
- name: inherited
definition:
method: selector
value: default
"""
)
with self.assertRaises(DbtSelectorsError) as err:
SelectorDict.parse_from_selectors_list((dct["selectors"]))
self.assertEqual(
"Existing selector definition for default not found.", str(err.exception.msg)
)
|
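A rough sketch of the kind of parsing the `'my_model+2'` case above exercises; dbt's real graph-selector parser is considerably more general, so the regex-based helper below is purely illustrative:

```python
import re

def parse_children_spec(spec: str) -> dict:
    """Parse 'name+N' style selectors into the dict shape asserted in the tests."""
    match = re.fullmatch(r"(?P<value>[\w.]+)\+(?P<depth>\d*)", spec)
    if not match:
        return {"method": "fqn", "value": spec}
    parsed = {"method": "fqn", "value": match.group("value"), "children": True}
    if match.group("depth"):
        parsed["children_depth"] = match.group("depth")
    return parsed

assert parse_children_spec("my_model+2") == {
    "method": "fqn", "value": "my_model", "children": True, "children_depth": "2"
}
```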
299,510 |
report
|
from datetime import timedelta
from socket import gethostbyname
from time import sleep
from django.conf import settings
from django.core.mail import get_connection, mail_admins
from django.core.management import BaseCommand
from django.utils import timezone
import dns.exception, dns.message, dns.query, dns.rdatatype
from desecapi import pdns
from desecapi.models import Domain
def query_serial(zone, server):
"""
Checks a zone's serial on a server.
:return: serial if received; None if the server did not know; False on error
"""
query = dns.message.make_query(zone, "SOA")
try:
response = dns.query.tcp(query, server, timeout=5)
except dns.exception.Timeout:
return False
for rrset in response.answer:
if rrset.rdtype == dns.rdatatype.SOA:
return int(rrset[0].serial)
return None
class Command(BaseCommand):
help = "Check secondaries for consistency with nsmaster."
def __init__(self, *args, **kwargs):
self.servers = {
gethostbyname(server): server for server in settings.WATCHDOG_SECONDARIES
}
super().__init__(*args, **kwargs)
def add_arguments(self, parser):
parser.add_argument(
"domain-name",
nargs="*",
help="Domain name to check. If omitted, will check all recently published domains.",
)
parser.add_argument(
"--delay",
type=int,
default=120,
help="Delay SOA checks to allow pending AXFRs to finish.",
)
parser.add_argument(
"--window",
type=int,
default=settings.WATCHDOG_WINDOW_SEC,
help="Check domains that were published no longer than this many seconds ago.",
)
def find_outdated_servers(self, zone, local_serial):
"""
Returns a dict, the key being the outdated secondary name, and the value being the node's current zone serial.
"""
outdated = {}
for server in self.servers:
remote_serial = query_serial(zone, server)
if not remote_serial or remote_serial < local_serial:
outdated[self.servers[server]] = remote_serial
return outdated
def handle(self, *args, **options):
threshold = timezone.now() - timedelta(seconds=options["window"])
recent_domain_names = Domain.objects.filter(
published__gt=threshold
).values_list("name", flat=True)
serials = {
zone: s
for zone, s in pdns.get_serials().items()
if zone.rstrip(".") in recent_domain_names
}
if options["domain-name"]:
serials = {
zone: serial
for zone, serial in serials.items()
if zone.rstrip(".") in options["domain-name"]
}
print(
"Sleeping for {} seconds before checking {} domains ...".format(
options["delay"], len(serials)
)
)
sleep(options["delay"])
outdated_zone_count = 0
outdated_secondaries = set()
output = []
timeouts = {}
for zone, local_serial in serials.items():
outdated_serials = self.find_outdated_servers(zone, local_serial)
for server, serial in outdated_serials.items():
if serial is False:
timeouts.setdefault(server, [])
timeouts[server].append(zone)
outdated_serials = {
k: serial
for k, serial in outdated_serials.items()
if serial is not False
}
if outdated_serials:
outdated_secondaries.update(outdated_serials.keys())
output.append(
f"{zone} ({local_serial}) is outdated on {outdated_serials}"
)
print(output[-1])
outdated_zone_count += 1
else:
print(f"{zone} ok")
output.append(
f"Checked {len(serials)} domains, {outdated_zone_count} were outdated."
)
print(output[-1])
self.METHOD_NAME(outdated_secondaries, output, timeouts)
def METHOD_NAME(self, outdated_secondaries, output, timeouts):
if not outdated_secondaries and not timeouts:
return
subject = f'{timeouts and "CRITICAL ALERT" or "ALERT"} {len(outdated_secondaries)} secondaries out of sync'
message = ""
if timeouts:
message += f"The following servers had timeouts:\n\n{timeouts}\n\n"
if outdated_secondaries:
message += f"The following {len(outdated_secondaries)} secondaries are out of sync:\n"
for outdated_secondary in outdated_secondaries:
message += f"* {outdated_secondary}\n"
message += "\n"
message += f"Current secondary IPs: {self.servers}\n"
message += "\n".join(output)
mail_admins(
subject,
message,
connection=get_connection("django.core.mail.backends.smtp.EmailBackend"),
)
|
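Illustrative standalone use of `query_serial` above (the zone and nameserver IP are placeholders); its return value distinguishes a timeout (`False`), an unknown zone (`None`), and a known serial:

```python
serial = query_serial("example.org.", "192.0.2.53")
if serial is False:
    print("timeout while querying the secondary")
elif serial is None:
    print("the server does not know the zone")
else:
    print("zone serial:", serial)
```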
299,511 |
call async
|
"""ZEO Protocol.
A ZEO protocol instance can be used as a connection.
It exchanges ``bytes`` messages.
Messages are sent via the methods
``write_message`` (send a single message) and
``write_message_iter`` (send the messages generated by an iterator).
Received messages are reported via callbacks.
Messages are received in the same order as they have been written;
especially, the messages written with ``write_message_iter``
are received as contiguous messages.
The first message transmits the protocol version.
Its callback is ``finish_connection``.
The first byte of the protocol version message identifies
an encoding type; the remaining bytes specify the version.
``finish_connection`` is expected to set up
methods ``encode`` and ``decode`` corresponding to the
encoding type.
Followup messages carry encoded tuples
*msgid*, *async_flag*, *name*, *args*
representing either calls (synchronous or asynchronous) or replies.
Their callback is ``message_received``.
ZEO protocol instances can be used concurrently from coroutines (executed
in the same thread).
They are not thread safe.
The ZEO protocol sits on top of a sized message protocol.
The ZEO protocol has client and server variants.
"""
from asyncio import Protocol
import logging
from .smp import SizedMessageProtocol
logger = logging.getLogger(__name__)
class ZEOBaseProtocol(Protocol):
"""ZEO protocol base class for the common features."""
protocol_version = None
def __init__(self, loop, name):
self.loop = loop
self.name = name
# API -- defined in ``connection_made``
# write_message(message)
# write_message_iter(message_iter)
def METHOD_NAME(self, method, args):
"""call method named *method* asynchronously with *args*."""
self.write_message(self.encode(0, True, method, args))
def call_async_iter(self, it):
self.write_message_iter(self.encode(0, True, method, args)
for method, args in it)
def get_peername(self):
return self.sm_protocol.transport.get_extra_info('peername')
def protocol_factory(self):
return self
closing = None # ``None`` or closed future
sm_protocol = None
def close(self):
"""schedule closing, return closed future."""
# with ``asyncio``, ``close`` only schedules the closing;
# close completion is signalled via a call to ``connection_lost``.
closing = self.closing
if closing is None:
closing = self.closing = self.loop.create_future()
# can get closed before ``sm_protocol`` set up
if self.sm_protocol is not None:
# will eventually cause ``connection_lost``
self.sm_protocol.close()
else:
closing.set_result(True)
elif self.sm_protocol is not None:
self.sm_protocol.close() # no problem if repeated
return closing
def __repr__(self):
cls = self.__class__
return f'{cls.__module__}.{cls.__name__}({self.name})'
# to be defined by deriving classes
# def finish_connection(protocol_version_message)
# def message_received(message)
# ``Protocol`` responsibilities -- defined in ``connection_made``
# data_received
# eof_received
# pause_writing
# resume_writing
def connection_made(self, transport):
logger.info("Connected %s", self)
# set up lower level sized message protocol
# creates reference cycle
smp = self.sm_protocol = SizedMessageProtocol(self._first_message)
smp.connection_made(transport) # takes over ``transport``
self.data_received = smp.data_received
self.eof_received = smp.eof_received
self.pause_writing = smp.pause_writing
self.resume_writing = smp.resume_writing
self.write_message = smp.write_message
self.write_message_iter = smp.write_message_iter
# In real life ``connection_lost`` is only called by
# the transport and ``asyncio.Protocol`` guarantees that
# it is called exactly once (if ``connection_made`` has
# been called) or not at all.
# Some tests, however, call ``connection_lost`` themselves.
# The following attribute helps to ensure that ``connection_lost``
# is called exactly once.
connection_lost_called = False
def connection_lost(self, exc):
"""The call signals close completion."""
self.connection_lost_called = True
self.sm_protocol.connection_lost(exc)
closing = self.closing
if closing is None:
closing = self.closing = self.loop.create_future()
if not closing.done():
closing.set_result(True)
# internal
def _first_message(self, protocol_version):
self.sm_protocol.set_receive(self.message_received)
self.finish_connection(protocol_version)
# ``uvloop`` workaround
# We define ``data_received`` in ``connection_made``.
# ``uvloop``, however, caches ``protocol.data_received`` before
    # it calls ``connection_made``; as a consequence, data is not
    # received.
# The method below is overridden in ``connection_made``.
def data_received(self, data):
self.data_received(data) # not an infinite loop, because overridden
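# A minimal sketch (not part of the original module) of a deriving class, to
# illustrate the two callbacks named above.  The pickle-based ``encode`` and
# ``decode`` are only an assumption for illustration; the real encoding is
# chosen from the first byte of the protocol version message.
class _SketchProtocol(ZEOBaseProtocol):
    def finish_connection(self, protocol_version_message):
        import pickle  # local import keeps the sketch self-contained
        self.protocol_version = protocol_version_message
        self.encode = lambda *args: pickle.dumps(args)
        self.decode = pickle.loads
    def message_received(self, message):
        msgid, async_flag, name, args = self.decode(message)
        logger.debug("received %s %s %s %s", msgid, async_flag, name, args)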
|
299,512 |
transform
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from typing import Any, Tuple
import numpy as np
import lale.docstrings
import lale.helpers
import lale.operators
from lale.expressions import collect_set, it, replace
from lale.lib.dataframe import count, get_columns
from lale.lib.sklearn import ordinal_encoder
from .aggregate import Aggregate
from .map import Map
from .monoid import Monoid, MonoidableOperator
class _OrdinalEncoderMonoid(Monoid):
def __init__(self, *, n_samples_seen_, feature_names_in_, categories_):
self.n_samples_seen_ = n_samples_seen_
self.feature_names_in_ = feature_names_in_
self.categories_ = categories_
def combine(self, other: "_OrdinalEncoderMonoid"):
n_samples_seen_ = self.n_samples_seen_ + other.n_samples_seen_
assert list(self.feature_names_in_) == list(other.feature_names_in_)
assert len(self.categories_) == len(other.categories_)
combined_categories = [
np.sort(
np.unique(np.concatenate([self.categories_[i], other.categories_[i]]))
)
for i in range(len(self.categories_))
]
return _OrdinalEncoderMonoid(
n_samples_seen_=n_samples_seen_,
feature_names_in_=self.feature_names_in_,
categories_=combined_categories,
)
class _OrdinalEncoderImpl(MonoidableOperator[_OrdinalEncoderMonoid]):
def __init__(
self,
*,
categories="auto",
dtype="float64",
handle_unknown="error",
unknown_value=None,
):
self._hyperparams = {
"categories": categories,
"dtype": dtype,
"handle_unknown": handle_unknown,
"unknown_value": unknown_value,
}
def METHOD_NAME(self, X):
if self._transformer is None:
self._transformer = self._build_transformer()
return self._transformer.METHOD_NAME(X)
@property
def n_samples_seen_(self):
return getattr(self._monoid, "n_samples_seen_", 0)
@property
def categories_(self):
return getattr(self._monoid, "categories_", None)
@property
def feature_names_in_(self):
return getattr(self._monoid, "feature_names_in_", None)
def from_monoid(self, monoid: _OrdinalEncoderMonoid):
self._monoid = monoid
self.n_features_in_ = len(monoid.feature_names_in_)
self._transformer = None
def _build_transformer(self):
assert self._monoid is not None
result = Map(
columns={
col_name: replace(
it[col_name],
{
cat_value: cat_idx
for cat_idx, cat_value in enumerate(
self._monoid.categories_[col_idx]
)
},
handle_unknown="use_encoded_value",
unknown_value=self._hyperparams["unknown_value"],
)
for col_idx, col_name in enumerate(self._monoid.feature_names_in_)
}
)
return result
def to_monoid(self, batch: Tuple[Any, Any]):
hyperparams = self._hyperparams
X, _ = batch
n_samples_seen_ = count(X)
feature_names_in_ = get_columns(X)
if hyperparams["categories"] == "auto":
agg_op = Aggregate(
columns={c: collect_set(it[c]) for c in feature_names_in_}
)
agg_data = agg_op.METHOD_NAME(X)
if lale.helpers._is_spark_df(agg_data):
agg_data = agg_data.toPandas()
categories_ = [np.sort(agg_data.loc[0, c]) for c in feature_names_in_]
else:
categories_ = hyperparams["categories"]
return _OrdinalEncoderMonoid(
n_samples_seen_=n_samples_seen_,
feature_names_in_=feature_names_in_,
categories_=categories_,
)
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Relational algebra reimplementation of scikit-learn's `OrdinalEncoder`_ transformer that encodes categorical features as numbers.
Works on both pandas and Spark dataframes by using `Aggregate`_ for `fit` and `Map`_ for `transform`, which in turn use the appropriate backend.
.. _`OrdinalEncoder`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html
.. _`Aggregate`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.aggregate.html
.. _`Map`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.map.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.ordinal_encoder.html",
"type": "object",
"tags": {
"pre": ["categoricals"],
"op": ["transformer", "interpretable"],
"post": [],
},
"properties": {
"hyperparams": ordinal_encoder._hyperparams_schema,
"input_fit": ordinal_encoder._input_fit_schema,
"input_transform": ordinal_encoder._input_transform_schema,
"output_transform": ordinal_encoder._output_transform_schema,
},
}
OrdinalEncoder = lale.operators.make_operator(_OrdinalEncoderImpl, _combined_schemas)
OrdinalEncoder = typing.cast(
lale.operators.PlannedIndividualOp,
OrdinalEncoder.customize_schema(
encode_unknown_with=None,
dtype={
"enum": ["float64"],
"description": "This implementation only supports `dtype='float64'`.",
"default": "float64",
},
handle_unknown={
"enum": ["use_encoded_value"],
"description": "This implementation only supports `handle_unknown='use_encoded_value'`.",
"default": "use_encoded_value",
},
unknown_value={
"anyOf": [
{"type": "integer"},
{"enum": [np.nan, None]},
],
"description": "The encoded value of unknown categories to use when `handle_unknown='use_encoded_value'`. It has to be distinct from the values used to encode any of the categories in fit. If set to np.nan, the dtype hyperparameter must be a float dtype.",
},
),
)
lale.docstrings.set_docstrings(OrdinalEncoder)
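# A hedged usage sketch (not part of the original module).  It assumes the
# usual lale trainable -> trained flow; the column names and data are made up.
def _example_usage():
    import pandas as pd
    df = pd.DataFrame({"color": ["red", "blue", "red"], "size": ["S", "M", "L"]})
    trainable = OrdinalEncoder(
        handle_unknown="use_encoded_value", unknown_value=-1
    )
    trained = trainable.fit(df)
    return trained.transform(df)  # categories replaced by their indices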
|
299,513 |
check swift
|
"""
Swift utility class
===================
Author: Anthony Stanton <[email protected]>
"""
import logging
import sys
from errno import EEXIST
from os import makedirs
from os.path import dirname, isdir
import salt.utils.files
# Get logging started
log = logging.getLogger(__name__)
HAS_SWIFT = False
try:
from swiftclient import client
HAS_SWIFT = True
except ImportError:
pass
def METHOD_NAME():
return HAS_SWIFT
def mkdirs(path):
try:
makedirs(path)
except OSError as err:
if err.errno != EEXIST:
raise
# we've been playing fast and loose with kwargs, but the swiftclient isn't
# going to accept any old thing
def _sanitize(kwargs):
variables = (
"user",
"key",
"authurl",
"retries",
"preauthurl",
"preauthtoken",
"snet",
"starting_backoff",
"max_backoff",
"tenant_name",
"os_options",
"auth_version",
"cacert",
"insecure",
"ssl_compression",
)
ret = {}
for var in kwargs:
if var in variables:
ret[var] = kwargs[var]
return ret
class SaltSwift:
"""
Class for all swiftclient functions
"""
def __init__(
self, user, tenant_name, auth_url, password=None, auth_version=2, **kwargs
):
"""
Set up openstack credentials
"""
if not HAS_SWIFT:
log.error(
"Error:: unable to find swiftclient. Try installing it from the"
" appropriate repository."
)
return None
self.kwargs = kwargs.copy()
self.kwargs["user"] = user
self.kwargs["password"] = password
self.kwargs["tenant_name"] = tenant_name
self.kwargs["authurl"] = auth_url
self.kwargs["auth_version"] = auth_version
if "key" not in self.kwargs:
self.kwargs["key"] = password
self.kwargs = _sanitize(self.kwargs)
self.conn = client.Connection(**self.kwargs)
def get_account(self):
"""
List Swift containers
"""
try:
listing = self.conn.get_account()
return listing
except Exception as exc: # pylint: disable=broad-except
log.error("There was an error::")
if hasattr(exc, "code") and hasattr(exc, "msg"):
log.error(" Code: %s: %s", exc.code, exc.msg)
log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))())
return False
def get_container(self, cont):
"""
List files in a Swift container
"""
try:
listing = self.conn.get_container(cont)
return listing
except Exception as exc: # pylint: disable=broad-except
log.error("There was an error::")
if hasattr(exc, "code") and hasattr(exc, "msg"):
log.error(" Code: %s: %s", exc.code, exc.msg)
log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))())
return False
def put_container(self, cont):
"""
Create a new Swift container
"""
try:
self.conn.put_container(cont)
return True
except Exception as exc: # pylint: disable=broad-except
log.error("There was an error::")
if hasattr(exc, "code") and hasattr(exc, "msg"):
log.error(" Code: %s: %s", exc.code, exc.msg)
log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))())
return False
def delete_container(self, cont):
"""
Delete a Swift container
"""
try:
self.conn.delete_container(cont)
return True
except Exception as exc: # pylint: disable=broad-except
log.error("There was an error::")
if hasattr(exc, "code") and hasattr(exc, "msg"):
log.error(" Code: %s: %s", exc.code, exc.msg)
log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))())
return False
def post_container(self, cont, metadata=None):
"""
Update container metadata
"""
def head_container(self, cont):
"""
Get container metadata
"""
def get_object(self, cont, obj, local_file=None, return_bin=False):
"""
Retrieve a file from Swift
"""
try:
if local_file is None and return_bin is False:
return False
headers, body = self.conn.get_object(cont, obj, resp_chunk_size=65536)
if return_bin is True:
fp = sys.stdout
else:
dirpath = dirname(local_file)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
# pylint: disable=resource-leakage
fp = salt.utils.files.fopen(local_file, "wb")
# pylint: enable=resource-leakage
read_length = 0
for chunk in body:
read_length += len(chunk)
fp.write(chunk)
fp.close()
return True
# ClientException
# file/dir exceptions
except Exception as exc: # pylint: disable=broad-except
log.error("There was an error::")
if hasattr(exc, "code") and hasattr(exc, "msg"):
log.error(" Code: %s: %s", exc.code, exc.msg)
log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))())
return False
def put_object(self, cont, obj, local_file):
"""
Upload a file to Swift
"""
try:
with salt.utils.files.fopen(local_file, "rb") as fp_:
self.conn.put_object(cont, obj, fp_)
return True
except Exception as exc: # pylint: disable=broad-except
log.error("There was an error::")
if hasattr(exc, "code") and hasattr(exc, "msg"):
log.error(" Code: %s: %s", exc.code, exc.msg)
log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))())
return False
def delete_object(self, cont, obj):
"""
Delete a file from Swift
"""
try:
self.conn.delete_object(cont, obj)
return True
except Exception as exc: # pylint: disable=broad-except
log.error("There was an error::")
if hasattr(exc, "code") and hasattr(exc, "msg"):
log.error(" Code: %s: %s", exc.code, exc.msg)
log.error(" Content: \n%s", getattr(exc, "read", lambda: str(exc))())
return False
def head_object(self, cont, obj):
"""
Get object metadata
"""
def post_object(self, cont, obj, metadata):
"""
Update object metadata
"""
|
299,514 |
push log capturer
|
import abc
import colorlog
import contextlib
import inspect
import io
import logging
import sys
import traceback
from magma.backend.util import make_relative
from magma.common import Stack
from magma.config import config, EnvConfig
config._register(
log_stream=EnvConfig("MAGMA_LOG_STREAM", "stderr"),
log_level=EnvConfig("MAGMA_LOG_LEVEL", "INFO"),
include_traceback=EnvConfig("MAGMA_INCLUDE_WIRE_TRACEBACK", False, bool),
traceback_limit=EnvConfig("MAGMA_ERROR_TRACEBACK_LIMIT", 5, int),
)
_staged_logs_stack = Stack()
_log_capturer_stack = Stack()
def _make_bold(string):
return f"\033[1m{string}\033[0m"
def _get_source_line(filename, lineno):
with open(filename, "r") as f:
return f.readlines()[lineno - 1]
def _attach_debug_info(msg, debug_info):
file = debug_info.filename
line = debug_info.lineno
line_info = _make_bold(f"{make_relative(file)}:{line}")
msg = f"{line_info}: {msg}"
try:
source = _get_source_line(file, line).rstrip("\n")
source = f">> {source}"
except FileNotFoundError:
source = f"(Could not file file {file})"
msg = f"{msg}\n{source}"
return msg
def _attach_traceback(msg, frame_selector, limit):
"""
Attaches traceback string to @msg and returns new string.
@frame_selector is a function which takes a list of stack frames and selects
one. For example, it could select the frame based on an index, or based on
the function names.
"""
frame = frame_selector(inspect.stack()).frame
with io.StringIO() as io_:
traceback.print_stack(f=frame, limit=limit, file=io_)
tb = io_.getvalue()
msg = f"{msg}\n{tb}"
return msg
def _frame_selector(frames):
return frames[3]
def _get_additional_kwarg(kwargs, key):
try:
value = kwargs.pop(key)
return value
except KeyError:
return None
def get_staged_logs_stack() -> Stack:
global _staged_logs_stack
return _staged_logs_stack
class _MagmaLogger(logging.Logger):
"""
Derivative of logging.Logger class, with two additional keyword args:
* 'debug_info': Tuple of (file_name, line_no). If 'debug_info' is included,
this source-level information is logged along with the message.
* 'include_traceback': If True, a traceback is printed along with the
message.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._raw = False
@property
def raw(self) -> bool:
return self._raw
@raw.setter
def raw(self, raw: bool):
self._raw = raw
@contextlib.contextmanager
def as_raw(self):
prev_raw = self.raw
self.raw = True
try:
yield self
finally:
self.raw = prev_raw
def _log(self, level, msg, args, **kwargs):
if not self.raw and self._staged_log(level, msg, args, **kwargs):
return
self._raw_log(level, msg, args, **kwargs)
def _staged_log(self, level, msg, args, **kwargs) -> bool:
staged_logs_stack = get_staged_logs_stack()
try:
staged_logs = staged_logs_stack.peek()
except IndexError:
return False
staged_logs.append((self, level, msg, args, kwargs))
return True
def _capture_log(self, level, msg, args, **kwargs):
try:
log_capturer = get_log_capturer()
except IndexError:
return
log_capturer.add_log((level, msg, args, kwargs))
def _raw_log(self, level, msg, args, **kwargs):
debug_info = _get_additional_kwarg(kwargs, "debug_info")
if debug_info:
msg = _attach_debug_info(msg, debug_info)
include_traceback = _get_additional_kwarg(kwargs, "include_traceback")
if include_traceback or config.include_traceback:
msg = _attach_traceback(
msg, _frame_selector, config.traceback_limit)
self._capture_log(level, msg, args, **kwargs)
super()._log(level, msg, args, **kwargs)
# Set logging class to _MagmaLogger to override logging behavior. Also, setup
# root logger parameters.
logging.setLoggerClass(_MagmaLogger)
_log_stream = getattr(sys, config.log_stream)
_root_logger = logging.getLogger("magma")
_handler = colorlog.StreamHandler(_log_stream)
_handler.setFormatter(colorlog.ColoredFormatter(
'%(log_color)s%(levelname)s%(reset)s:%(name)s:%(message)s'))
_root_logger.addHandler(_handler)
_root_logger.setLevel(config.log_level)
def root_logger():
return logging.getLogger("magma")
# NOTE(rsetaluri): For some reason the following code which uses
# contextlib.contextmanager results in the context manager being entered into
# twice. It may be cached somewhere in the pipeline.
#
# @contextlib.contextmanager
# def logging_level(level):
# root = root_logger()
# prev_level = root.level
# root.setLevel(level)
# try:
# yield
# finally:
# root.setLevel(prev_level)
class logging_level:
def __init__(self, level):
self.level = level
self.root = root_logger()
def __enter__(self):
self.prev_level = self.root.level
self.root.setLevel(self.level)
def __exit__(self, *_):
self.root.setLevel(self.prev_level)
def stage_logger():
get_staged_logs_stack().push([])
def _flush(staged_logs):
for logger, level, obj, args, kwargs in staged_logs:
with logger.as_raw():
logger.log(level, obj, *args, **kwargs)
def flush():
staged_logs = get_staged_logs_stack().pop()
_flush(staged_logs)
return staged_logs
def unstage_logger():
return flush()
def flush_all():
staged_logs_stack = get_staged_logs_stack()
while staged_logs_stack:
staged_logs = staged_logs_stack.pop()
_flush(staged_logs)
@contextlib.contextmanager
def staged_logs():
stage_logger()
staged_logs = get_staged_logs_stack().peek()
try:
yield staged_logs
finally:
unstage_logger()
class StagedLogRecord(abc.ABC):
def __init__(self, tpl: str):
self._tpl = tpl
@abc.abstractmethod
def args(self):
raise NotImplementedError()
def __str__(self):
return self._tpl.format(*self.args())
def _get_log_capturer_stack() -> Stack:
global _log_capturer_stack
return _log_capturer_stack
def METHOD_NAME(log_capturer):
_get_log_capturer_stack().push(log_capturer)
def pop_log_capturer():
_get_log_capturer_stack().pop()
def get_log_capturer():
return _get_log_capturer_stack().peek()
@contextlib.contextmanager
def capture_logs(log_capturer):
METHOD_NAME(log_capturer)
try:
yield
finally:
pop_log_capturer()
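# A hedged usage sketch (not part of the original module): any object with an
# ``add_log`` method can serve as a capturer; the list-backed class below is
# made up for illustration.
class _ListLogCapturer:
    def __init__(self):
        self.entries = []
    def add_log(self, entry):
        self.entries.append(entry)
def _example_usage():
    capturer = _ListLogCapturer()
    with capture_logs(capturer):
        root_logger().warning("captured and also emitted as usual")
    return capturer.entries  # list of (level, msg, args, kwargs) tuples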
|
299,515 |
check hashmap values
|
#!/usr/bin/env python3
#
# USAGE: test_map_batch_ops.py
#
# Copyright (c) Emilien Gobillot
# Licensed under the Apache License, Version 2.0 (the "License")
from __future__ import print_function
from unittest import main, skipUnless, TestCase
from utils import kernel_version_ge
from bcc import BPF
import os
import ctypes as ct
@skipUnless(kernel_version_ge(5, 6), "requires kernel >= 5.6")
class TestMapBatch(TestCase):
MAPSIZE = 1024
SUBSET_SIZE = 32
def fill_hashmap(self):
b = BPF(text=b"""BPF_HASH(map, int, int, %d);""" % self.MAPSIZE)
hmap = b[b"map"]
for i in range(0, self.MAPSIZE):
hmap[ct.c_int(i)] = ct.c_int(i)
return hmap
def prepare_keys_subset(self, hmap, count=None):
if not count:
count = self.SUBSET_SIZE
keys = (hmap.Key * count)()
i = 0
for k, _ in sorted(hmap.items_lookup_batch(), key=lambda k:k[0].value):
if i < count:
keys[i] = k.value
i += 1
else:
break
return keys
def prepare_values_subset(self, hmap, count=None):
if not count:
count = self.SUBSET_SIZE
values = (hmap.Leaf * count)()
i = 0
for _, v in sorted(hmap.items_lookup_batch(), key=lambda k:k[0].value):
if i < count:
values[i] = v.value * v.value
i += 1
else:
break
return values
def METHOD_NAME(self, it):
i = 0
for k, v in sorted(it, key=lambda kv:kv[0].value):
self.assertEqual(k.value, i)
self.assertEqual(v.value, i)
i += 1
return i
def test_lookup_and_delete_batch_all_keys(self):
# fill the hashmap
hmap = self.fill_hashmap()
# check values and count them
count = self.METHOD_NAME(hmap.items_lookup_and_delete_batch())
self.assertEqual(count, self.MAPSIZE)
# and check the delete has worked, i.e map is now empty
count = sum(1 for _ in hmap.items())
self.assertEqual(count, 0)
def test_lookup_batch_all_keys(self):
# fill the hashmap
hmap = self.fill_hashmap()
# check values and count them
count = self.METHOD_NAME(hmap.items_lookup_batch())
self.assertEqual(count, self.MAPSIZE)
def test_delete_batch_all_keys(self):
# Delete all key/value in the map
# fill the hashmap
hmap = self.fill_hashmap()
hmap.items_delete_batch()
# check the delete has worked, i.e map is now empty
count = sum(1 for _ in hmap.items())
self.assertEqual(count, 0)
def test_delete_batch_subset(self):
# Delete only a subset of key/value in the map
# fill the hashmap
hmap = self.fill_hashmap()
keys = self.prepare_keys_subset(hmap)
hmap.items_delete_batch(keys)
# check the delete has worked, i.e map is now empty
count = sum(1 for _ in hmap.items())
self.assertEqual(count, self.MAPSIZE - self.SUBSET_SIZE)
def test_update_batch_all_keys(self):
hmap = self.fill_hashmap()
# preparing keys and new values arrays
keys = (hmap.Key * self.MAPSIZE)()
new_values = (hmap.Leaf * self.MAPSIZE)()
for i in range(self.MAPSIZE):
keys[i] = ct.c_int(i)
new_values[i] = ct.c_int(-1)
hmap.items_update_batch(keys, new_values)
# check the update has worked, i.e sum of values is -NUM_KEYS
count = sum(v.value for v in hmap.values())
self.assertEqual(count, -1*self.MAPSIZE)
def test_update_batch_subset(self):
# fill the hashmap
hmap = self.fill_hashmap()
keys = self.prepare_keys_subset(hmap, count=self.SUBSET_SIZE)
new_values = self.prepare_values_subset(hmap, count=self.SUBSET_SIZE)
hmap.items_update_batch(keys, new_values)
# check all the values in the map
        # the first self.SUBSET_SIZE keys follow this rule: value = key * key
        # the remaining keys follow this rule: value = key
i = 0
for k, v in sorted(hmap.items_lookup_batch(),
key=lambda kv:kv[0].value):
if i < self.SUBSET_SIZE:
# values are the square of the keys
self.assertEqual(v.value, k.value * k.value)
i += 1
else:
# values = keys
self.assertEqual(v.value, k.value)
self.assertEqual(i, self.SUBSET_SIZE)
if __name__ == "__main__":
main()
|
299,516 |
test gaussian dyn
|
import numpy as np
from numpy import linalg
from utils import get_rstate, get_printing
import dynesty # noqa
import dynesty.pool as dypool
from dynesty import utils as dyfunc # noqa
"""
Run a series of basic tests to check whether anything huge is broken.
"""
nlive = 500
printing = get_printing()
# GAUSSIAN TEST
class Gaussian:
def __init__(self, corr=.95, prior_win=10):
self.ndim = 3
self.mean = np.linspace(-1, 1, self.ndim)
self.cov = np.identity(self.ndim) # set covariance to identity matrix
self.cov[self.cov ==
0] = corr # set off-diagonal terms (strongly correlated)
self.cov_inv = linalg.inv(self.cov) # precision matrix
self.lnorm = -0.5 * (np.log(2 * np.pi) * self.ndim +
np.log(linalg.det(self.cov)))
self.prior_win = prior_win # +/- on both sides
self.logz_truth = self.ndim * (-np.log(2 * self.prior_win))
# 3-D correlated multivariate normal log-likelihood
def loglikelihood(self, x):
"""Multivariate normal log-likelihood."""
ret = -0.5 * np.dot(
(x - self.mean), np.dot(self.cov_inv,
(x - self.mean))) + self.lnorm
# notice here we overwrite the input array just to test
# that this is not a problem
x[:] = -np.ones(len(x))
return ret
# 3-D correlated multivariate normal log-likelihood
def loglikelihood_with_blob(self, x):
"""Multivariate normal log-likelihood."""
ret = -0.5 * np.dot(
(x - self.mean), np.dot(self.cov_inv,
(x - self.mean))) + self.lnorm
# notice here we overwrite the input array just to test
# that this is not a problem
blob = x * 1
x[:] = -np.ones(len(x))
return ret, blob
# prior transform
def prior_transform(self, u):
"""Flat prior between -10. and 10."""
ret = self.prior_win * (2. * u - 1.)
# notice here we overwrite the input array just to test
# that this is not a problem
u[:] = -np.ones(len(u))
return ret
def test_gaussian():
rstate = get_rstate()
g = Gaussian()
    sampler = 'rslice'  # use this sampler for the static run
    # (uniform sampling is used for the dynamic run)
sampler = dynesty.NestedSampler(g.loglikelihood_with_blob,
g.prior_transform,
g.ndim,
nlive=nlive,
rstate=rstate,
sample=sampler,
blob=True)
sampler.run_nested(print_progress=printing)
res = sampler.results
assert res['blob'].shape == (len(res['samples']), 3)
assert np.all(res['blob'] == res['samples'])
def test_gaussian_livepts():
# test we can provide starting points while using blobs
rstate = get_rstate()
g = Gaussian()
    sampler = 'rslice'  # use this sampler for the static run
    # (uniform sampling is used for the dynamic run)
us = rstate.uniform(size=(nlive, g.ndim))
# note multiplication by 1 because our functions overwrite inputs
vs = [g.prior_transform(1 * u) for u in us]
lbs = [g.loglikelihood_with_blob(1 * v) for v in vs]
logls = [_[0] for _ in lbs]
blobs = [_[1] for _ in lbs]
sampler = dynesty.NestedSampler(g.loglikelihood_with_blob,
g.prior_transform,
g.ndim,
nlive=nlive,
rstate=rstate,
sample=sampler,
blob=True,
live_points=[us, vs, logls, blobs])
sampler.run_nested(print_progress=printing)
res = sampler.results
assert res['blob'].shape == (len(res['samples']), 3)
assert np.all(res['blob'] == res['samples'])
def test_gaussian_pool():
rstate = get_rstate()
g = Gaussian()
with dypool.Pool(2, g.loglikelihood_with_blob, g.prior_transform) as pool:
sampler = dynesty.NestedSampler(pool.loglike,
pool.prior_transform,
g.ndim,
nlive=nlive,
rstate=rstate,
blob=True,
pool=pool)
sampler.run_nested(print_progress=printing)
res = sampler.results
assert res['blob'].shape == (len(res['samples']), 3)
assert np.all(res['blob'] == res['samples'])
def METHOD_NAME():
rstate = get_rstate()
g = Gaussian()
sampler = dynesty.DynamicNestedSampler(g.loglikelihood_with_blob,
g.prior_transform,
g.ndim,
nlive=nlive,
rstate=rstate,
blob=True)
sampler.run_nested(print_progress=printing, dlogz_init=1)
res = sampler.results
assert res['blob'].shape == (len(res['samples']), 3)
assert np.all(res['blob'] == res['samples'])
|
299,517 |
get motion model
|
"""
A* grid based planning
author: Nikos Kanargias ([email protected])
See Wikipedia article (https://en.wikipedia.org/wiki/A*_search_algorithm)
"""
import heapq
import math
import matplotlib.pyplot as plt
show_animation = False
class Node:
def __init__(self, x, y, cost, parent_index):
self.x = x
self.y = y
self.cost = cost
self.parent_index = parent_index
def __str__(self):
return str(self.x) + "," + str(self.y) + "," + str(
self.cost) + "," + str(self.parent_index)
def calc_final_path(goal_node, closed_node_set, resolution):
# generate final course
rx, ry = [goal_node.x * resolution], [goal_node.y * resolution]
parent_index = goal_node.parent_index
while parent_index != -1:
n = closed_node_set[parent_index]
rx.append(n.x * resolution)
ry.append(n.y * resolution)
parent_index = n.parent_index
return rx, ry
def calc_distance_heuristic(gx, gy, ox, oy, resolution, rr):
"""
    gx: goal x position [m]
    gy: goal y position [m]
    ox: x position list of Obstacles [m]
    oy: y position list of Obstacles [m]
    resolution: grid resolution [m]
    rr: robot radius [m]
"""
goal_node = Node(round(gx / resolution), round(gy / resolution), 0.0, -1)
ox = [iox / resolution for iox in ox]
oy = [ioy / resolution for ioy in oy]
obstacle_map, min_x, min_y, max_x, max_y, x_w, y_w = calc_obstacle_map(
ox, oy, resolution, rr)
motion = METHOD_NAME()
open_set, closed_set = dict(), dict()
open_set[calc_index(goal_node, x_w, min_x, min_y)] = goal_node
priority_queue = [(0, calc_index(goal_node, x_w, min_x, min_y))]
while True:
if not priority_queue:
break
cost, c_id = heapq.heappop(priority_queue)
if c_id in open_set:
current = open_set[c_id]
closed_set[c_id] = current
open_set.pop(c_id)
else:
continue
# show graph
if show_animation: # pragma: no cover
plt.plot(current.x * resolution, current.y * resolution, "xc")
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect(
'key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
if len(closed_set.keys()) % 10 == 0:
plt.pause(0.001)
# Remove the item from the open set
# expand search grid based on motion model
for i, _ in enumerate(motion):
node = Node(current.x + motion[i][0],
current.y + motion[i][1],
current.cost + motion[i][2], c_id)
n_id = calc_index(node, x_w, min_x, min_y)
if n_id in closed_set:
continue
if not verify_node(node, obstacle_map, min_x, min_y, max_x, max_y):
continue
if n_id not in open_set:
open_set[n_id] = node # Discover a new node
heapq.heappush(
priority_queue,
(node.cost, calc_index(node, x_w, min_x, min_y)))
else:
if open_set[n_id].cost >= node.cost:
# This path is the best until now. record it!
open_set[n_id] = node
heapq.heappush(
priority_queue,
(node.cost, calc_index(node, x_w, min_x, min_y)))
return closed_set
def verify_node(node, obstacle_map, min_x, min_y, max_x, max_y):
if node.x < min_x:
return False
elif node.y < min_y:
return False
elif node.x >= max_x:
return False
elif node.y >= max_y:
return False
if obstacle_map[node.x][node.y]:
return False
return True
def calc_obstacle_map(ox, oy, resolution, vr):
min_x = round(min(ox))
min_y = round(min(oy))
max_x = round(max(ox))
max_y = round(max(oy))
x_width = round(max_x - min_x)
y_width = round(max_y - min_y)
# obstacle map generation
obstacle_map = [[False for _ in range(y_width)] for _ in range(x_width)]
for ix in range(x_width):
x = ix + min_x
for iy in range(y_width):
y = iy + min_y
# print(x, y)
for iox, ioy in zip(ox, oy):
d = math.hypot(iox - x, ioy - y)
if d <= vr / resolution:
obstacle_map[ix][iy] = True
break
return obstacle_map, min_x, min_y, max_x, max_y, x_width, y_width
def calc_index(node, x_width, x_min, y_min):
return (node.y - y_min) * x_width + (node.x - x_min)
def METHOD_NAME():
# dx, dy, cost
motion = [[1, 0, 1],
[0, 1, 1],
[-1, 0, 1],
[0, -1, 1],
[-1, -1, math.sqrt(2)],
[-1, 1, math.sqrt(2)],
[1, -1, math.sqrt(2)],
[1, 1, math.sqrt(2)]]
return motion
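# A hedged usage sketch (not part of the original module): a tiny, made-up
# obstacle layout around the goal, just to show the expected argument shapes.
def _example_usage():
    ox = [0.0, 1.0, 2.0, 3.0, 4.0, 0.0, 4.0, 0.0, 1.0, 2.0, 3.0, 4.0]
    oy = [0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0, 4.0, 4.0, 4.0, 4.0, 4.0]
    closed_set = calc_distance_heuristic(
        gx=2.0, gy=2.0, ox=ox, oy=oy, resolution=1.0, rr=0.5)
    return len(closed_set)  # number of grid cells reachable from the goal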
|
299,518 |
reset
|
import logging
import time
import uuid
from dataclasses import dataclass, field
from PyQt5 import uic
from tribler.core.components.metadata_store.db.serialization import REGULAR_TORRENT
from tribler.core.utilities.utilities import Query, to_fts_query
from tribler.gui.network.request_manager import request_manager
from tribler.gui.sentry_mixin import AddBreadcrumbOnShowMixin
from tribler.gui.utilities import connect, get_ui_file_path, tr
from tribler.gui.widgets.tablecontentmodel import SearchResultsModel
widget_form, widget_class = uic.loadUiType(get_ui_file_path('search_results.ui'))
def format_search_loading_label(search_request):
data = {
"total_peers": len(search_request.peers),
"num_complete_peers": len(search_request.peers_complete),
"num_remote_results": len(search_request.remote_results),
}
return (
tr(
"Remote responses: %(num_complete_peers)i / %(total_peers)i"
"\nNew remote results received: %(num_remote_results)i"
)
% data
)
@dataclass
class SearchRequest:
uuid: uuid
query: Query
peers: set
peers_complete: set = field(default_factory=set)
remote_results: list = field(default_factory=list)
@property
def complete(self):
return self.peers == self.peers_complete
class SearchResultsWidget(AddBreadcrumbOnShowMixin, widget_form, widget_class):
def __init__(self, parent=None):
widget_class.__init__(self, parent=parent)
self._logger = logging.getLogger(self.__class__.__name__)
try:
self.setupUi(self)
except SystemError:
pass
self.last_search_time = None
self.last_search_query = None
self.hide_xxx = None
self.search_request = None
connect(self.results_page_content.model_query_completed, self.on_local_query_completed)
connect(self.search_progress_bar.ready_to_update_results, self.on_ready_to_update_results)
def initialize(self, hide_xxx=False):
self.hide_xxx = hide_xxx
self.results_page_content.initialize_content_page(hide_xxx=hide_xxx)
self.results_page_content.channel_torrents_filter_input.setHidden(True)
@property
def has_results(self):
return self.last_search_query is not None
def check_can_show(self, query):
if (
self.last_search_query == query
and self.last_search_time is not None
and time.time() - self.last_search_time < 1
):
self._logger.info("Same search query already sent within 500ms so dropping this one")
return False
return True
def search(self, query: Query) -> bool:
if not self.check_can_show(query.original_query):
return False
fts_query = to_fts_query(query.original_query)
if not fts_query:
return False
self.last_search_query = query.original_query
self.last_search_time = time.time()
model = SearchResultsModel(
endpoint_url="search",
hide_xxx=self.results_page_content.hide_xxx,
original_query=query.original_query,
text_filter=to_fts_query(query.fts_text),
tags=list(query.tags),
type_filter=[REGULAR_TORRENT],
exclude_deleted=True,
)
self.results_page_content.initialize_root_model(model)
self.setCurrentWidget(self.results_page)
self.results_page_content.format_search_title()
self.search_progress_bar.start()
# After transitioning to the page with search results, we refresh the viewport since some rows might have been
# rendered already with an incorrect row height.
self.results_page_content.run_brain_dead_refresh()
def register_request(response):
peers = set(response["peers"])
self.search_request = SearchRequest(response["request_uuid"], query, peers)
self.search_progress_bar.set_remote_total(len(peers))
params = {'txt_filter': fts_query, 'hide_xxx': self.hide_xxx, 'tags': list(query.tags),
'metadata_type': REGULAR_TORRENT, 'exclude_deleted': True}
request_manager.put('remote_query', register_request, url_params=params)
return True
def on_local_query_completed(self):
self.search_progress_bar.on_local_results()
def METHOD_NAME(self):
if self.currentWidget() == self.results_page:
self.results_page_content.go_back_to_level(0)
def update_loading_page(self, remote_results):
if not self.search_request or self.search_request.uuid != remote_results.get("uuid"):
return
peer = remote_results["peer"]
results = remote_results.get("results", [])
self.search_request.peers_complete.add(peer)
self.search_request.remote_results.append(results)
new_items = self.results_page_content.model.add_remote_results(results)
self.search_progress_bar.on_remote_results(len(new_items), len(self.search_request.peers_complete))
def on_ready_to_update_results(self):
self.results_page_content.root_model.show_remote_results()
|
299,519 |
test parse with valid formatter arguments
|
from unittest.mock import patch
import pytest
from faker import Faker, Generator
class BarProvider:
def foo_formatter(self):
return "barfoo"
class FooProvider:
def foo_formatter(self):
return "foobar"
def foo_formatter_with_arguments(self, param="", append=""):
return "baz" + str(param) + str(append)
@pytest.fixture(autouse=True)
def generator():
generator = Generator()
generator.add_provider(FooProvider())
return generator
class TestGenerator:
"""Test Generator class"""
def test_get_formatter_returns_correct_formatter(self, generator):
foo_provider = generator.providers[0]
formatter = generator.get_formatter("foo_formatter")
assert callable(formatter) and formatter == foo_provider.foo_formatter
def test_get_formatter_with_unknown_formatter(self, generator):
with pytest.raises(AttributeError) as excinfo:
generator.get_formatter("barFormatter")
assert str(excinfo.value) == "Unknown formatter 'barFormatter'"
fake = Faker("it_IT")
with pytest.raises(AttributeError) as excinfo:
fake.get_formatter("barFormatter")
assert str(excinfo.value) == "Unknown formatter 'barFormatter' with locale 'it_IT'"
def test_format_calls_formatter_on_provider(self, generator):
assert generator.format("foo_formatter") == "foobar"
def test_format_passes_arguments_to_formatter(self, generator):
result = generator.format("foo_formatter_with_arguments", "foo", append="!")
assert result == "bazfoo!"
def test_add_provider_overrides_old_provider(self, generator):
assert generator.format("foo_formatter") == "foobar"
generator.add_provider(BarProvider())
assert generator.format("foo_formatter") == "barfoo"
def test_parse_without_formatter_tokens(self, generator):
assert generator.parse("fooBar#?") == "fooBar#?"
def test_parse_with_valid_formatter_tokens(self, generator):
result = generator.parse('This is {{foo_formatter}} a text with "{{ foo_formatter }}"')
assert result == 'This is foobar a text with "foobar"'
def test_arguments_group_with_values(self, generator):
generator.set_arguments("group1", "argument1", 1)
generator.set_arguments("group1", "argument2", 2)
assert generator.get_arguments("group1", "argument1") == 1
assert generator.del_arguments("group1", "argument2") == 2
assert generator.get_arguments("group1", "argument2") is None
assert generator.get_arguments("group1") == {"argument1": 1}
def test_arguments_group_with_dictionaries(self, generator):
generator.set_arguments("group2", {"argument1": 3, "argument2": 4})
assert generator.get_arguments("group2") == {"argument1": 3, "argument2": 4}
assert generator.del_arguments("group2") == {"argument1": 3, "argument2": 4}
assert generator.get_arguments("group2") is None
def test_arguments_group_with_invalid_name(self, generator):
assert generator.get_arguments("group3") is None
assert generator.del_arguments("group3") is None
def test_arguments_group_with_invalid_argument_type(self, generator):
with pytest.raises(ValueError) as excinfo:
generator.set_arguments("group", ["foo", "bar"])
assert str(excinfo.value) == "Arguments must be either a string or dictionary"
def METHOD_NAME(self, generator):
generator.set_arguments("format_name", {"param": "foo", "append": "bar"})
result = generator.parse('This is "{{foo_formatter_with_arguments:format_name}}"')
generator.del_arguments("format_name")
assert result == 'This is "bazfoobar"'
def test_parse_with_unknown_arguments_group(self, generator):
with pytest.raises(AttributeError) as excinfo:
generator.parse('This is "{{foo_formatter_with_arguments:unknown}}"')
assert str(excinfo.value) == "Unknown argument group 'unknown'"
def test_parse_with_unknown_formatter_token(self, generator):
with pytest.raises(AttributeError) as excinfo:
generator.parse("{{barFormatter}}")
assert str(excinfo.value) == "Unknown formatter 'barFormatter'"
def test_magic_call_calls_format(self, generator):
assert generator.foo_formatter() == "foobar"
def test_magic_call_calls_format_with_arguments(self, generator):
assert generator.foo_formatter_with_arguments("foo") == "bazfoo"
@patch("faker.generator.random_module.getstate")
def test_get_random(self, mock_system_random, generator):
random_instance = generator.random
random_instance.getstate()
mock_system_random.assert_not_called()
@patch("faker.generator.random_module.seed")
def test_random_seed_doesnt_seed_system_random(self, mock_system_random, generator):
# Save original state of shared random instance to avoid affecting other tests
state = generator.random.getstate()
generator.seed(0)
mock_system_random.assert_not_called()
# Restore state of shared random instance
generator.random.setstate(state)
|
299,520 |
build
|
import numpy as np
from numpy import zeros
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double, double_or_blank)
from pyNastran.dev.bdf_vectorized.cards.loads.vectorized_load import VectorizedLoad
class GRAV(VectorizedLoad):
"""
+------+-----+-----+------+-----+-----+------+-----+
| GRAV | SID | CID | A | N1 | N2 | N3 | MB |
+------+-----+-----+------+-----+-----+------+-----+
| GRAV | 1 | 3 | 32.2 | 0.0 | 0.0 | -1.0 | |
+------+-----+-----+------+-----+-----+------+-----+
"""
type = 'GRAV'
def __init__(self, model):
"""
Defines the GRAV object.
Parameters
----------
model : BDF
the BDF object
.. todo:: collapse loads
"""
VectorizedLoad.__init__(self, model)
#self.model = model
#self.n = 0
#self._cards = []
#self._comments = []
def __getitem__(self, i):
#unique_lid = unique(self.load_id)
if len(i):
obj = GRAV(self.model)
obj.load_id = self.load_id[i]
obj.coord_id = self.coord_id[i]
obj.scale = self.scale[i]
obj.N = self.N[i]
obj.mb = self.mb[i]
obj.n = len(i)
return obj
raise RuntimeError('len(i) = 0')
def __mul__(self, value):
obj = GRAV(self.model)
obj.load_id = self.load_id
obj.coord_id = self.coord_id
obj.scale = self.scale * value
obj.N = self.N
obj.mb = self.mb
obj.n = self.n
return obj
def __rmul__(self, value):
return self.__mul__(value)
def allocate(self, card_count):
ncards = card_count[self.type]
if ncards:
self.n = ncards
float_fmt = self.model.float_fmt
#: Set identification number
self.load_id = zeros(ncards, 'int32')
#: Coordinate system identification number.
self.coord_id = zeros(ncards, 'int32')
#: scale factor
self.scale = zeros(ncards, float_fmt)
self.N = zeros((ncards, 3), float_fmt)
self.mb = zeros(ncards, 'int32')
def add_card(self, card, comment=''):
#self._cards.append(card)
#self._comments.append(comment)
i = self.i
self.load_id[i] = integer(card, 1, 'sid')
#self.node_id[i] = integer(card, 1, 'node_id')
self.coord_id[i] = integer_or_blank(card, 2, 'cid', 0)
self.scale[i] = double(card, 3, 'scale')
#: Acceleration vector components measured in coordinate system CID
self.N[i, :] = [double_or_blank(card, 4, 'N1', 0.0),
double_or_blank(card, 5, 'N2', 0.0),
double_or_blank(card, 6, 'N3', 0.0)]
#: Indicates whether the CID coordinate system is defined in the
#: main Bulk Data Section (MB = -1) or the partitioned superelement
#: Bulk Data Section (MB = 0). Coordinate systems referenced in the
#: main Bulk Data Section are considered stationary with respect to
#: the assembly basic coordinate system. See Remark 10.
#: (Integer; Default = 0)
self.mb[i] = integer_or_blank(card, 7, 'mb', 0)
assert len(card) <= 8, 'len(GRAV card) = %i\ncard=%s' % (len(card), card)
self.i += 1
def METHOD_NAME(self):
"""
Parameters
----------
:param cards: the list of GRAV cards
"""
if self.n:
i = self.load_id.argsort()
self.load_id = self.load_id[i]
#self.node_id = self.node_id[i]
self.coord_id = self.coord_id[i]
self.scale = self.scale[i]
self.N = self.N[i]
self._cards = []
self._comments = []
def get_stats(self):
msg = []
if self.n:
msg.append(' %-8s: %i' % ('GRAV', self.n))
return msg
def write_card_by_index(self, bdf_file, size=8, is_double=False, i=None):
for (lid, cid, scale, N, mb) in zip(
self.load_id[i], self.coord_id[i], self.scale[i], self.N[i, :], self.mb[i]):
card = ['GRAV', lid, cid, scale, N[0], N[1], N[2], mb]
if size == 8:
bdf_file.write(print_card_8(card))
else:
bdf_file.write(print_card_16(card))
def get_load_ids(self):
return np.unique(self.load_id)
|
299,521 |
url parameters
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network security-partner-provider wait",
)
class Wait(AAZWaitCommand):
"""Place the CLI in a waiting state until a condition is met.
"""
_aaz_info = {
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/securitypartnerproviders/{}", "2021-08-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the Security Partner Provider.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.SecurityPartnerProvidersGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False)
return result
class SecurityPartnerProvidersGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}",
**self.METHOD_NAME
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"securityPartnerProviderName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-08-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.etag = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.id = AAZStrType()
_schema_on_200.location = AAZStrType()
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.connection_status = AAZStrType(
serialized_name="connectionStatus",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.security_provider_name = AAZStrType(
serialized_name="securityProviderName",
)
properties.virtual_hub = AAZObjectType(
serialized_name="virtualHub",
)
virtual_hub = cls._schema_on_200.properties.virtual_hub
virtual_hub.id = AAZStrType()
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _WaitHelper:
"""Helper class for Wait"""
__all__ = ["Wait"]
|
299,522 |
test view activate active
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from ...conf.test import override_dynamic_settings
from ...core.utils import encode_json_html
from ..models import Ban
from ..test import create_test_user
from ..tokens import make_activation_token
User = get_user_model()
class ActivationViewsTests(TestCase):
def test_request_view_returns_200(self):
"""request new activation link view returns 200"""
response = self.client.get(reverse("misago:request-activation"))
self.assertEqual(response.status_code, 200)
def test_view_activate_banned(self):
"""activate banned user shows error"""
user = create_test_user("User", "[email protected]", requires_activation=1)
activation_token = make_activation_token(user)
Ban.objects.create(
check_type=Ban.USERNAME, banned_value="user", user_message="Nope!"
)
response = self.client.get(
reverse(
"misago:activate-by-token",
kwargs={"pk": user.pk, "token": activation_token},
)
)
self.assertContains(response, encode_json_html("<p>Nope!</p>"), status_code=403)
user = User.objects.get(pk=user.pk)
self.assertEqual(user.requires_activation, 1)
def test_view_activate_invalid_token(self):
"""activate with invalid token shows error"""
user = create_test_user("User", "[email protected]", requires_activation=1)
activation_token = make_activation_token(user)
response = self.client.get(
reverse(
"misago:activate-by-token",
kwargs={"pk": user.pk, "token": activation_token + "acd"},
)
)
self.assertEqual(response.status_code, 400)
user = User.objects.get(pk=user.pk)
self.assertEqual(user.requires_activation, 1)
def test_view_activate_disabled(self):
"""activate disabled user shows error"""
user = create_test_user("User", "[email protected]", is_active=False)
activation_token = make_activation_token(user)
response = self.client.get(
reverse(
"misago:activate-by-token",
kwargs={"pk": user.pk, "token": activation_token},
)
)
self.assertEqual(response.status_code, 404)
def METHOD_NAME(self):
"""activate active user shows error"""
user = create_test_user("User", "[email protected]")
activation_token = make_activation_token(user)
response = self.client.get(
reverse(
"misago:activate-by-token",
kwargs={"pk": user.pk, "token": activation_token},
)
)
self.assertEqual(response.status_code, 200)
user = User.objects.get(pk=user.pk)
self.assertEqual(user.requires_activation, 0)
def test_view_activate_inactive(self):
"""activate inactive user passess"""
user = create_test_user("User", "[email protected]", requires_activation=1)
activation_token = make_activation_token(user)
response = self.client.get(
reverse(
"misago:activate-by-token",
kwargs={"pk": user.pk, "token": activation_token},
)
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "your account has been activated!")
user = User.objects.get(pk=user.pk)
self.assertEqual(user.requires_activation, 0)
@override_dynamic_settings(
enable_oauth2_client=True,
oauth2_provider="Lorem",
)
def test_request_activation_view_returns_403_if_oauth_is_enabled(db, client):
response = client.get(reverse("misago:request-activation"))
assert response.status_code == 403
@override_dynamic_settings(
enable_oauth2_client=True,
oauth2_provider="Lorem",
)
def test_activate_with_token_view_returns_403_if_oauth_is_enabled(db, client):
user = create_test_user("User", "[email protected]", requires_activation=1)
activation_token = make_activation_token(user)
response = client.post(
reverse(
"misago:activate-by-token",
kwargs={"pk": user.pk, "token": activation_token},
)
)
assert response.status_code == 403
|
299,523 |
get collection stats
|
import logging
from pprint import pprint # noqa
from normality import normalize
from followthemoney import model
from aleph.core import es, cache
from aleph.model import Collection, Entity
from aleph.index.indexes import entities_read_index
from aleph.index.util import index_name, index_settings, configure_index
from aleph.index.util import query_delete, index_safe, delete_safe
from aleph.index.util import KEYWORD_COPY, KEYWORD
STATS_FACETS = [
"schema",
"names",
"addresses",
"phones",
"emails",
"countries",
"languages",
"ibans",
]
log = logging.getLogger(__name__)
def collections_index():
"""Combined index to run all queries against."""
return index_name("collection", "v1")
def configure_collections():
mapping = {
"date_detection": False,
"dynamic": False,
"dynamic_templates": [
{"fields": {"match": "schemata.*", "mapping": {"type": "long"}}}
],
"_source": {"excludes": ["text"]},
"properties": {
"label": {
"type": "text",
"copy_to": "text",
"analyzer": "latin_index",
"fields": {"kw": KEYWORD},
},
"collection_id": KEYWORD,
"foreign_id": KEYWORD_COPY,
"languages": KEYWORD_COPY,
"countries": KEYWORD_COPY,
"category": KEYWORD_COPY,
"frequency": KEYWORD_COPY,
"summary": {"type": "text", "copy_to": "text", "index": False},
"publisher": KEYWORD_COPY,
"publisher_url": KEYWORD_COPY,
"data_url": KEYWORD_COPY,
"info_url": KEYWORD_COPY,
"creator_id": KEYWORD,
"team_id": KEYWORD,
"text": {
"type": "text",
"analyzer": "latin_index",
"term_vector": "with_positions_offsets",
"store": True,
},
"casefile": {"type": "boolean"},
"restricted": {"type": "boolean"},
"secret": {"type": "boolean"},
"xref": {"type": "boolean"},
"created_at": {"type": "date"},
"updated_at": {"type": "date"},
"count": {"type": "long"},
"schemata": {"dynamic": True, "type": "object"},
},
}
index = collections_index()
settings = index_settings(shards=1)
return configure_index(index, mapping, settings)
def index_collection(collection, sync=False):
"""Index a collection."""
if collection.deleted_at is not None:
return delete_collection(collection.id)
data = get_collection(collection.id)
if data is None:
return
log.info(
"[%s] Index: %s (%s things)...",
collection,
data.get("label"),
data.get("count"),
)
text = [data.get("label")]
text.append(normalize(data.get("label")))
text.append(normalize(data.get("foreign_id")))
text.append(normalize(data.get("summary")))
data["text"] = text
data.pop("id", None)
return index_safe(collections_index(), collection.id, data, sync=sync)
def get_collection(collection_id):
"""Fetch a collection from the index."""
if collection_id is None:
return
key = cache.object_key(Collection, collection_id)
data = cache.get_complex(key)
if data is not None:
return data
collection = Collection.by_id(collection_id)
if collection is None:
return
data = collection.to_dict()
index = entities_read_index(schema=Entity.THING)
query = {"term": {"collection_id": collection_id}}
result = es.count(index=index, body={"query": query})
data["count"] = result.get("count", 0)
cache.set_complex(key, data, expires=cache.EXPIRE)
return data
def _facet_key(collection_id, facet):
return cache.object_key(Collection, collection_id, facet)
def METHOD_NAME(collection_id):
"""Retrieve statistics on the content of a collection."""
keys = {_facet_key(collection_id, f): f for f in STATS_FACETS}
empty = {"values": [], "total": 0}
stats = {}
for key, result in cache.get_many_complex(keys.keys(), empty):
stats[keys[key]] = result
return stats
def update_collection_stats(collection_id, facets=STATS_FACETS):
"""Compute some statistics on the content of a collection."""
aggs = {}
for facet in facets:
# Regarding facet size, 300 would be optimal because it's
# guaranteed to capture all schemata and countries. But it
# adds a whole lot to the compute time, so let's see how
# this goes.
aggs[facet + ".values"] = {"terms": {"field": facet, "size": 100}}
aggs[facet + ".total"] = {"cardinality": {"field": facet}}
query = {"term": {"collection_id": collection_id}}
query = {"size": 0, "query": query, "aggs": aggs}
index = entities_read_index()
result = es.search(index=index, body=query, request_timeout=3600, timeout="20m")
results = result.get("aggregations", {})
for facet in facets:
buckets = results.get(facet + ".values").get("buckets", [])
values = {b["key"]: b["doc_count"] for b in buckets}
total = results.get(facet + ".total", {}).get("value", 0)
data = {"values": values, "total": total}
cache.set_complex(_facet_key(collection_id, facet), data)
def get_collection_things(collection_id):
"""Showing the number of things in a collection is more indicative
of its size than the overall collection entity count."""
schemata = cache.get_complex(_facet_key(collection_id, "schema"))
if schemata is None:
return {}
things = {}
for schema, count in schemata.get("values", {}).items():
schema = model.get(schema)
if schema is not None and schema.is_a(Entity.THING):
things[schema.name] = count
return things
def delete_collection(collection_id, sync=False):
"""Delete all documents from a particular collection."""
delete_safe(collections_index(), collection_id)
def delete_entities(collection_id, origin=None, schema=None, sync=False):
"""Delete entities from a collection."""
filters = [{"term": {"collection_id": collection_id}}]
if origin is not None:
filters.append({"term": {"origin": origin}})
query = {"bool": {"filter": filters}}
query_delete(entities_read_index(schema), query, sync=sync)
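# A hedged usage sketch (not part of the original module): it assumes a running
# Elasticsearch/Redis behind the app and an existing collection; the id below
# is a placeholder.
def _example_usage(collection_id=1):
    update_collection_stats(collection_id, facets=["schema", "countries"])
    return get_collection_things(collection_id)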
|
299,524 |
set many
|
"""
RedisCache with a fallback cache to prevent denial of service if Redis is down
Freely inspired by django-cache-fallback
Credits:
- https://github.com/Kub-AT/django-cache-fallback/
"""
import logging
from django.conf import settings
from django.core.cache import caches
from django.core.cache.backends.base import BaseCache
from django_redis.cache import RedisCache
from .utils.throttle import throttle
FALLBACK_CACHE_INVALIDATION_INTERVAL = 60 # seconds
DJANGO_REDIS_LOGGER = getattr(settings, "DJANGO_REDIS_LOGGER", __name__)
logger = logging.getLogger(DJANGO_REDIS_LOGGER)
class RedisCacheWithFallback(BaseCache):
"""
BaseCache object with a redis_cache used as main cache
and the "fallback" aliased cache which takes over
in case redis_cache is down.
"""
def __init__(self, server, params):
"""
Instantiate the Redis Cache with server and params
and retrieve the cache with alias "fallback"
"""
super().__init__(params)
self._redis_cache = RedisCache(server, params)
self._fallback_cache = caches["memory_cache"]
def _call_with_fallback(self, method, *args, **kwargs):
"""
        Try first to execute the provided method through the Redis cache
        instance. On success, invalidate the fallback cache so it is clean
        and ready for the next failure. On failure, log the exception and
        let the fallback cache take over.
"""
try:
next_cache_state = self._call_redis_cache(method, args, kwargs)
# pylint: disable=broad-except
except Exception as exception:
logger.warning("[DEGRADED CACHE MODE] - Switch to fallback cache")
logger.exception(exception)
return self._call_fallback_cache(method, args, kwargs)
self._invalidate_fallback_cache()
return next_cache_state
@throttle(FALLBACK_CACHE_INVALIDATION_INTERVAL) # 60 seconds
def _invalidate_fallback_cache(self):
"""
        Clear the fallback cache so it is fresh for the next failover (throttled).
"""
self._fallback_cache.clear()
def _call_redis_cache(self, method, args, kwargs):
"""
Exec the provided method through the redis cache instance
"""
return getattr(self._redis_cache, method)(*args, **kwargs)
def _call_fallback_cache(self, method, args, kwargs):
"""
Exec the provided method through the fallback cache instance
"""
return getattr(self._fallback_cache, method)(*args, **kwargs)
def get_backend_timeout(self, *args, **kwargs):
"""
Pass get_backend_timeout cache method to _call_with_fallback
"""
return self._call_with_fallback("get_backend_timeout", *args, **kwargs)
def make_key(self, *args, **kwargs):
"""
Pass make_key cache method to _call_with_fallback
"""
return self._call_with_fallback("make_key", *args, **kwargs)
def add(self, *args, **kwargs):
"""
Pass add cache method to _call_with_fallback
"""
return self._call_with_fallback("add", *args, **kwargs)
def get(self, *args, **kwargs):
"""
Pass get cache method to _call_with_fallback
"""
return self._call_with_fallback("get", *args, **kwargs)
def set(self, *args, **kwargs):
"""
Pass set cache method to _call_with_fallback
"""
return self._call_with_fallback("set", *args, **kwargs)
def touch(self, *args, **kwargs):
"""
Pass touch cache method to _call_with_fallback
"""
return self._call_with_fallback("touch", *args, **kwargs)
def delete(self, *args, **kwargs):
"""
Pass delete cache method to _call_with_fallback
"""
return self._call_with_fallback("delete", *args, **kwargs)
def get_many(self, *args, **kwargs):
"""
Pass get_many cache method to _call_with_fallback
"""
return self._call_with_fallback("get_many", *args, **kwargs)
def get_or_set(self, *args, **kwargs):
"""
Pass get_or_set cache method to _call_with_fallback
"""
return self._call_with_fallback("get_or_set", *args, **kwargs)
def has_key(self, *args, **kwargs):
"""
Pass has_key cache method to _call_with_fallback
"""
return self._call_with_fallback("has_key", *args, **kwargs)
def incr(self, *args, **kwargs):
"""
Pass incr cache method to _call_with_fallback
"""
return self._call_with_fallback("incr", *args, **kwargs)
def decr(self, *args, **kwargs):
"""
Pass decr cache method to _call_with_fallback
"""
return self._call_with_fallback("decr", *args, **kwargs)
def METHOD_NAME(self, *args, **kwargs):
"""
Pass set_many cache method to _call_with_fallback
"""
return self._call_with_fallback("set_many", *args, **kwargs)
def delete_many(self, *args, **kwargs):
"""
Pass delete_many cache method to _call_with_fallback
"""
return self._call_with_fallback("delete_many", *args, **kwargs)
def clear(self):
"""
Pass clear cache method to _call_with_fallback
"""
return self._call_with_fallback("clear")
def validate_key(self, *args, **kwargs):
"""
Pass validate_key cache method to _call_with_fallback
"""
return self._call_with_fallback("validate_key", *args, **kwargs)
def incr_version(self, *args, **kwargs):
"""
Pass incr_version cache method to _call_with_fallback
"""
return self._call_with_fallback("incr_version", *args, **kwargs)
def decr_version(self, *args, **kwargs):
"""
Pass decr_version cache method to _call_with_fallback
"""
return self._call_with_fallback("decr_version", *args, **kwargs)
|
299,525 |
dataclass
|
################### Dataclasses_fallback ###############################
# This is the fallback dataclass code if the stdlib module isn't available.
# It defines enough of the support types to be used with cdef classes
# and to fail if used on regular types.
# (Intended to be included as py code - not compiled)
from collections import namedtuple
try:
from types import MappingProxyType
except ImportError:
# mutable fallback if unavailable
MappingProxyType = lambda x: x
class _MISSING_TYPE(object):
pass
MISSING = _MISSING_TYPE()
_DataclassParams = namedtuple('_DataclassParams',
["init", "repr", "eq", "order", "unsafe_hash", "frozen",
"match_args", "kw_only", "slots", "weakref_slot"])
class Field(object):
__slots__ = ('name',
'type',
'default',
'default_factory',
'repr',
'hash',
'init',
'compare',
'metadata',
'kw_only',
'_field_type', # Private: not to be used by user code.
)
def __init__(self, default, default_factory, init, repr, hash, compare,
metadata, kw_only):
self.name = None
self.type = None
self.default = default
self.default_factory = default_factory
self.init = init
self.repr = repr
self.hash = hash
self.compare = compare
        # Be aware that if MappingProxyType is unavailable (i.e. py2?) then we
        # don't enforce the immutability that the real module does
self.metadata = (MappingProxyType({})
if metadata is None else
MappingProxyType(metadata))
self.kw_only = kw_only
self._field_type = None
def __repr__(self):
return ('Field('
'name={0!r},'
'type={1!r},'
'default={2!r},'
'default_factory={3!r},'
'init={4!r},'
'repr={5!r},'
'hash={6!r},'
'compare={7!r},'
'metadata={8!r},'
                'kw_only={9!r},'
')'.format(self.name, self.type, self.default,
self.default_factory, self.init,
self.repr, self.hash, self.compare,
self.metadata, self.kw_only))
# A sentinel object for default values to signal that a default
# factory will be used. This is given a nice repr() which will appear
# in the function signature of dataclasses' constructors.
class _HAS_DEFAULT_FACTORY_CLASS:
def __repr__(self):
return '<factory>'
_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
def METHOD_NAME(*args, **kwds):
raise NotImplementedError("Standard library 'dataclasses' module"
"is unavailable, likely due to the version of Python you're using.")
# Markers for the various kinds of fields and pseudo-fields.
class _FIELD_BASE:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
_FIELD = _FIELD_BASE('_FIELD')
_FIELD_CLASSVAR = _FIELD_BASE('_FIELD_CLASSVAR')
_FIELD_INITVAR = _FIELD_BASE('_FIELD_INITVAR')
def field(*ignore, **kwds):
default = kwds.pop("default", MISSING)
default_factory = kwds.pop("default_factory", MISSING)
init = kwds.pop("init", True)
repr = kwds.pop("repr", True)
hash = kwds.pop("hash", None)
compare = kwds.pop("compare", True)
metadata = kwds.pop("metadata", None)
kw_only = kwds.pop("kw_only", None)
if kwds:
raise ValueError("field received unexpected keyword arguments: %s"
% list(kwds.keys()))
if default is not MISSING and default_factory is not MISSING:
raise ValueError('cannot specify both default and default_factory')
if ignore:
raise ValueError("'field' does not take any positional arguments")
return Field(default, default_factory, init,
repr, hash, compare, metadata, kw_only)
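# Hedged usage sketch (not part of the fallback module): field() works on its own and
# returns a Field carrying the declared options, while the dataclass() stub above is
# expected to raise NotImplementedError because the real stdlib module is unavailable.
def _example_field_usage():
    f = field(default=0, repr=False, metadata={"unit": "px"})
    print(f)                   # Field(name=None, type=None, default=0, ...)
    return f.metadata["unit"]  # -> 'px'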
|
299,526 |
compute bounds
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Show vector field flow
"""
from __future__ import division
from vispy import app, scene, visuals, gloo
from vispy.util import ptime
import numpy as np
class VectorFieldVisual(visuals.Visual):
vertex = """
uniform sampler2D field;
attribute vec2 index;
uniform vec2 shape;
uniform vec2 field_shape;
uniform float spacing;
varying float dist; // distance along path for this vertex
varying vec2 ij;
uniform sampler2D offset;
uniform float seg_len;
uniform int n_iter; // iterations to integrate along field per vertex
uniform vec2 attractor;
varying vec4 base_color;
uniform sampler2D color;
void main() {
// distance along one line
dist = index.y * seg_len;
vec2 local;
ij = vec2(mod(index.x, shape.x), floor(index.x / shape.x));
// *off* is a random offset to the starting location, which prevents
// the appearance of combs in the field
vec2 off = texture2D(offset, ij / shape).xy - 0.5;
local = spacing * (ij + off);
vec2 uv;
vec2 dir;
vec2 da;
int index_y = int(index.y);
for( int i=0; i<index.y; i+=1 ) {
for ( int j=0; j<n_iter; j += 1 ) {
uv = local / field_shape;
dir = texture2D(field, uv).xy;
// add influence of variable attractor (mouse)
da = attractor - local;
float al = 0.1 * length(da);
da /= 0.5 * (1 + al*al);
dir += da;
// maybe pick a more accurate integration method?
local += seg_len * dir / n_iter;
}
}
base_color = texture2D(color, uv);
gl_Position = $transform(vec4(local, 0, 1));
}
"""
fragment = """
uniform float time;
uniform float speed;
varying float dist;
varying vec2 ij;
uniform sampler2D offset;
uniform vec2 shape;
uniform float nseg;
uniform float seg_len;
varying vec4 base_color;
void main() {
float totlen = nseg * seg_len;
float phase = texture2D(offset, ij / shape).b;
float alpha;
// vary alpha along the length of the line to give the appearance of
// motion
alpha = mod((dist / totlen) + phase - time * speed, 1);
// add a cosine envelope to fade in and out smoothly at the ends
alpha *= (1 - cos(2 * 3.141592 * dist / totlen)) * 0.5;
gl_FragColor = vec4(base_color.rgb, base_color.a * alpha);
}
"""
def __init__(self, field, spacing=10, segments=3, seg_len=0.5,
color=(1, 1, 1, 0.3)):
self._time = 0.0
self._last_time = ptime.time()
rows = int(field.shape[0] / spacing)
cols = int(field.shape[1] / spacing)
index = np.empty((rows * cols, int(segments) * 2, 2), dtype=np.float32)
# encodes starting position within vector field
index[:, :, 0] = np.arange(rows * cols)[:, np.newaxis]
# encodes distance along length of line
index[:, ::2, 1] = np.arange(segments)[np.newaxis, :]
index[:, 1::2, 1] = np.arange(segments)[np.newaxis, :] + 1
self._index = gloo.VertexBuffer(index)
if not isinstance(color, np.ndarray):
color = np.array([[list(color)]], dtype='float32')
self._color = gloo.Texture2D(color)
offset = np.random.uniform(256, size=(rows, cols, 3)).astype(np.ubyte)
self._offset = gloo.Texture2D(offset, format='rgb')
self._field = gloo.Texture2D(field, format='rg',
internalformat='rg32f',
interpolation='linear')
self._field_shape = field.shape[:2]
visuals.Visual.__init__(self, vcode=self.vertex, fcode=self.fragment)
self.timer = app.Timer(interval='auto', connect=self.update_time,
start=False)
self.freeze()
self.shared_program['field'] = self._field
self.shared_program['field_shape'] = self._field.shape[:2]
self.shared_program['shape'] = (rows, cols)
self.shared_program['index'] = self._index
self.shared_program['spacing'] = spacing
self.shared_program['t'] = self._time
self.shared_program['offset'] = self._offset
self.shared_program['speed'] = 1
self.shared_program['color'] = self._color
self.shared_program['seg_len'] = seg_len
self.shared_program['nseg'] = segments
self.shared_program['n_iter'] = 1
self.shared_program['attractor'] = (0, 0)
self.shared_program['time'] = 0
self._draw_mode = 'lines'
self.set_gl_state('translucent', depth_test=False)
self.timer.start()
def _prepare_transforms(self, view):
view.view_program.vert['transform'] = view.get_transform()
def _prepare_draw(self, view):
pass
def METHOD_NAME(self, axis, view):
if axis > 1:
return (0, 0)
return (0, self._field_shape[axis])
def update_time(self, ev):
t = ptime.time()
self._time += t - self._last_time
self._last_time = t
self.shared_program['time'] = self._time
self.update()
VectorField = scene.visuals.create_visual_node(VectorFieldVisual)
def fn(y, x):
dx = x-50
dy = y-30
hyp = (dx**2 + dy**2)**0.5 + 0.01
return np.array([100 * dy / hyp**1.7, -100 * dx / hyp**1.8])
field = np.fromfunction(fn, (100, 100)).transpose(1, 2, 0).astype('float32')
field[..., 0] += 10 * np.cos(np.linspace(0, 2 * 3.1415, 100))
color = np.zeros((100, 100, 4), dtype='float32')
color[..., :2] = (field + 5) / 10.
color[..., 2] = 0.5
color[..., 3] = 0.5
canvas = scene.SceneCanvas(keys='interactive', show=True)
view = canvas.central_widget.add_view(camera='panzoom')
vfield = VectorField(field[..., :2], spacing=0.5, segments=30, seg_len=0.05,
parent=view.scene, color=color)
view.camera.set_range()
@canvas.connect
def on_mouse_move(event):
if 3 in event.buttons:
tr = canvas.scene.node_transform(vfield)
vfield.shared_program['attractor'] = tr.map(event.pos)[:2]
if __name__ == '__main__':
app.run()
|
299,527 |
test timeout missing default
|
import os
import ssl
from pathlib import Path
import certifi
import pytest
import httpx
def test_load_ssl_config():
context = httpx.create_ssl_context()
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
def test_load_ssl_config_verify_non_existing_path():
with pytest.raises(IOError):
httpx.create_ssl_context(verify="/path/to/nowhere")
def test_load_ssl_config_verify_existing_file():
context = httpx.create_ssl_context(verify=certifi.where())
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
@pytest.mark.parametrize("config", ("SSL_CERT_FILE", "SSL_CERT_DIR"))
def test_load_ssl_config_verify_env_file(
https_server, ca_cert_pem_file, config, cert_authority
):
os.environ[config] = (
ca_cert_pem_file
if config.endswith("_FILE")
else str(Path(ca_cert_pem_file).parent)
)
context = httpx.create_ssl_context(trust_env=True)
cert_authority.configure_trust(context)
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
assert len(context.get_ca_certs()) == 1
def test_load_ssl_config_verify_directory():
path = Path(certifi.where()).parent
context = httpx.create_ssl_context(verify=str(path))
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
def test_load_ssl_config_cert_and_key(cert_pem_file, cert_private_key_file):
context = httpx.create_ssl_context(cert=(cert_pem_file, cert_private_key_file))
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
@pytest.mark.parametrize("password", [b"password", "password"])
def test_load_ssl_config_cert_and_encrypted_key(
cert_pem_file, cert_encrypted_private_key_file, password
):
context = httpx.create_ssl_context(
cert=(cert_pem_file, cert_encrypted_private_key_file, password)
)
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
def test_load_ssl_config_cert_and_key_invalid_password(
cert_pem_file, cert_encrypted_private_key_file
):
with pytest.raises(ssl.SSLError):
httpx.create_ssl_context(
cert=(cert_pem_file, cert_encrypted_private_key_file, "password1")
)
def test_load_ssl_config_cert_without_key_raises(cert_pem_file):
with pytest.raises(ssl.SSLError):
httpx.create_ssl_context(cert=cert_pem_file)
def test_load_ssl_config_no_verify():
context = httpx.create_ssl_context(verify=False)
assert context.verify_mode == ssl.VerifyMode.CERT_NONE
assert context.check_hostname is False
def test_load_ssl_context():
ssl_context = ssl.create_default_context()
context = httpx.create_ssl_context(verify=ssl_context)
assert context is ssl_context
def test_create_ssl_context_with_get_request(server, cert_pem_file):
context = httpx.create_ssl_context(verify=cert_pem_file)
response = httpx.get(server.url, verify=context)
assert response.status_code == 200
def test_limits_repr():
limits = httpx.Limits(max_connections=100)
expected = "Limits(max_connections=100, max_keepalive_connections=None, keepalive_expiry=5.0)"
assert repr(limits) == expected
def test_limits_eq():
limits = httpx.Limits(max_connections=100)
assert limits == httpx.Limits(max_connections=100)
def test_timeout_eq():
timeout = httpx.Timeout(timeout=5.0)
assert timeout == httpx.Timeout(timeout=5.0)
def test_timeout_all_parameters_set():
timeout = httpx.Timeout(connect=5.0, read=5.0, write=5.0, pool=5.0)
assert timeout == httpx.Timeout(timeout=5.0)
def test_timeout_from_nothing():
timeout = httpx.Timeout(None)
assert timeout.connect is None
assert timeout.read is None
assert timeout.write is None
assert timeout.pool is None
def test_timeout_from_none():
timeout = httpx.Timeout(timeout=None)
assert timeout == httpx.Timeout(None)
def test_timeout_from_one_none_value():
timeout = httpx.Timeout(None, read=None)
assert timeout == httpx.Timeout(None)
def test_timeout_from_one_value():
timeout = httpx.Timeout(None, read=5.0)
assert timeout == httpx.Timeout(timeout=(None, 5.0, None, None))
def test_timeout_from_one_value_and_default():
timeout = httpx.Timeout(5.0, pool=60.0)
assert timeout == httpx.Timeout(timeout=(5.0, 5.0, 5.0, 60.0))
def METHOD_NAME():
with pytest.raises(ValueError):
httpx.Timeout(pool=60.0)
def test_timeout_from_tuple():
timeout = httpx.Timeout(timeout=(5.0, 5.0, 5.0, 5.0))
assert timeout == httpx.Timeout(timeout=5.0)
def test_timeout_from_config_instance():
timeout = httpx.Timeout(timeout=5.0)
assert httpx.Timeout(timeout) == httpx.Timeout(timeout=5.0)
def test_timeout_repr():
timeout = httpx.Timeout(timeout=5.0)
assert repr(timeout) == "Timeout(timeout=5.0)"
timeout = httpx.Timeout(None, read=5.0)
assert repr(timeout) == "Timeout(connect=None, read=5.0, write=None, pool=None)"
@pytest.mark.skipif(
not hasattr(ssl.SSLContext, "keylog_filename"),
reason="requires OpenSSL 1.1.1 or higher",
)
def test_ssl_config_support_for_keylog_file(tmpdir, monkeypatch): # pragma: no cover
with monkeypatch.context() as m:
m.delenv("SSLKEYLOGFILE", raising=False)
context = httpx.create_ssl_context(trust_env=True)
assert context.keylog_filename is None
filename = str(tmpdir.join("test.log"))
with monkeypatch.context() as m:
m.setenv("SSLKEYLOGFILE", filename)
context = httpx.create_ssl_context(trust_env=True)
assert context.keylog_filename == filename
context = httpx.create_ssl_context(trust_env=False)
assert context.keylog_filename is None
def test_proxy_from_url():
proxy = httpx.Proxy("https://example.com")
assert str(proxy.url) == "https://example.com"
assert proxy.auth is None
assert proxy.headers == {}
assert repr(proxy) == "Proxy('https://example.com')"
def test_proxy_with_auth_from_url():
proxy = httpx.Proxy("https://username:[email protected]")
assert str(proxy.url) == "https://example.com"
assert proxy.auth == ("username", "password")
assert proxy.headers == {}
assert repr(proxy) == "Proxy('https://example.com', auth=('username', '********'))"
def test_invalid_proxy_scheme():
with pytest.raises(ValueError):
httpx.Proxy("invalid://example.com")
|
299,528 |
read from mailbox
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2020 The Pybricks Authors
"""
Classes to exchange messages between EV3 bricks.
"""
from __future__ import annotations
from abc import abstractmethod
from typing import TypeVar, Optional, Callable, Generic
T = TypeVar("T")
class Connection:
@abstractmethod
def METHOD_NAME(self, name: str) -> bytes:
...
@abstractmethod
def send_to_mailbox(self, name: str, data: bytes) -> None:
...
@abstractmethod
def wait_for_mailbox_update(self, name: str) -> None:
...
class Mailbox(Generic[T]):
def __init__(
self,
name: str,
connection: Connection,
encode: Optional[Callable[[T], bytes]] = None,
decode: Optional[Callable[[bytes], T]] = None,
):
"""Mailbox(name, connection, encode=None, decode=None)
Object that represents a mailbox containing data.
You can read data that is delivered by other EV3 bricks, or send data
to other bricks that have the same mailbox.
        By default, the mailbox reads and sends only bytes. To send other
data, you can provide an ``encode`` function that encodes your Python
object into bytes, and a ``decode`` function to convert bytes back to
a Python object.
Arguments:
name (str):
The name of this mailbox.
connection:
A connection object such as :class:`BluetoothMailboxClient`.
encode (callable):
Function that encodes a Python object to bytes.
decode (callable):
Function that creates a new Python object from bytes.
"""
def read(self) -> T:
"""read()
Gets the current value of the mailbox.
Returns:
The current value or ``None`` if the mailbox is empty.
"""
return ""
def send(self, value: T, brick: Optional[str] = None) -> None:
"""send(value, brick=None)
Sends a value to this mailbox on connected devices.
Arguments:
value:
The value that will be delivered to the mailbox.
brick (str):
The name or Bluetooth address of the brick or ``None`` to
                broadcast to all connected devices.
Raises:
OSError:
There is a problem with the connection.
"""
def wait(self) -> None:
"""wait()
        Waits for the mailbox to be updated by a remote device."""
def wait_new(self) -> T:
"""wait_new()
Waits for a new value to be delivered to the mailbox that is not
equal to the current value in the mailbox.
Returns:
The new value.
"""
return object()
class LogicMailbox(Mailbox[bool]):
def __init__(self, name: str, connection: Connection):
"""LogicMailbox(name, connection)
Object that represents a mailbox containing boolean data.
This works just like a regular :class:`Mailbox`, but values
must be ``True`` or ``False``.
This is compatible with the "logic" mailbox type in EV3-G.
Arguments:
name (str):
The name of this mailbox.
connection:
A connection object such as :class:`BluetoothMailboxClient`.
"""
class NumericMailbox(Mailbox[float]):
def __init__(self, name: str, connection: Connection):
"""NumericMailbox(name, connection)
Object that represents a mailbox containing numeric data.
This works just like a regular :class:`Mailbox`, but values must be a
number, such as ``15`` or ``12.345``
This is compatible with the "numeric" mailbox type in EV3-G.
Arguments:
name (str):
The name of this mailbox.
connection:
A connection object such as :class:`BluetoothMailboxClient`.
"""
class TextMailbox(Mailbox[str]):
def __init__(self, name: str, connection: Connection):
"""TextMailbox(name, connection)
Object that represents a mailbox containing text data.
This works just like a regular :class:`Mailbox`, but data must be a
string, such as ``'hello!'``.
This is compatible with the "text" mailbox type in EV3-G.
Arguments:
name (str):
The name of this mailbox.
connection:
A connection object such as :class:`BluetoothMailboxClient`.
"""
class BluetoothMailboxServer:
"""Object that represents a Bluetooth connection from one or more remote
EV3s.
The remote EV3s can either be running MicroPython or the standard EV3
firmware.
A "server" waits for a "client" to connect to it.
"""
def __enter__(self) -> BluetoothMailboxServer:
return self
def __exit__(self, type, value, traceback) -> None:
self.server_close()
def wait_for_connection(self, count: int = 1) -> None:
"""wait_for_connection(count=1)
Waits for a :class:`BluetoothMailboxClient` on a remote device to
connect.
Arguments:
count (int):
The number of remote connections to wait for.
Raises:
OSError:
There was a problem establishing the connection.
"""
def server_close(self) -> None:
"""server_close()
Closes all connections."""
class BluetoothMailboxClient:
"""Object that represents a Bluetooth connection to one or more remote EV3s.
The remote EV3s can either be running MicroPython or the standard EV3
firmware.
A "client" initiates a connection to a waiting "server".
"""
def __enter__(self) -> BluetoothMailboxClient:
return self
def __exit__(self, type, value, traceback) -> None:
self.close()
def connect(self, brick: str) -> None:
"""connect(brick)
Connects to an :class:`BluetoothMailboxServer` on another device.
The remote device must be paired and waiting for a connection. See
:meth:`BluetoothMailboxServer.wait_for_connection`.
Arguments:
brick (str):
The name or Bluetooth address of the remote EV3 to connect to.
Raises:
OSError:
There was a problem establishing the connection.
"""
def close(self) -> None:
"""close()
Closes all connections."""
|
299,529 |
executor
|
from typing import TYPE_CHECKING
from jina.parsers.helper import _update_gateway_args
if TYPE_CHECKING:
from argparse import Namespace
def deployment(args: 'Namespace'):
"""
Start a Deployment
:param args: arguments coming from the CLI.
"""
from jina.orchestrate.deployments import Deployment
if args.uses:
dep = Deployment.load_config(args.uses)
with dep:
dep.block()
else:
raise ValueError('starting a Deployment from CLI requires a valid `--uses`')
def pod(args: 'Namespace'):
"""
Start a Pod
:param args: arguments coming from the CLI.
"""
from jina.orchestrate.pods.factory import PodFactory
try:
with PodFactory.build_pod(args) as p:
p.join()
except KeyboardInterrupt:
pass
def executor_native(args: 'Namespace'):
"""
Starts an Executor in a WorkerRuntime
:param args: arguments coming from the CLI.
"""
from jina.serve.executors.run import run, run_stateful
import multiprocessing
from jina.jaml import JAML
envs = {}
envs.update(args.env or {})
if not args.stateful:
run(name=args.name,
args=args,
runtime_cls=args.runtime_cls,
envs=envs,
is_started=multiprocessing.Event(),
is_signal_handlers_installed=multiprocessing.Event(),
is_shutdown=multiprocessing.Event(),
is_ready=multiprocessing.Event(),
jaml_classes=JAML.registered_classes())
else:
run_stateful(name=args.name,
args=args,
runtime_cls=args.runtime_cls,
envs=envs)
def METHOD_NAME(args: 'Namespace'):
"""
Starts an Executor in any Runtime
:param args: arguments coming from the CLI.
    :returns: the same as `pod` or `executor_native`
"""
args.host = args.host[0]
args.port_monitoring = args.port_monitoring[0]
if args.native:
return executor_native(args)
else:
return pod(args)
def gateway(args: 'Namespace'):
"""
Start a Gateway Deployment
:param args: arguments coming from the CLI.
"""
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
args.port_monitoring = args.port_monitoring[0]
_update_gateway_args(args)
with AsyncNewLoopRuntime(args, req_handler_cls=GatewayRequestHandler) as runtime:
runtime.logger.info(f'Gateway started')
runtime.run_forever()
def ping(args: 'Namespace'):
"""
Check the connectivity of a Pod
:param args: arguments coming from the CLI.
"""
from jina.checker import NetworkChecker
NetworkChecker(args)
def dryrun(args: 'Namespace'):
"""
Check the health of a Flow
:param args: arguments coming from the CLI.
"""
from jina.checker import dry_run_checker
dry_run_checker(args)
def client(args: 'Namespace'):
"""
    Start a client that connects to the gateway
:param args: arguments coming from the CLI.
"""
from jina.clients import Client
Client(args)
def export(args: 'Namespace'):
"""
Export the API
:param args: arguments coming from the CLI.
"""
from jina import exporter
getattr(exporter, f'export_{args.export.replace("-", "_")}')(args)
def flow(args: 'Namespace'):
"""
Start a Flow from a YAML file or a docker image
:param args: arguments coming from the CLI.
"""
from jina import Flow
if args.uses:
f = Flow.load_config(args.uses)
with f:
f.block()
else:
raise ValueError('starting a Flow from CLI requires a valid `--uses`')
def hub(args: 'Namespace'):
"""
Start a hub builder for push, pull
:param args: arguments coming from the CLI.
"""
from hubble.METHOD_NAME.hubio import HubIO
getattr(HubIO(args), args.hub_cli)()
def new(args: 'Namespace'):
"""
Create a new jina project
:param args: arguments coming from the CLI.
"""
import os
import shutil
from jina.constants import __resources_path__
if args.type == 'deployment':
shutil.copytree(
os.path.join(__resources_path__, 'project-template', 'deployment'), os.path.abspath(args.name)
)
else:
shutil.copytree(
os.path.join(__resources_path__, 'project-template', 'flow'), os.path.abspath(args.name)
)
def help(args: 'Namespace'):
"""
Lookup the usage of certain argument in Jina API.
:param args: arguments coming from the CLI.
"""
from jina_cli.lookup import lookup_and_print
lookup_and_print(args.query.lower())
def auth(args: 'Namespace'):
"""
Authenticate a user
:param args: arguments coming from the CLI.
"""
from hubble import api
getattr(api, args.auth_cli.replace('-', '_'))(args)
def cloud(args: 'Namespace'):
"""
Use jcloud (Jina Cloud) commands
:param args: arguments coming from the CLI.
"""
from jcloud import api
getattr(api, args.jc_cli.replace('-', '_'))(args)
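# Hedged usage sketch (not part of the CLI module): the handlers above expect an
# argparse Namespace; driving one of them programmatically, e.g. starting a Flow from
# a YAML file, might look like this. "flow.yml" is a placeholder path.
def _example_start_flow_from_yaml():
    from argparse import Namespace
    flow(Namespace(uses="flow.yml"))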
|
299,530 |
handler
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"servicebus namespace private-endpoint-connection delete",
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
"""Delete an existing Private Endpoint Connection.
"""
_aaz_info = {
"version": "2022-10-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicebus/namespaces/{}/privateendpointconnections/{}", "2022-10-01-preview"],
]
}
AZ_SUPPORT_NO_WAIT = True
def METHOD_NAME(self, command_args):
super().METHOD_NAME(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.namespace_name = AAZStrArg(
options=["--namespace-name"],
help="The namespace name",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.private_endpoint_connection_name = AAZStrArg(
options=["-n", "--name", "--private-endpoint-connection-name"],
help="The PrivateEndpointConnection name",
required=True,
id_part="child_name_1",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.PrivateEndpointConnectionsDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class PrivateEndpointConnectionsDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"namespaceName", self.ctx.args.namespace_name,
required=True,
),
**self.serialize_url_param(
"privateEndpointConnectionName", self.ctx.args.private_endpoint_connection_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-10-01-preview",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"]
|
299,531 |
set options
|
from pkilib.common.exceptions import DirSrvException
from pkilib.common.factory import PkiTools
from pkilib.common.Qe_class import QeHost
from pkilib.common.mh_libdirsrv import DirSrv
from os.path import exists
import os.path
import subprocess
import socket
class W_DirSrv(object):
"""
This is a wrapper class for DirSrv object which validates all the inputs sent to DirSrv object.
Selects Ldap and SSL Ports sanely, Validates the inputs and in cases uses certain default values in
cases not all options are provided.
Defaults:
**DSInstHost: localhost**
    **DSRootDNPwd: Secret123**
    **DSInstSuffix: 'dc=example,dc=org'**
    **Ldap and TLS ports are chosen from the available list of ports below:**
**DSPorts: [30389, 31389, 32389, 33389, 34389, 35389, 36389, 37389, 38389, 39389]**
**TLSPorts: [30636, 31636, 32636, 33636, 34636, 35636, 36636, 37636, 38636, 39636]**
"""
def __init__(self,Host=None):
"""
Create a DirSrv object for a specific Host. Specify the ports, Instance details to the Dirsrv object
:param str Host: Host
"""
self.DSUsedPorts = {}
self.DirSrvInfo = {}
self.DirSrvInst = None
self.Host = Host
def METHOD_NAME(self):
"""
Set default values:
Defaults:
DSInstHost: localhost
        DSRootDNPwd: Secret123
        DSInstSuffix: 'dc=example,dc=org'
        Ldap and TLS ports are chosen from the available list of ports below:
        DSPorts = [30389, 31389, 32389, 33389, 34389, 35389, 36389, 37389, 38389, 39389]
TLSPorts = [30636, 31636, 32636, 33636, 34636, 35636, 36636, 37636, 38636, 39636]
"""
if self.Host is None:
self.DSInstHost = socket.gethostname()
else:
self.DSInstHost = self.Host.hostname
if self.DSRootDNPwd is None:
self.DSRootDNPwd = 'Secret123'
if self.DSInstSuffix is None:
self.DSInstSuffix = "dc=example,dc=org"
#Get ports
try:
self.DSLdapPort, self.DSTLSPort = self._set_ports(self.DSLdapPort, self.DSTLSPort)
except IndexError as err:
return ("No More ports available", 1)
else:
self.DSUsedPorts[self.DSIntName]=[self.DSLdapPort, self.DSTLSPort]
#validate Instance
try:
self._validate_options()
except DirSrvException as err:
return err.msg, err.rval
else:
return ("Success", 0)
def _set_ports(self, u_port, e_port):
"""
Idea behind this is when a directory server instance needs
to be created we need ports for ldap and ssl ports.
1. check if LdapPort and SSLPort is given
1.1 If given, verify if the ports are available(not used)
1.1.1. Bind that port to ldap_port_t using semanage command
1.1.2. Use the ports and add it to the self.UsedPorts list
1.2 else raise exception
2. If LdapPort and SSLPort is not given.
2.1 Check if the ports are available(not used)
2.1.1. Bind the port to ldap_port_t using semanage command
2.1.2. Use the ports and add it to self.UsedPorts list
"""
DSPorts = [30389, 31389, 32389, 33389, 34389, 35389, 36389, 37389, 38389, 39389]
TLSPorts = [30636, 31636, 32636, 33636, 34636, 35636, 36636, 37636, 38636, 39636]
if u_port is None and e_port is None:
for ldap_port, ldaps_port in zip(DSPorts, TLSPorts):
if (self._check_remote_port(ldap_port) or self._check_remote_port(ldaps_port)):
pass
else:
return ldap_port, ldaps_port
else:
a = []
for ports in self.DSUsedPorts.values():
                a.append(tuple(ports))  # tuples so the set() comparisons below work
b = []
for l_port, s_port in zip(DSPorts, TLSPorts):
b.append((l_port,s_port))
if (len(set(a)) > len(set(b))):
available_ports = set(a) - set(b)
else:
available_ports = set(b) - set(a)
print("available_ports =", available_ports)
sorted_available_ports = sorted(available_ports)
return sorted_available_ports[0]
    def _check_remote_port(self, port):
        """
        Checks whether something is already listening on the given port of the remote host.
        :param: int port:
        :return bool: True if the port is in use, False if it is free or unreachable
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(1)
        try:
            s.connect((self.DSInstHost, port))
        except socket.error as e:
            print("Unable to connect to port %s due to error %r" % (port, e.errno))
            return False
        s.close()
        return True
def _validate_options(self):
"""
        Verifies if the instance directory already exists
        :param: None
        :return: raises DirSrvException if the instance directory already exists else returns True
"""
if isinstance(self.Host, QeHost):
check_instance = ['ls ' '/etc/dirsrv/slapd-%s' % self.DSIntName]
try:
output = self.Host.run_command(check_instance, log_stdout=True,raiseonerr=True)
except subprocess.CalledProcessError as E:
return True
else:
raise DirSrvException('%s Instance already Exists' % self.DSIntName)
else:
if exists(os.path.join('/etc/dirsrv/', 'slapd-%s' % self.DSIntName)):
raise DirSrvException('%s Instance already Exists' % self.DSIntName)
else:
return True
def CreateInstance(self, InstName, InstHost=None, InstSuffix=None, RootDNPwd=None, LdapPort=None, TLSPort=None):
"""
Creates Directory server instances
:param str InstName: Instance Name
:param str InstHost: Host on which instance should be created
:param str InstSuffix: Suffix to be created
:param str RootDNPwd: Root DN password
:param str LdapPort: Ldap Port to be used
:param str TLSPort: TLSPort port to be used
:return str result, return_code: output of the command and return code
:raises DirSrvException: if Directory server instance could not be created
"""
self.DSIntName = InstName
self.DSInstHost = InstHost
        self.DSInstSuffix = InstSuffix
self.DSRootDNPwd = RootDNPwd
self.DSLdapPort = LdapPort
self.DSTLSPort = TLSPort
result, return_code = self.METHOD_NAME()
if return_code == 0:
self.DirSrvInst = DirSrv(self.DSIntName, self.DSInstHost,
self.DSInstSuffix, self.DSRootDNPwd, self.DSLdapPort,
self.DSTLSPort,self.Host)
cfg_file = self.DirSrvInst.create_config()
result = self.DirSrvInst.Setup_DSInstance(cfg_file)
self.DirSrvInfo[self.DSIntName] = self.DirSrvInst.__dict__
return result, return_code
else:
raise DirSrvException('Could not setup Directory Server Instance')
def RemoveInstance(self, InstName):
"""
Removes Directory server instance
:param str InstName:
:return bool: Returns True
:raises DirSrvException: if Directory Server instance cannot be removed
"""
ret = self.DirSrvInfo[InstName]
if ret['InstName'] == InstName:
DSInstName = ret['DSInstName']
result = self.DirSrvInst.Remove_DSInstance(DSInstName)
if result:
del self.DSUsedPorts[InstName]
return True
else:
raise DirSrvException('Could not remove Directory Server Instance', DSInstName)
else:
raise DirSrvException("%s Instance could not be found" %(InstName))
|
299,532 |
name
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListWebAppApplicationSettingsSlotResult',
'AwaitableListWebAppApplicationSettingsSlotResult',
'list_web_app_application_settings_slot',
'list_web_app_application_settings_slot_output',
]
@pulumi.output_type
class ListWebAppApplicationSettingsSlotResult:
"""
String dictionary resource.
"""
def __init__(__self__, id=None, kind=None, METHOD_NAME=None, properties=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", METHOD_NAME)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Mapping[str, str]:
"""
Settings.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(METHOD_NAME="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableListWebAppApplicationSettingsSlotResult(ListWebAppApplicationSettingsSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListWebAppApplicationSettingsSlotResult(
id=self.id,
kind=self.kind,
METHOD_NAME=self.METHOD_NAME,
properties=self.properties,
system_data=self.system_data,
type=self.type)
def list_web_app_application_settings_slot(METHOD_NAME: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppApplicationSettingsSlotResult:
"""
Gets the application settings of an app.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. If a slot is not specified, the API will get the application settings for the production slot.
"""
__args__ = dict()
__args__['name'] = METHOD_NAME
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:web/v20201001:listWebAppApplicationSettingsSlot', __args__, opts=opts, typ=ListWebAppApplicationSettingsSlotResult).value
return AwaitableListWebAppApplicationSettingsSlotResult(
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
METHOD_NAME=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(list_web_app_application_settings_slot)
def list_web_app_application_settings_slot_output(METHOD_NAME: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListWebAppApplicationSettingsSlotResult]:
"""
Gets the application settings of an app.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. If a slot is not specified, the API will get the application settings for the production slot.
"""
...
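# Hedged usage sketch (assumption, needs a configured Pulumi program/stack to actually
# run): calling the plain (non-Output) form with placeholder resource names; the
# awaitable result exposes the settings dict through its `properties` attribute.
def _example_list_settings():
    result = list_web_app_application_settings_slot(
        "my-app", resource_group_name="my-rg", slot="staging")
    return result.properties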
|
299,533 |
member group
|
from dataikuapi.dss.app import DSSApp
from dataikuapi.dss.dataset import DSSDataset
from dataikuapi.dss.wiki import DSSWikiArticle
class DSSWorkspace:
"""
A handle to interact with a workspace on the DSS instance.
Do not create this class directly, instead use :meth:`dataikuapi.DSSClient.get_workspace`
"""
def __init__(self, client, workspace_key):
self.client = client
self.workspace_key = workspace_key
def get_settings(self):
"""
Gets the settings of this workspace.
:returns: a handle to read, modify and save the settings
:rtype: :class:`DSSWorkspaceSettings`
"""
return DSSWorkspaceSettings(self, self.client._perform_json("GET", "/workspaces/%s" % self.workspace_key))
def list_objects(self):
"""
List the objects in this workspace
:returns: The list of the objects
:rtype: list of :class:`.DSSWorkspaceObject`
"""
objects = self.client._perform_json("GET", "/workspaces/%s/objects" % self.workspace_key)
return [DSSWorkspaceObject(self, object) for object in objects]
def add_object(self, object):
"""
Add an object to this workspace.
Object can be of different shapes (:class:`dataikuapi.dss.dataset.DSSDataset`, :class:`dataikuapi.dss.wiki.DSSWikiArticle`, :class:`dataikuapi.dss.app.DSSApp`, :class:`.DSSWorkspaceHtmlLinkObject` or a :class:`.dict` that contains the raw data)
"""
if isinstance(object, DSSDataset):
data = {"reference": {"projectKey": object.project_key, "type": "DATASET", "id": object.dataset_name}}
elif isinstance(object, DSSWikiArticle):
data = {"reference": {"projectKey": object.project_key, "type": "ARTICLE", "id": object.article_id}}
elif isinstance(object, DSSApp):
data = {"appId": object.app_id}
elif isinstance(object, DSSWorkspaceHtmlLinkObject):
data = {"htmlLink": {"name": object.name, "url": object.url, "description": object.description}}
elif isinstance(object, dict):
data = object
else:
raise ValueError("Unsupported object type")
self.client._perform_json("POST", "/workspaces/%s/objects" % self.workspace_key, body=data)
def delete(self):
"""
Delete the workspace
This call requires Administrator rights on the workspace.
"""
return self.client._perform_empty("DELETE", "/workspaces/%s" % self.workspace_key)
class DSSWorkspaceHtmlLinkObject:
def __init__(self, name, url, description):
self.name = name
self.url = url
self.description = description
class DSSWorkspaceObject:
"""
A handle on an object of a workspace
Do not create this class directly, instead use :meth:`dataikuapi.dss.DSSWorkspace.list_objects`
"""
def __init__(self, workspace, data):
self.workspace = workspace
self.data = data
def get_raw(self):
return self.data
def remove(self):
"""
Remove this object from the workspace
This call requires Contributor rights on the workspace.
"""
self.workspace.client._perform_empty(
"DELETE", "/workspaces/%s/objects/%s" % (self.workspace.workspace_key, self.data['id']))
class DSSWorkspaceSettings:
"""
A handle on the settings of a workspace
Do not create this class directly, instead use :meth:`dataikuapi.dss.DSSWorkspace.get_settings`
"""
def __init__(self, workspace, settings):
self.workspace = workspace
self.settings = settings
def get_raw(self):
return self.settings
@property
def display_name(self):
"""
Get or set the name of the workspace
:rtype: :class:`str`
"""
return self.settings['displayName']
@display_name.setter
def display_name(self, value):
self.settings['displayName'] = value
@property
def color(self):
"""
Get or set the background color of the workspace (using #xxxxxx syntax)
:rtype: :class:`str`
"""
return self.settings['color']
@color.setter
def color(self, value):
self.settings['color'] = value
@property
def description(self):
"""
Get or set the description of the workspace
:rtype: :class:`str`
"""
return self.settings['description']
@description.setter
def description(self, value):
self.settings['description'] = value
@property
def permissions(self):
"""
Get or set the permissions controlling who is a member, contributor or admin of the workspace
:rtype: list of :class:`.DSSWorkspacePermissionItem`
"""
return [DSSWorkspacePermissionItem(permission) for permission in self.settings['permissions']]
@permissions.setter
def permissions(self, value):
self.settings['permissions'] = value
@property
def current_user_permissions(self):
"""
Permissions of the current user (read-only)
:rtype: :class:`.DSSWorkspacePermissionItem`
"""
return DSSWorkspacePermissionItem(self.settings['currentUserPermissions'])
def save(self):
"""
Save the changes made on the settings
This call requires Administrator rights on the workspace.
"""
self.workspace.client._perform_empty(
"PUT", "/workspaces/%s" % self.workspace.workspace_key,
body=self.settings)
class DSSWorkspacePermissionItem(dict):
def __init__(self, data):
super(DSSWorkspacePermissionItem, self).__init__(data)
@classmethod
def admin_group(cls, group):
return cls({"group": group, "admin": True, "write": True, "read": True})
@classmethod
def contributor_group(cls, group):
return cls({"group": group, "admin": False, "write": True, "read": True})
@classmethod
def METHOD_NAME(cls, group):
return cls({"group": group, "admin": False, "write": False, "read": True})
@classmethod
def admin_user(cls, user):
return cls({"user": user, "admin": True, "write": True, "read": True})
@classmethod
def contributor_user(cls, user):
return cls({"user": user, "admin": False, "write": True, "read": True})
@classmethod
def member_user(cls, user):
return cls({"user": user, "admin": False, "write": False, "read": True})
@property
def user(self):
"""
Get user login
:rtype: :class:`str`
"""
return self['user']
@property
def group(self):
"""
Get group name
:rtype: :class:`str`
"""
return self['group']
@property
def admin(self):
"""
Get admin permission
:rtype: :class:`boolean`
"""
return self['admin']
@property
def write(self):
"""
Get write permission
:rtype: :class:`boolean`
"""
return self['write']
@property
def read(self):
"""
Get read permission
:rtype: :class:`boolean`
"""
return self['read']
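# Hedged usage sketch (assumption, not from the original module): granting a group
# contributor access to a workspace via the permission helpers above. The workspace
# key and group name are placeholders; `client` is an already-authenticated DSSClient.
def _example_grant_contributor(client):
    workspace = client.get_workspace("sales-analytics")
    settings = workspace.get_settings()
    settings.permissions = settings.permissions + [
        DSSWorkspacePermissionItem.contributor_group("analysts")
    ]
    settings.save()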
|
299,534 |
post
|
from pyaedt.application.Analysis import Analysis
from pyaedt.generic.general_methods import pyaedt_function_handler
class FieldAnalysisRMxprt(Analysis):
"""Manages RMXprt field analysis setup. (To be implemented.)
This class is automatically initialized by an application call (like HFSS,
Q3D...). Refer to the application function for inputs definition.
Parameters
----------
Returns
-------
"""
def __init__(
self,
application,
projectname,
designname,
solution_type,
setup_name=None,
specified_version=None,
non_graphical=False,
new_desktop_session=False,
close_on_exit=False,
student_version=False,
machine="",
port=0,
aedt_process_id=None,
):
Analysis.__init__(
self,
application,
projectname,
designname,
solution_type,
setup_name,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
machine,
port,
aedt_process_id,
)
self._modeler = None
self._post = None
@property
def METHOD_NAME(self):
"""Post Object.
Returns
-------
:class:`pyaedt.modules.PostProcessor.CircuitPostProcessor`
"""
if self._post is None: # pragma: no cover
from pyaedt.modules.PostProcessor import CircuitPostProcessor
self._post = CircuitPostProcessor(self)
return self._post
@property
def modeler(self):
"""Modeler.
Returns
-------
:class:`pyaedt.modules.modeler2d.ModelerRMxprt`
"""
if self._modeler is None:
from pyaedt.modeler.modeler2d import ModelerRMxprt
self._modeler = ModelerRMxprt(self)
return self._modeler
@pyaedt_function_handler()
def disable_modelcreation(self, solution_type=None):
"""Enable the RMxprt solution.
Parameters
----------
solution_type :
Type of the solution. The default is ``None``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
self._design_type = "RMxprtSolution"
self.solution_type = solution_type
return True
@pyaedt_function_handler()
def enable_modelcreation(self, solution_type=None):
"""Enable model creation for the Maxwell model wizard.
Parameters
----------
solution_type : str
Type of the solution. The default is ``None``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
self._design_type = "ModelCreation"
self.solution_type = solution_type
return True
@pyaedt_function_handler()
def set_material_threshold(self, conductivity=100000, permeability=100):
"""Set material threshold.
Parameters
----------
conductivity : float, optional
Conductivity threshold.
The default value is 100000.
permeability : float, optional
Permeability threshold.
The default value is 100.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
try:
self.odesign.SetThreshold(conductivity, permeability)
return True
except:
return False
@pyaedt_function_handler()
def _check_solution_consistency(self):
"""Check solution consistency."""
if self.design_solutions:
return self._odesign.GetSolutionType() == self.design_solutions._solution_type
else:
return True
@pyaedt_function_handler()
def _check_design_consistency(self):
"""Check design consistency."""
consistent = False
destype = self._odesign.GetDesignType()
if destype == "RMxprt":
consistent = self._check_solution_consistency()
return consistent
|
299,535 |
gen fname
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The niftyreg module provides classes for interfacing with `niftyreg
<http://sourceforge.net/projects/niftyreg/>`_ command line tools.
These are the base tools for working with niftyreg.
Registration tools are found in niftyreg/reg.py
Every other tool is found in niftyreg/regutils.py
Examples
--------
See the docstrings of the individual classes for examples.
"""
import os
from packaging.version import Version
from ... import logging
from ..base import CommandLine, CommandLineInputSpec, traits, Undefined, PackageInfo
from ...utils.filemanip import split_filename
iflogger = logging.getLogger("nipype.interface")
def get_custom_path(command, env_dir="NIFTYREGDIR"):
return os.path.join(os.getenv(env_dir, ""), command)
class Info(PackageInfo):
version_cmd = get_custom_path("reg_aladin") + " --version"
@staticmethod
def parse_version(raw_info):
return raw_info
class NiftyRegCommandInputSpec(CommandLineInputSpec):
"""Input Spec for niftyreg interfaces."""
# Set the number of omp thread to use
omp_core_val = traits.Int(
int(os.environ.get("OMP_NUM_THREADS", "1")),
desc="Number of openmp thread to use",
argstr="-omp %i",
usedefault=True,
)
class NiftyRegCommand(CommandLine):
"""
Base support interface for NiftyReg commands.
"""
_suffix = "_nr"
_min_version = "1.5.30"
input_spec = NiftyRegCommandInputSpec
def __init__(self, required_version=None, **inputs):
self.num_threads = 1
super().__init__(**inputs)
self.required_version = required_version
_version = self.version
if _version:
if self._min_version is not None and Version(_version) < Version(
self._min_version
):
msg = "A later version of Niftyreg is required (%s < %s)"
iflogger.warning(msg, _version, self._min_version)
if required_version is not None:
if Version(_version) != Version(required_version):
msg = "The version of NiftyReg differs from the required"
msg += "(%s != %s)"
iflogger.warning(msg, _version, self.required_version)
self.inputs.on_trait_change(self._omp_update, "omp_core_val")
self.inputs.on_trait_change(self._environ_update, "environ")
self._omp_update()
def _omp_update(self):
if self.inputs.omp_core_val:
self.inputs.environ["OMP_NUM_THREADS"] = str(self.inputs.omp_core_val)
self.num_threads = self.inputs.omp_core_val
else:
if "OMP_NUM_THREADS" in self.inputs.environ:
del self.inputs.environ["OMP_NUM_THREADS"]
self.num_threads = 1
def _environ_update(self):
if self.inputs.environ:
if "OMP_NUM_THREADS" in self.inputs.environ:
self.inputs.omp_core_val = int(self.inputs.environ["OMP_NUM_THREADS"])
else:
self.inputs.omp_core_val = Undefined
else:
self.inputs.omp_core_val = Undefined
def check_version(self):
_version = self.version
if not _version:
raise Exception("Niftyreg not found")
if Version(_version) < Version(self._min_version):
err = "A later version of Niftyreg is required (%s < %s)"
raise ValueError(err % (_version, self._min_version))
if self.required_version:
if Version(_version) != Version(self.required_version):
err = "The version of NiftyReg differs from the required"
err += "(%s != %s)"
raise ValueError(err % (_version, self.required_version))
@property
def version(self):
return Info.version()
def exists(self):
return self.version is not None
def _format_arg(self, name, spec, value):
if name == "omp_core_val":
            self.num_threads = value
return super()._format_arg(name, spec, value)
def METHOD_NAME(self, basename, out_dir=None, suffix=None, ext=None):
if basename == "":
msg = "Unable to generate filename for command %s. " % self.cmd
msg += "basename is not set!"
raise ValueError(msg)
_, final_bn, final_ext = split_filename(basename)
if out_dir is None:
out_dir = os.getcwd()
if ext is not None:
final_ext = ext
if suffix is not None:
final_bn = "".join((final_bn, suffix))
return os.path.abspath(os.path.join(out_dir, final_bn + final_ext))
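# Hedged usage sketch (assumption, heavily simplified): a minimal concrete interface
# built on the base class above. "reg_resample" is a real niftyreg tool name, but this
# input spec is stripped down for illustration and is not the real nipype interface.
class _ExampleResampleInputSpec(NiftyRegCommandInputSpec):
    ref_file = traits.Str(argstr="-ref %s", desc="path to the reference image")
    flo_file = traits.Str(argstr="-flo %s", desc="path to the floating image")


class _ExampleResample(NiftyRegCommand):
    _cmd = get_custom_path("reg_resample")
    input_spec = _ExampleResampleInputSpec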
|
299,536 |
test warning label
|
######################################################################################################################
# Copyright (C) 2017-2022 Spine project consortium
# This file is part of Spine Toolbox.
# Spine Toolbox is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details. You should have received a copy of the GNU Lesser General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
######################################################################################################################
"""
Unit tests for the models in ``custom_qwidgets`` module.
"""
import unittest
from contextlib import contextmanager
from PySide6.QtCore import Qt
from PySide6.QtWidgets import QApplication, QDialogButtonBox
from spinetoolbox.widgets.custom_qwidgets import FilterWidget, SelectDatabaseItemsDialog
from spinetoolbox.mvcmodels.filter_checkbox_list_model import DataToValueFilterCheckboxListModel
class TestFilterWidget(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not QApplication.instance():
QApplication()
def setUp(self):
self._widget = FilterWidget(None, DataToValueFilterCheckboxListModel, None, str)
self._widget.set_filter_list(["ei", "bii", "cii"])
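        # The checkbox list model prepends "(Select all)" and "(Empty)" rows,
        # so user-supplied items start at row 2 (see test_set_filter_list).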
def tearDown(self):
self._widget.close()
self._widget.deleteLater()
def test_set_filter_list(self):
self.assertFalse(self._widget.has_filter())
model = self._widget._ui_list.model()
data = [model.index(row, 0).data() for row in range(model.rowCount())]
self.assertEqual(data, ["(Select all)", "(Empty)", "ei", "bii", "cii"])
checked = [model.index(row, 0).data(Qt.ItemDataRole.CheckStateRole).value for row in range(model.rowCount())]
self.assertEqual(checked, 5 * [Qt.CheckState.Checked.value])
self.assertEqual(self._widget._filter_state, ["ei", "bii", "cii"])
self.assertIsNone(self._widget._filter_empty_state)
def test_click_Empty_item(self):
model = self._widget._ui_list.model()
self._widget._ui_list.clicked.emit(model.index(1, 0))
model = self._widget._ui_list.model()
checked = [model.index(row, 0).data(Qt.ItemDataRole.CheckStateRole).value for row in range(model.rowCount())]
self.assertEqual(
checked,
[
Qt.CheckState.Unchecked.value,
Qt.CheckState.Unchecked.value,
Qt.CheckState.Checked.value,
Qt.CheckState.Checked.value,
Qt.CheckState.Checked.value,
],
)
self.assertTrue(self._widget.has_filter())
def test_click_item(self):
model = self._widget._ui_list.model()
self._widget._ui_list.clicked.emit(model.index(2, 0))
model = self._widget._ui_list.model()
checked = [model.index(row, 0).data(Qt.ItemDataRole.CheckStateRole).value for row in range(model.rowCount())]
self.assertEqual(
checked,
[
Qt.CheckState.Unchecked.value,
Qt.CheckState.Checked.value,
Qt.CheckState.Unchecked.value,
Qt.CheckState.Checked.value,
Qt.CheckState.Checked.value,
],
)
self.assertTrue(self._widget.has_filter())
def test_click_Select_All_item(self):
model = self._widget._ui_list.model()
self._widget._ui_list.clicked.emit(model.index(0, 0))
model = self._widget._ui_list.model()
checked = [model.index(row, 0).data(Qt.ItemDataRole.CheckStateRole).value for row in range(model.rowCount())]
self.assertEqual(checked, 5 * [Qt.CheckState.Unchecked.value])
self.assertTrue(self._widget.has_filter())
def test_save_state(self):
model = self._widget._ui_list.model()
self._widget._ui_list.clicked.emit(model.index(2, 0))
self._widget.save_state()
self.assertEqual(self._widget._filter_state, {"bii", "cii"})
self.assertTrue(self._widget._filter_empty_state)
class TestSelectDatabaseItemsDialog(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not QApplication.instance():
QApplication()
def test_ok_button_text(self):
text = "Do it!"
with _select_database_items_dialog(None, text) as dialog:
self.assertEqual(dialog._ui.button_box.button(QDialogButtonBox.StandardButton.Ok).text(), text)
def METHOD_NAME(self):
with _select_database_items_dialog(None, None) as dialog:
self.assertEqual(dialog._ui.warning_label.text(), "")
dialog._item_check_boxes_widget._item_check_boxes["metadata"].setChecked(True)
self.assertEqual(dialog._ui.warning_label.text(), "Warning! Structural data items selected.")
@contextmanager
def _select_database_items_dialog(checked_states, ok_button_text):
dialog = SelectDatabaseItemsDialog(checked_states, ok_button_text)
try:
yield dialog
finally:
dialog.deleteLater()
if __name__ == "__main__":
unittest.main()
|
299,537 |
get name
|
#!/usr/bin/env python
#############################################################################
#
# Thermal contains an implementation of SONiC Platform Base API and
# provides the thermal device status which are available in the platform
#
#############################################################################
import os
import os.path
import subprocess
try:
from sonic_platform_base.thermal_base import ThermalBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
class Thermal(ThermalBase):
"""Platform-specific Thermal class"""
THERMAL_NAME_LIST = []
SYSFS_THERMAL_DIR = ["/sys/bus/i2c/devices/0-004f/",
"/sys/bus/i2c/devices/0-0049/",
"/sys/bus/i2c/devices/0-004a/",
"/sys/bus/i2c/devices/0-004b/",
"/sys/bus/i2c/devices/0-004c/",
"/sys/bus/i2c/devices/0-004d/",
"/sys/bus/i2c/devices/0-004e/"]
IPMI_SENSOR_NR = ["0x30", "0x31", "0x32", "0x33", "0x34", "0x35", "0x36"]
def __init__(self, thermal_index):
self.index = thermal_index
self.lnc = None
self.lcr = None
self.unc = None
self.ucr = None
# Add thermal name
self.THERMAL_NAME_LIST.append("Top-Rear")
self.THERMAL_NAME_LIST.append("Top-Front")
self.THERMAL_NAME_LIST.append("Right-Front")
self.THERMAL_NAME_LIST.append("Top-Center")
self.THERMAL_NAME_LIST.append("Left-Front")
self.THERMAL_NAME_LIST.append("Bottom-Front")
self.THERMAL_NAME_LIST.append("Bottom-Rear")
ThermalBase.__init__(self)
self.minimum_thermal = self.get_temperature()
self.maximum_thermal = self.get_temperature()
self.__initialize_threshold()
def __initialize_threshold(self):
cmd = ["ipmitool", "raw", "0x4", "0x27"]
if self.lnc is None:
cmd.append(self.IPMI_SENSOR_NR[self.index])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, err = p.communicate()
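            # Parse the raw sensor-threshold reply; the assumed byte layout is
            # [1]=lower non-critical, [2]=lower critical, [4]=upper non-critical,
            # [5]=upper critical, each reported by ipmitool as a hex string.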
self.unc = float(int(out.split()[4],16))
self.ucr = float(int(out.split()[5],16))
self.lnc = float(int(out.split()[1],16) if int(out.split()[1],16) != 0 else 2)
self.lcr = float(int(out.split()[2],16))
def __read_txt_file(self, file_path):
try:
with open(file_path, 'r') as fd:
data = fd.read()
return data.strip()
except IOError:
pass
return ""
def __get_temp(self, temp_file):
temp_file_path = os.path.join(self.SYSFS_THERMAL_DIR[self.index], temp_file)
raw_temp = self.__read_txt_file(temp_file_path)
temp = float(raw_temp)/1000
return "{:.3f}".format(temp)
def __set_threshold(self, file_name, temperature):
temp_file_path = os.path.join(self.SYSFS_THERMAL_DIR[self.index], file_name)
try:
with open(temp_file_path, 'w') as fd:
fd.write(str(temperature))
return True
except IOError:
return False
def get_temperature(self):
"""
Retrieves current temperature reading from thermal
Returns:
A float number of current temperature in Celsius up to nearest thousandth
of one degree Celsius, e.g. 30.125
"""
temp_file = "temp1_input"
return float(self.__get_temp(temp_file))
def get_low_threshold(self):
"""
Retrieves the low threshold temperature of thermal
:return: A float number, the low threshold temperature of thermal in Celsius
up to nearest thousandth of one degree Celsius, e.g. 30.125
"""
return self.lnc
def get_low_critical_threshold(self):
"""
Retrieves the low critical threshold temperature of thermal
:return: A float number, the low critical threshold temperature of thermal in Celsius
up to nearest thousandth of one degree Celsius, e.g. 30.125
"""
return self.lcr
def get_high_threshold(self):
"""
Retrieves the high threshold temperature of thermal
Returns:
A float number, the high threshold temperature of thermal in Celsius
up to nearest thousandth of one degree Celsius, e.g. 30.125
"""
return self.unc
def get_high_critical_threshold(self):
"""
Retrieves the high critical threshold temperature of thermal
:return: A float number, the high critical threshold temperature of thermal in Celsius
up to nearest thousandth of one degree Celsius, e.g. 30.125
"""
return self.ucr
def METHOD_NAME(self):
"""
Retrieves the name of the thermal device
Returns:
string: The name of the thermal device
"""
return self.THERMAL_NAME_LIST[self.index]
def get_presence(self):
"""
Retrieves the presence of the sensor
Returns:
bool: True if sensor is present, False if not
"""
temp_file = "temp1_input"
temp_file_path = os.path.join(self.SYSFS_THERMAL_DIR[self.index], temp_file)
return os.path.isfile(temp_file_path)
def get_status(self):
"""
Retrieves the operational status of the device
Returns:
A boolean value, True if device is operating properly, False if not
"""
if not self.get_presence():
return False
return True
def get_model(self):
"""
Retrieves the model number (or part number) of the device
Returns:
string: Model/part number of device
"""
return "None"
def get_serial(self):
"""
Retrieves the serial number of the device
Returns:
string: Serial number of device
"""
return "None"
def is_replaceable(self):
"""
Retrieves whether thermal module is replaceable
Returns:
A boolean value, True if replaceable, False if not
"""
return False
def get_position_in_parent(self):
"""
Retrieves 1-based relative physical position in parent device.
If the agent cannot determine the parent-relative position
for some reason, or if the associated value of
        entPhysicalContainedIn is '0', then the value '-1' is returned
Returns:
integer: The 1-based relative physical position in parent device
or -1 if cannot determine the position
"""
return self.index + 1
def get_minimum_recorded(self):
"""
Retrieves the minimum recorded temperature of thermal
Returns:
A float number, the minimum recorded temperature of thermal in Celsius
up to nearest thousandth of one degree Celsius, e.g. 30.125
"""
tmp = self.get_temperature()
if tmp < self.minimum_thermal:
self.minimum_thermal = tmp
return self.minimum_thermal
def get_maximum_recorded(self):
"""
Retrieves the maximum recorded temperature of thermal
Returns:
A float number, the maximum recorded temperature of thermal in Celsius
up to nearest thousandth of one degree Celsius, e.g. 30.125
"""
tmp = self.get_temperature()
if tmp > self.maximum_thermal:
self.maximum_thermal = tmp
return self.maximum_thermal
|
299,538 |
test release order
|
from unittest import TestCase
from unittest.mock import MagicMock, call, patch
from samcli.lib.utils.lock_distributor import LockChain, LockDistributor, LockDistributorType
class TestLockChain(TestCase):
def test_aquire_order(self):
locks = {"A": MagicMock(), "B": MagicMock(), "C": MagicMock()}
call_mock = MagicMock()
call_mock.a = locks["A"]
call_mock.b = locks["B"]
call_mock.c = locks["C"]
lock_chain = LockChain(locks)
lock_chain.acquire()
call_mock.assert_has_calls([call.a.acquire(), call.b.acquire(), call.c.acquire()])
def test_aquire_order_shuffled(self):
locks = {"A": MagicMock(), "C": MagicMock(), "B": MagicMock()}
call_mock = MagicMock()
call_mock.a = locks["A"]
call_mock.b = locks["B"]
call_mock.c = locks["C"]
lock_chain = LockChain(locks)
lock_chain.acquire()
call_mock.assert_has_calls([call.a.acquire(), call.b.acquire(), call.c.acquire()])
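
    # The shuffled variants above and below verify that LockChain acquires and
    # releases locks in sorted key order, independent of dict insertion order.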
def METHOD_NAME(self):
locks = {"A": MagicMock(), "B": MagicMock(), "C": MagicMock()}
call_mock = MagicMock()
call_mock.a = locks["A"]
call_mock.b = locks["B"]
call_mock.c = locks["C"]
lock_chain = LockChain(locks)
lock_chain.release()
call_mock.assert_has_calls([call.a.release(), call.b.release(), call.c.release()])
def test_release_order_shuffled(self):
locks = {"A": MagicMock(), "C": MagicMock(), "B": MagicMock()}
call_mock = MagicMock()
call_mock.a = locks["A"]
call_mock.b = locks["B"]
call_mock.c = locks["C"]
lock_chain = LockChain(locks)
lock_chain.release()
call_mock.assert_has_calls([call.a.release(), call.b.release(), call.c.release()])
def test_with(self):
locks = {"A": MagicMock(), "C": MagicMock(), "B": MagicMock()}
call_mock = MagicMock()
call_mock.a = locks["A"]
call_mock.b = locks["B"]
call_mock.c = locks["C"]
with LockChain(locks) as _:
call_mock.assert_has_calls([call.a.acquire(), call.b.acquire(), call.c.acquire()])
call_mock.assert_has_calls(
[call.a.acquire(), call.b.acquire(), call.c.acquire(), call.a.release(), call.b.release(), call.c.release()]
)
class TestLockDistributor(TestCase):
@patch("samcli.lib.utils.lock_distributor.threading.Lock")
@patch("samcli.lib.utils.lock_distributor.multiprocessing.Lock")
def test_thread_get_locks(self, process_lock_mock, thread_lock_mock):
locks = [MagicMock(), MagicMock(), MagicMock(), MagicMock()]
thread_lock_mock.side_effect = locks
distributor = LockDistributor(LockDistributorType.THREAD, None)
keys = ["A", "B", "C"]
result = distributor.get_locks(keys)
self.assertEqual(result["A"], locks[1])
self.assertEqual(result["B"], locks[2])
self.assertEqual(result["C"], locks[3])
self.assertEqual(distributor.get_locks(keys)["A"], locks[1])
@patch("samcli.lib.utils.lock_distributor.threading.Lock")
@patch("samcli.lib.utils.lock_distributor.multiprocessing.Lock")
def test_process_get_locks(self, process_lock_mock, thread_lock_mock):
locks = [MagicMock(), MagicMock(), MagicMock(), MagicMock()]
process_lock_mock.side_effect = locks
distributor = LockDistributor(LockDistributorType.PROCESS, None)
keys = ["A", "B", "C"]
result = distributor.get_locks(keys)
self.assertEqual(result["A"], locks[1])
self.assertEqual(result["B"], locks[2])
self.assertEqual(result["C"], locks[3])
self.assertEqual(distributor.get_locks(keys)["A"], locks[1])
@patch("samcli.lib.utils.lock_distributor.threading.Lock")
@patch("samcli.lib.utils.lock_distributor.multiprocessing.Lock")
def test_process_manager_get_locks(self, process_lock_mock, thread_lock_mock):
manager_mock = MagicMock()
locks = [MagicMock(), MagicMock(), MagicMock(), MagicMock()]
manager_mock.dict.return_value = dict()
manager_mock.Lock.side_effect = locks
distributor = LockDistributor(LockDistributorType.PROCESS, manager_mock)
keys = ["A", "B", "C"]
result = distributor.get_locks(keys)
self.assertEqual(result["A"], locks[1])
self.assertEqual(result["B"], locks[2])
self.assertEqual(result["C"], locks[3])
self.assertEqual(distributor.get_locks(keys)["A"], locks[1])
|
299,539 |
active client
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2017 the Isard-vdi project authors:
# Josep Maria Viñolas Auquer
# Alberto Larraz Dalmases
# License: AGPLv3
import traceback
from rethinkdb import RethinkDB
from api import app
from .._common.api_exceptions import Error
r = RethinkDB()
import logging as log
from .flask_rethink import RDB
db = RDB(app)
db.init_app(app)
def METHOD_NAME(
kind,
client_ip,
remote_ip=None,
remote_port=None,
status=False,
):
    # NOTE: kind will be "users" or "hypers", as these are the only two wireguard
    # interfaces. Remotevpn clients are handled on the users wg interface.
if kind not in ["users", "hypers"]:
raise Error(
"not_found",
"Active client vpn connection: Vpn kind " + str(kind) + " not found",
traceback.format_exc(),
description_code="vpn_kind_not_found",
)
connection_data = {
"connected": status,
"remote_ip": remote_ip,
"remote_port": remote_port,
}
# Find ip
if kind == "users":
with app.app_context():
if len(
list(
r.table("remotevpn")
.get_all(client_ip, index="wg_client_ip")
.run(db.conn)
)
):
r.table("remotevpn").get_all(client_ip, index="wg_client_ip").update(
{"vpn": {"wireguard": connection_data}}
).run(db.conn)
return True
r.table("users").get_all(client_ip, index="wg_client_ip").update(
{"vpn": {"wireguard": connection_data}}
).run(db.conn)
return True
else: # kind = hypers
with app.app_context():
r.table("hypervisors").get_all(client_ip, index="wg_client_ip").update(
{"vpn": {"wireguard": connection_data}}
).run(db.conn)
return True
def reset_connection_status(
kind,
):
if kind not in ["users", "hypers", "all"]:
raise Error(
"not_found",
"Reset vpn connection: Vpn kind " + str(kind) + " not found",
traceback.format_exc(),
description_code="vpn_kind_not_found",
)
connection_data = {"connected": False, "remote_ip": None, "remote_port": None}
# Find ip
if kind in ["users", "all"]:
with app.app_context():
r.table("users").has_fields({"vpn": {"wireguard": "Address"}}).update(
{"vpn": {"wireguard": connection_data}}
).run(db.conn)
if kind in ["remotevpn", "all"]:
with app.app_context():
r.table("remotevpn").has_fields({"vpn": {"wireguard": "Address"}}).update(
{"vpn": {"wireguard": connection_data}}
).run(db.conn)
if kind in ["hypers", "all"]:
with app.app_context():
r.table("hypervisors").has_fields({"vpn": {"wireguard": "Address"}}).update(
{"vpn": {"wireguard": connection_data}}
).run(db.conn)
return True
def reset_connections_list_status(
data,
):
connection_data = {"connected": False, "remote_ip": None, "remote_port": None}
users_vpn_ips = [d["client_ip"] for d in data if d["kind"] == "users"]
if len(users_vpn_ips):
with app.app_context():
r.table("users").get_all(
r.args(users_vpn_ips), index="wg_client_ip"
).update({"vpn": {"wireguard": connection_data}}).run(db.conn)
r.table("remotevpn").get_all(
r.args(users_vpn_ips), index="wg_client_ip"
).update({"vpn": {"wireguard": connection_data}}).run(db.conn)
hypers_vpn_ips = [d["client_ip"] for d in data if d["kind"] == "hypers"]
if len(hypers_vpn_ips):
with app.app_context():
r.table("hypervisors").get_all(
r.args(hypers_vpn_ips), index="wg_client_ip"
).update({"vpn": {"wireguard": connection_data}}).run(db.conn)
return True
|
299,540 |
ls nokwargs
|
from __future__ import annotations
import json
import os.path
import platform
import subprocess
import sys
import tempfile
import threading
import time
import psutil
import pytest
from simpleflow import execute
from simpleflow.exceptions import ExecutionError, ExecutionTimeoutError
@execute.program(path="ls")
def METHOD_NAME(*args):
"""
Only accepts a variable number of positional arguments.
"""
pass
def test_execute_program_no_kwargs():
with tempfile.NamedTemporaryFile() as f:
with pytest.raises(TypeError):
METHOD_NAME(hide=f.name)
@execute.program(path="ls")
def ls_noargs(**kwargs):
"""
Only accepts a variable number of keyword arguments.
"""
pass
def test_execute_program_no_args():
with tempfile.NamedTemporaryFile() as f:
with pytest.raises(TypeError):
ls_noargs(f.name)
@execute.program(path="ls")
def ls_restrict_named_arguments(*, hide=execute.RequiredArgument):
pass
def test_execute_program_restrict_named_arguments():
with tempfile.NamedTemporaryFile() as f:
with pytest.raises(TypeError):
ls_restrict_named_arguments(f.name)
@execute.program(path="ls")
def ls_optional_named_arguments(hide="", *args):
pass
@pytest.mark.xfail(platform.system() == "Darwin", reason="ls doesn't have a --hide option on MacOSX")
def test_execute_program_optional_named_arguments():
with tempfile.NamedTemporaryFile(suffix="\xe9") as f:
assert ls_optional_named_arguments(f.name).strip() == f.name
assert f.name not in ls_optional_named_arguments(hide=f.name)
@execute.program()
def ls(*args, **kwargs):
pass
def test_execute_program_with_positional_arguments():
with tempfile.NamedTemporaryFile() as f:
assert ls(f.name).strip() == f.name
@pytest.mark.xfail(platform.system() == "Darwin", reason="ls doesn't have a --hide option on MacOSX")
def test_execute_program_with_named_arguments():
with tempfile.NamedTemporaryFile() as f:
assert f.name not in (ls(os.path.dirname(f.name), hide=f.name).strip())
@execute.program()
def ls_2args(a, b):
pass
def test_ls_2args():
with pytest.raises(TypeError):
ls_2args(1, 2, 3)
@execute.python()
def inc(xs):
return [x + 1 for x in xs]
def test_function_as_program():
assert inc([1, 2, 3]) == [2, 3, 4]
@execute.python()
def add(a, b=1):
return a + b
@execute.python()
class Add:
def __init__(self, a, b=1):
self.a = a
self.b = b
def execute(self):
return self.a + self.b
def test_function_as_program_with_default_kwarg():
assert add(4) == 5
assert Add(4) == 5
def test_function_as_program_with_kwargs():
assert add(3, 7) == 10
assert Add(3, 7) == 10
def test_function_as_program_raises_builtin_exception():
with pytest.raises(ExecutionError) as excinfo:
add("1")
assert '"error":"TypeError"' in str(excinfo.value)
with pytest.raises(ExecutionError) as excinfo:
Add("1")
assert '"error":"TypeError"' in str(excinfo.value)
@execute.python()
def print_string(s, retval):
print(s, end="")
return retval
@execute.python()
class PrintString:
def __init__(self, s, retval):
self.s = s
self.retval = retval
def execute(self):
print(self.s)
return self.retval
def test_function_with_print():
actual = print_string("This isn't part of the return value", None)
assert actual is None, actual
actual = PrintString("This isn't part of the return value", None)
assert actual is None, actual
def test_function_with_print_and_return():
assert print_string("This isn't part of the return value", 42) == 42
assert PrintString("This isn't part of the return value", 42) == 42
def test_function_returning_lf():
assert print_string("This isn't part of the return value", "a\nb") == "a\nb"
assert PrintString("This isn't part of the return value", "a\nb") == "a\nb"
class DummyException(Exception):
pass
@execute.python()
def raise_dummy_exception():
raise DummyException
@execute.python()
class RaiseDummyException:
def __init__(self):
pass
@staticmethod
def execute():
raise DummyException
def test_function_as_program_raises_custom_exception():
with pytest.raises(ExecutionError) as excinfo:
raise_dummy_exception()
assert '"error":"DummyException"' in str(excinfo.value)
with pytest.raises(ExecutionError) as excinfo:
RaiseDummyException()
assert '"error":"DummyException"' in str(excinfo.value)
@execute.python()
def raise_timeout_error():
from simpleflow.exceptions import TimeoutError
raise TimeoutError("timeout", 1)
def test_function_as_program_raises_module_exception():
with pytest.raises(ExecutionError) as excinfo:
raise_timeout_error()
assert '"error":"TimeoutError"' in str(excinfo.value)
@execute.python()
def warn():
import warnings
warnings.warn(
"The _posixsubprocess module is not being used. "
"Child process reliability may suffer if your "
"program uses threads.",
RuntimeWarning,
)
raise Exception("Fake Exception")
def test_function_with_warning():
try:
warn()
except Exception:
pass
else:
assert False
def test_function_returning_unicode():
assert print_string("", "ʘ‿ʘ") == "ʘ‿ʘ"
@execute.python()
def raise_dummy_exception_with_unicode():
raise DummyException("ʘ‿ʘ")
def test_exception_with_unicode():
with pytest.raises(ExecutionError) as excinfo:
raise_dummy_exception_with_unicode()
assert '"error":"DummyException"' in str(excinfo.value)
error = json.loads(excinfo.value.args[0])
assert error["message"] == "ʘ‿ʘ"
def sleep_and_return(seconds):
time.sleep(seconds)
return seconds
def test_timeout_execute():
timeout = 3 # TODO: the timeout should be smaller but as a workaround for Pypy slowness/overhead we set it to 3 sec
func = execute.python(timeout=timeout)(sleep_and_return)
# Normal case
result = func(0.25)
assert result == 0.25
# Timeout case
t = time.time()
with pytest.raises(ExecutionTimeoutError) as e:
func(10)
assert (time.time() - t) < 10.0
assert f"ExecutionTimeoutError after {timeout} seconds" in str(e.value)
def test_timeout_execute_from_thread():
# From a thread
t = threading.Thread(target=test_timeout_execute)
t.start()
t.join()
def create_sleeper_subprocess():
pid = subprocess.Popen(["sleep", "600"]).pid
return pid
@pytest.mark.xfail(
platform.system() == "Darwin" or "PyPy" in sys.version,
reason="psutil process statuses are buggy on OSX, and test randomly fails on PyPy",
)
def test_execute_dont_kill_children():
pid = execute.python()(create_sleeper_subprocess)()
subprocess = psutil.Process(pid)
assert subprocess.status() == "sleeping"
subprocess.terminate() # cleanup
def test_execute_kill_children():
pid = execute.python(kill_children=True)(create_sleeper_subprocess)()
with pytest.raises(psutil.NoSuchProcess):
psutil.Process(pid)
@execute.python()
def length(x):
return len(x)
def test_large_command_line():
x = "a" * 1024 * 1024
assert length(x) == len(x)
def test_large_command_line_unicode():
x = "ä" * 1024 * 1024
assert length(x) == len(x)
def test_large_command_line_utf8():
"""
UTF-8 bytes must be handled as Unicode, both in Python 2 and Python 3.
"""
x = "ä" * 1024 * 1024
assert length(x.encode("utf-8")) == len(x)
|
299,541 |
set line mutation scale
|
"""
Provides classes to style the axis lines.
"""
import math
import numpy as np
import matplotlib as mpl
from matplotlib.patches import _Style, FancyArrowPatch
from matplotlib.path import Path
from matplotlib.transforms import IdentityTransform
class _FancyAxislineStyle:
class SimpleArrow(FancyArrowPatch):
"""The artist class that will be returned for SimpleArrow style."""
_ARROW_STYLE = "->"
def __init__(self, axis_artist, line_path, transform,
line_mutation_scale):
self._axis_artist = axis_artist
self._line_transform = transform
self._line_path = line_path
self._line_mutation_scale = line_mutation_scale
FancyArrowPatch.__init__(self,
path=self._line_path,
arrowstyle=self._ARROW_STYLE,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=line_mutation_scale,
mutation_aspect=None,
transform=IdentityTransform(),
)
def METHOD_NAME(self, scale):
self.set_mutation_scale(scale*self._line_mutation_scale)
def _extend_path(self, path, mutation_size=10):
"""
Extend the path to make a room for drawing arrow.
"""
(x0, y0), (x1, y1) = path.vertices[-2:]
theta = math.atan2(y1 - y0, x1 - x0)
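            # Extend along the direction of the path's final segment by
            # `mutation_size` display units so the arrow head has room to be
            # drawn beyond the end of the axis line.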
x2 = x1 + math.cos(theta) * mutation_size
y2 = y1 + math.sin(theta) * mutation_size
if path.codes is None:
return Path(np.concatenate([path.vertices, [[x2, y2]]]))
else:
return Path(np.concatenate([path.vertices, [[x2, y2]]]),
np.concatenate([path.codes, [Path.LINETO]]))
def set_path(self, path):
self._line_path = path
def draw(self, renderer):
"""
Draw the axis line.
1) Transform the path to the display coordinate.
2) Extend the path to make a room for arrow.
3) Update the path of the FancyArrowPatch.
4) Draw.
"""
path_in_disp = self._line_transform.transform_path(self._line_path)
mutation_size = self.get_mutation_scale() # line_mutation_scale()
extended_path = self._extend_path(path_in_disp,
mutation_size=mutation_size)
self._path_original = extended_path
FancyArrowPatch.draw(self, renderer)
def get_window_extent(self, renderer=None):
path_in_disp = self._line_transform.transform_path(self._line_path)
mutation_size = self.get_mutation_scale() # line_mutation_scale()
extended_path = self._extend_path(path_in_disp,
mutation_size=mutation_size)
self._path_original = extended_path
return FancyArrowPatch.get_window_extent(self, renderer)
class FilledArrow(SimpleArrow):
"""The artist class that will be returned for FilledArrow style."""
_ARROW_STYLE = "-|>"
def __init__(self, axis_artist, line_path, transform,
line_mutation_scale, facecolor):
super().__init__(axis_artist, line_path, transform,
line_mutation_scale)
self.set_facecolor(facecolor)
class AxislineStyle(_Style):
"""
A container class which defines style classes for AxisArtists.
An instance of any axisline style class is a callable object,
whose call signature is ::
__call__(self, axis_artist, path, transform)
When called, this should return an `.Artist` with the following methods::
def set_path(self, path):
# set the path for axisline.
def set_line_mutation_scale(self, scale):
# set the scale
def draw(self, renderer):
# draw
"""
_style_list = {}
class _Base:
        # Derived classes must be instantiable without arguments, i.e., all
        # of their arguments (except self) must have default values.
def __init__(self):
"""
initialization.
"""
super().__init__()
def __call__(self, axis_artist, transform):
"""
Given the AxisArtist instance, and transform for the path (set_path
method), return the Matplotlib artist for drawing the axis line.
"""
return self.new_line(axis_artist, transform)
class SimpleArrow(_Base):
"""
A simple arrow.
"""
ArrowAxisClass = _FancyAxislineStyle.SimpleArrow
def __init__(self, size=1):
"""
Parameters
----------
size : float
Size of the arrow as a fraction of the ticklabel size.
"""
self.size = size
super().__init__()
def new_line(self, axis_artist, transform):
linepath = Path([(0, 0), (0, 1)])
axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
line_mutation_scale=self.size)
return axisline
_style_list["->"] = SimpleArrow
class FilledArrow(SimpleArrow):
"""
An arrow with a filled head.
"""
ArrowAxisClass = _FancyAxislineStyle.FilledArrow
def __init__(self, size=1, facecolor=None):
"""
Parameters
----------
size : float
Size of the arrow as a fraction of the ticklabel size.
facecolor : color, default: :rc:`axes.edgecolor`
Fill color.
.. versionadded:: 3.7
"""
if facecolor is None:
facecolor = mpl.rcParams['axes.edgecolor']
self.size = size
self._facecolor = facecolor
super().__init__(size=size)
def new_line(self, axis_artist, transform):
linepath = Path([(0, 0), (0, 1)])
axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
line_mutation_scale=self.size,
facecolor=self._facecolor)
return axisline
_style_list["-|>"] = FilledArrow
|
299,542 |
init layer metadata
|
import os
import tempfile
from pathlib import Path
import lxml.etree as ET
import qgis.core
from qgis.PyQt import QtXml
from .jobs import manager
from .jobs.models import Job
from .logger import log
XSL_PATH = os.path.join(os.path.dirname(__file__), "data", "xsl")
def save_qmd(file_path, metadata):
dom_impl = QtXml.QDomImplementation()
doc_type = dom_impl.createDocumentType("qgis", "http://mrcc.com/qgis.dtd", "SYSTEM")
document = QtXml.QDomDocument(doc_type)
root_node = document.createElement("qgis")
root_node.setAttribute("version", qgis.core.Qgis.version())
document.appendChild(root_node)
if not metadata.writeMetadataXml(root_node, document):
log("Could not save metadata")
with open(file_path, "w", encoding="utf-8") as f:
f.write(document.toString(2))
def read_qmd(file_path):
md = qgis.core.QgsLayerMetadata()
if not os.path.exists(file_path):
return md
document = QtXml.QDomDocument("qgis")
with open(file_path, encoding="utf-8") as f:
if not document.setContent(f.read()):
log("Could not read metadata from file {}".format(md_path))
return md
root = document.firstChildElement("qgis")
if root.isNull():
log("Root <qgis> element could not be found")
return md
md.readMetadataXml(root)
return md
def qmd_to_iso(qmd_path):
file_name = os.path.splitext(os.path.split(qmd_path)[1])[0] + ".xml"
temp_file = os.path.join(tempfile.gettempdir(), file_name)
in_dom = ET.parse(qmd_path)
print(
os.path.join(XSL_PATH, "qgis-to-iso19139.xsl"),
os.path.exists(os.path.join(XSL_PATH, "qgis-to-iso19139.xsl")),
)
xslt = ET.parse(os.path.join(XSL_PATH, "qgis-to-iso19139.xsl"))
transform = ET.XSLT(xslt)
out_dom = transform(in_dom)
s = (
'<?xml version="1.0" encoding="UTF-8"?>\n'
+ ET.tostring(out_dom, pretty_print=True).decode()
)
with open(temp_file, "w", encoding="utf8") as f:
f.write(s)
return Path(temp_file)
def init_dataset_metadata(job: Job, metadata: qgis.core.QgsLayerMetadata = None):
md = read_dataset_metadata(job)
if metadata is None:
md.setTitle(job.task_name)
md.setAbstract(job.task_notes)
else:
md.combine(metadata)
file_path = os.path.splitext(manager.job_manager.get_job_file_path(job))[0] + ".qmd"
save_qmd(file_path, md)
for u in job.results.get_all_uris():
METHOD_NAME(u.uri, md)
def METHOD_NAME(uri, metadata):
md = None
if metadata is None:
md = qgis.core.QgsLayerMetadata()
else:
md = metadata.clone()
layer = qgis.core.QgsRasterLayer(str(uri), "tmp", "gdal")
md.setCrs(layer.dataProvider().crs())
spatialExtent = qgis.core.QgsLayerMetadata.SpatialExtent()
spatialExtent.geom = qgis.core.QgsBox3d(layer.extent())
spatialExtent.extentCrs = layer.dataProvider().crs()
spatialExtents = [spatialExtent]
extent = qgis.core.QgsLayerMetadata.Extent()
extent.setSpatialExtents(spatialExtents)
md.setExtent(extent)
file_path = os.path.splitext(uri)[0] + ".qmd"
save_qmd(file_path, md)
def update_dataset_metadata(
job: Job, metadata: qgis.core.QgsLayerMetadata, updateLayers: bool = False
):
file_path = os.path.splitext(manager.job_manager.get_job_file_path(job))[0] + ".qmd"
save_qmd(file_path, metadata)
if updateLayers:
for u in job.results.get_all_uris():
update_layer_metadata(u.uri, metadata)
def update_layer_metadata(uri, metadata):
layer = qgis.core.QgsRasterLayer(str(uri), "tmp", "gdal")
md = layer.metadata()
if md == metadata:
return
md = combine_metadata(md, metadata)
file_path = os.path.splitext(uri)[0] + ".qmd"
save_qmd(file_path, md)
def combine_metadata(metadata, other):
if other.identifier() != "":
metadata.setIdentifier(other.identifier())
if other.parentIdentifier() != "":
        metadata.setParentIdentifier(other.parentIdentifier())
if other.language() != "":
metadata.setLanguage(other.language())
if other.type() != "":
metadata.setType(other.type())
if other.title() != "":
metadata.setTitle(other.title())
if other.abstract() != "":
metadata.setAbstract(other.abstract())
if other.history() != "":
metadata.setHistory(other.history())
if len(other.keywords()) > 0:
metadata.setKeywords(other.keywords())
if len(other.contacts()) > 0:
metadata.setContacts(other.contacts())
if len(other.links()) > 0:
metadata.setLinks(other.links())
if other.fees() != "":
metadata.setFees(other.fees())
if len(other.constraints()) > 0:
metadata.setConstraints(other.constraints())
if len(other.rights()) > 0:
metadata.setRights(other.rights())
if len(other.licenses()) > 0:
metadata.setLicenses(other.licenses())
if other.encoding() != "":
metadata.setEncoding(other.encoding())
if other.crs().isValid():
metadata.setCrs(other.crs())
if len(other.extent().spatialExtents()) > 0:
extent = metadata.extent()
extent.setSpatialExtents(other.extent().spatialExtents())
metadata.setExtent(extent)
if len(other.extent().temporalExtents()) > 0:
extent = metadata.extent()
extent.setTemporalExtents(other.extent().temporalExtents())
        metadata.setExtent(extent)
    return metadata
def export_dataset_metadata(job: Job):
md_paths = list()
file_path = manager.job_manager.get_job_file_path(job)
md_path = os.path.splitext(file_path)[0] + ".qmd"
if not os.path.exists(md_path):
log("Could not find dataset metadata file {}".format(md_path))
else:
md_paths.append(qmd_to_iso(md_path))
for u in job.results.get_all_uris():
file_path = u.uri
md_path = os.path.splitext(file_path)[0] + ".qmd"
if not os.path.exists(md_path):
log("Could not find dataset metadata file {}".format(md_path))
else:
md_paths.append(qmd_to_iso(md_path))
return md_paths
def read_dataset_metadata(job: Job):
file_path = manager.job_manager.get_job_file_path(job)
md_path = os.path.splitext(file_path)[0] + ".qmd"
return read_qmd(md_path)
|
299,543 |
mailchimp dataset config
|
import json
from typing import Any, Dict, Generator
import pydash
import pytest
from sqlalchemy.orm import Session
from fides.api.db import session
from fides.api.models.connectionconfig import (
AccessLevel,
ConnectionConfig,
ConnectionType,
)
from fides.api.models.datasetconfig import DatasetConfig
from fides.api.models.sql_models import Dataset as CtlDataset
from fides.api.schemas.saas.saas_config import SaaSRequest
from fides.api.schemas.saas.shared_schemas import HTTPMethod, SaaSRequestParams
from fides.api.service.connectors.saas_connector import SaaSConnector
from fides.api.util.saas_util import (
load_config_with_replacement,
load_dataset_with_replacement,
)
from tests.ops.test_helpers.vault_client import get_secrets
secrets = get_secrets("mailchimp")
@pytest.fixture(scope="session")
def mailchimp_secrets(saas_config):
return {
"domain": pydash.get(saas_config, "mailchimp.domain") or secrets["domain"],
"username": pydash.get(saas_config, "mailchimp.username")
or secrets["username"],
"api_key": pydash.get(saas_config, "mailchimp.api_key") or secrets["api_key"],
}
@pytest.fixture(scope="session")
def mailchimp_identity_email(saas_config):
return (
pydash.get(saas_config, "mailchimp.identity_email") or secrets["identity_email"]
)
@pytest.fixture
def mailchimp_config() -> Dict[str, Any]:
return load_config_with_replacement(
"data/saas/config/mailchimp_config.yml",
"<instance_fides_key>",
"mailchimp_instance",
)
@pytest.fixture
def mailchimp_dataset() -> Dict[str, Any]:
return load_dataset_with_replacement(
"data/saas/dataset/mailchimp_dataset.yml",
"<instance_fides_key>",
"mailchimp_instance",
)[0]
@pytest.fixture(scope="function")
def mailchimp_connection_config(
db: session, mailchimp_config, mailchimp_secrets
) -> Generator:
fides_key = mailchimp_config["fides_key"]
connection_config = ConnectionConfig.create(
db=db,
data={
"key": fides_key,
"name": fides_key,
"connection_type": ConnectionType.saas,
"access": AccessLevel.write,
"secrets": mailchimp_secrets,
"saas_config": mailchimp_config,
},
)
yield connection_config
connection_config.delete(db)
@pytest.fixture
def METHOD_NAME(
db: Session,
mailchimp_connection_config: ConnectionConfig,
mailchimp_dataset: Dict[str, Any],
) -> Generator:
fides_key = mailchimp_dataset["fides_key"]
mailchimp_connection_config.name = fides_key
mailchimp_connection_config.key = fides_key
mailchimp_connection_config.save(db=db)
ctl_dataset = CtlDataset.create_from_dataset_dict(db, mailchimp_dataset)
dataset = DatasetConfig.create(
db=db,
data={
"connection_config_id": mailchimp_connection_config.id,
"fides_key": fides_key,
"ctl_dataset_id": ctl_dataset.id,
},
)
yield dataset
dataset.delete(db=db)
ctl_dataset.delete(db=db)
@pytest.fixture(scope="function")
def reset_mailchimp_data(
mailchimp_connection_config, mailchimp_identity_email
) -> Generator:
"""
Gets the current value of the resource and restores it after the test is complete.
Used for erasure tests.
"""
connector = SaaSConnector(mailchimp_connection_config)
connector.set_saas_request_state(
SaaSRequest(path="test_path", method=HTTPMethod.GET)
) # dummy request as connector requires it
request: SaaSRequestParams = SaaSRequestParams(
method=HTTPMethod.GET,
path="/3.0/search-members",
query_params={"query": mailchimp_identity_email},
)
response = connector.create_client().send(request)
body = response.json()
member = body["exact_matches"]["members"][0]
yield member
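    # Restore the member document captured above by PUT-ing the original
    # representation back (erasure tests may have modified it).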
request: SaaSRequestParams = SaaSRequestParams(
method=HTTPMethod.PUT,
headers={"Content-Type": "application/json"},
path=f'/3.0/lists/{member["list_id"]}/members/{member["id"]}',
body=json.dumps(member),
)
connector.create_client().send(request)
|
299,544 |
test hdmedian
|
import numpy as np
import numpy.ma as ma
import scipy.stats.mstats as ms
from numpy.testing import (assert_equal, assert_almost_equal, assert_,
assert_allclose)
def test_compare_medians_ms():
x = np.arange(7)
y = x + 10
assert_almost_equal(ms.compare_medians_ms(x, y), 0)
y2 = np.linspace(0, 1, num=10)
assert_almost_equal(ms.compare_medians_ms(x, y2), 0.017116406778)
def METHOD_NAME():
# 1-D array
x = ma.arange(11)
assert_allclose(ms.hdmedian(x), 5, rtol=1e-14)
x.mask = ma.make_mask(x)
x.mask[:7] = False
assert_allclose(ms.hdmedian(x), 3, rtol=1e-14)
# Check that `var` keyword returns a value. TODO: check whether returned
# value is actually correct.
assert_(ms.hdmedian(x, var=True).size == 2)
# 2-D array
x2 = ma.arange(22).reshape((11, 2))
assert_allclose(ms.hdmedian(x2, axis=0), [10, 11])
x2.mask = ma.make_mask(x2)
x2.mask[:7, :] = False
assert_allclose(ms.hdmedian(x2, axis=0), [6, 7])
def test_rsh():
np.random.seed(132345)
x = np.random.randn(100)
res = ms.rsh(x)
# Just a sanity check that the code runs and output shape is correct.
# TODO: check that implementation is correct.
assert_(res.shape == x.shape)
# Check points keyword
res = ms.rsh(x, points=[0, 1.])
assert_(res.size == 2)
def test_mjci():
# Tests the Marits-Jarrett estimator
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(ms.mjci(data),[55.76819,45.84028,198.87875],5)
def test_trimmed_mean_ci():
# Tests the confidence intervals of the trimmed mean.
data = ma.array([545,555,558,572,575,576,578,580,
594,605,635,651,653,661,666])
assert_almost_equal(ms.trimmed_mean(data,0.2), 596.2, 1)
assert_equal(np.round(ms.trimmed_mean_ci(data,(0.2,0.2)),1),
[561.8, 630.6])
def test_idealfourths():
# Tests ideal-fourths
test = np.arange(100)
assert_almost_equal(np.asarray(ms.idealfourths(test)),
[24.416667,74.583333],6)
test_2D = test.repeat(3).reshape(-1,3)
assert_almost_equal(ms.idealfourths(test_2D, axis=0),
[[24.416667,24.416667,24.416667],
[74.583333,74.583333,74.583333]],6)
assert_almost_equal(ms.idealfourths(test_2D, axis=1),
test.repeat(2).reshape(-1,2))
test = [0, 0]
_result = ms.idealfourths(test)
assert_(np.isnan(_result).all())
class TestQuantiles:
data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014,
0.887764025,0.239407086,0.349638551,0.972791145,0.149789972,
0.936947700,0.132359948,0.046041972,0.641675031,0.945530547,
0.224218684,0.771450991,0.820257774,0.336458052,0.589113496,
0.509736129,0.696838829,0.491323573,0.622767425,0.775189248,
0.641461450,0.118455200,0.773029450,0.319280007,0.752229111,
0.047841438,0.466295911,0.583850781,0.840581845,0.550086491,
0.466470062,0.504765074,0.226855960,0.362641207,0.891620942,
0.127898691,0.490094097,0.044882048,0.041441695,0.317976349,
0.504135618,0.567353033,0.434617473,0.636243375,0.231803616,
0.230154113,0.160011327,0.819464108,0.854706985,0.438809221,
0.487427267,0.786907310,0.408367937,0.405534192,0.250444460,
0.995309248,0.144389588,0.739947527,0.953543606,0.680051621,
0.388382017,0.863530727,0.006514031,0.118007779,0.924024803,
0.384236354,0.893687694,0.626534881,0.473051932,0.750134705,
0.241843555,0.432947602,0.689538104,0.136934797,0.150206859,
0.474335206,0.907775349,0.525869295,0.189184225,0.854284286,
0.831089744,0.251637345,0.587038213,0.254475554,0.237781276,
0.827928620,0.480283781,0.594514455,0.213641488,0.024194386,
0.536668589,0.699497811,0.892804071,0.093835427,0.731107772]
def test_hdquantiles(self):
data = self.data
assert_almost_equal(ms.hdquantiles(data,[0., 1.]),
[0.006514031, 0.995309248])
hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75])
assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,])
hdq = ms.hdquantiles_sd(data,[0.25, 0.5, 0.75])
assert_almost_equal(hdq, [0.03786954, 0.03805389, 0.03800152,], 4)
data = np.array(data).reshape(10,10)
hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0)
assert_almost_equal(hdq[:,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75]))
assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75]))
hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True)
assert_almost_equal(hdq[...,0],
ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True))
assert_almost_equal(hdq[...,-1],
ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True))
def test_hdquantiles_sd(self):
# Only test that code runs, implementation not checked for correctness
res = ms.hdquantiles_sd(self.data)
assert_(res.size == 3)
def test_mquantiles_cimj(self):
# Only test that code runs, implementation not checked for correctness
ci_lower, ci_upper = ms.mquantiles_cimj(self.data)
assert_(ci_lower.size == ci_upper.size == 3)
|
299,545 |
test bytecode broken label
|
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
#!/usr/bin/env python3
import contextlib
import io
import sys
import textwrap
import unittest
from _pydevd_frame_eval.vendored import bytecode
from _pydevd_frame_eval.vendored.bytecode import Label, Instr, Bytecode, BasicBlock, ControlFlowGraph
from _pydevd_frame_eval.vendored.bytecode.concrete import OFFSET_AS_INSTRUCTION
from _pydevd_frame_eval.vendored.bytecode.tests import disassemble
class DumpCodeTests(unittest.TestCase):
maxDiff = 80 * 100
def check_dump_bytecode(self, code, expected, lineno=None):
with contextlib.redirect_stdout(io.StringIO()) as stderr:
if lineno is not None:
bytecode.dump_bytecode(code, lineno=True)
else:
bytecode.dump_bytecode(code)
output = stderr.getvalue()
self.assertEqual(output, expected)
def test_bytecode(self):
source = """
def func(test):
if test == 1:
return 1
elif test == 2:
return 2
return 3
"""
code = disassemble(source, function=True)
# without line numbers
enum_repr = "<Compare.EQ: 2>"
expected = f"""
LOAD_FAST 'test'
LOAD_CONST 1
COMPARE_OP {enum_repr}
POP_JUMP_IF_FALSE <label_instr6>
LOAD_CONST 1
RETURN_VALUE
label_instr6:
LOAD_FAST 'test'
LOAD_CONST 2
COMPARE_OP {enum_repr}
POP_JUMP_IF_FALSE <label_instr13>
LOAD_CONST 2
RETURN_VALUE
label_instr13:
LOAD_CONST 3
RETURN_VALUE
"""[
1:
].rstrip(
" "
)
self.check_dump_bytecode(code, expected)
# with line numbers
expected = f"""
L. 2 0: LOAD_FAST 'test'
1: LOAD_CONST 1
2: COMPARE_OP {enum_repr}
3: POP_JUMP_IF_FALSE <label_instr6>
L. 3 4: LOAD_CONST 1
5: RETURN_VALUE
label_instr6:
L. 4 7: LOAD_FAST 'test'
8: LOAD_CONST 2
9: COMPARE_OP {enum_repr}
10: POP_JUMP_IF_FALSE <label_instr13>
L. 5 11: LOAD_CONST 2
12: RETURN_VALUE
label_instr13:
L. 6 14: LOAD_CONST 3
15: RETURN_VALUE
"""[
1:
].rstrip(
" "
)
self.check_dump_bytecode(code, expected, lineno=True)
def METHOD_NAME(self):
label = Label()
code = Bytecode([Instr("JUMP_ABSOLUTE", label)])
expected = " JUMP_ABSOLUTE <error: unknown label>\n\n"
self.check_dump_bytecode(code, expected)
def test_blocks_broken_jump(self):
block = BasicBlock()
code = ControlFlowGraph()
code[0].append(Instr("JUMP_ABSOLUTE", block))
expected = textwrap.dedent(
"""
block1:
JUMP_ABSOLUTE <error: unknown block>
"""
).lstrip("\n")
self.check_dump_bytecode(code, expected)
def test_bytecode_blocks(self):
source = """
def func(test):
if test == 1:
return 1
elif test == 2:
return 2
return 3
"""
code = disassemble(source, function=True)
code = ControlFlowGraph.from_bytecode(code)
# without line numbers
enum_repr = "<Compare.EQ: 2>"
expected = textwrap.dedent(
f"""
block1:
LOAD_FAST 'test'
LOAD_CONST 1
COMPARE_OP {enum_repr}
POP_JUMP_IF_FALSE <block3>
-> block2
block2:
LOAD_CONST 1
RETURN_VALUE
block3:
LOAD_FAST 'test'
LOAD_CONST 2
COMPARE_OP {enum_repr}
POP_JUMP_IF_FALSE <block5>
-> block4
block4:
LOAD_CONST 2
RETURN_VALUE
block5:
LOAD_CONST 3
RETURN_VALUE
"""
).lstrip()
self.check_dump_bytecode(code, expected)
# with line numbers
expected = textwrap.dedent(
f"""
block1:
L. 2 0: LOAD_FAST 'test'
1: LOAD_CONST 1
2: COMPARE_OP {enum_repr}
3: POP_JUMP_IF_FALSE <block3>
-> block2
block2:
L. 3 0: LOAD_CONST 1
1: RETURN_VALUE
block3:
L. 4 0: LOAD_FAST 'test'
1: LOAD_CONST 2
2: COMPARE_OP {enum_repr}
3: POP_JUMP_IF_FALSE <block5>
-> block4
block4:
L. 5 0: LOAD_CONST 2
1: RETURN_VALUE
block5:
L. 6 0: LOAD_CONST 3
1: RETURN_VALUE
"""
).lstrip()
self.check_dump_bytecode(code, expected, lineno=True)
def test_concrete_bytecode(self):
source = """
def func(test):
if test == 1:
return 1
elif test == 2:
return 2
return 3
"""
code = disassemble(source, function=True)
code = code.to_concrete_bytecode()
# without line numbers
expected = f"""
0 LOAD_FAST 0
2 LOAD_CONST 1
4 COMPARE_OP 2
6 POP_JUMP_IF_FALSE {6 if OFFSET_AS_INSTRUCTION else 12}
8 LOAD_CONST 1
10 RETURN_VALUE
12 LOAD_FAST 0
14 LOAD_CONST 2
16 COMPARE_OP 2
18 POP_JUMP_IF_FALSE {12 if OFFSET_AS_INSTRUCTION else 24}
20 LOAD_CONST 2
22 RETURN_VALUE
24 LOAD_CONST 3
26 RETURN_VALUE
""".lstrip(
"\n"
)
self.check_dump_bytecode(code, expected)
# with line numbers
expected = f"""
L. 2 0: LOAD_FAST 0
2: LOAD_CONST 1
4: COMPARE_OP 2
6: POP_JUMP_IF_FALSE {6 if OFFSET_AS_INSTRUCTION else 12}
L. 3 8: LOAD_CONST 1
10: RETURN_VALUE
L. 4 12: LOAD_FAST 0
14: LOAD_CONST 2
16: COMPARE_OP 2
18: POP_JUMP_IF_FALSE {12 if OFFSET_AS_INSTRUCTION else 24}
L. 5 20: LOAD_CONST 2
22: RETURN_VALUE
L. 6 24: LOAD_CONST 3
26: RETURN_VALUE
""".lstrip(
"\n"
)
self.check_dump_bytecode(code, expected, lineno=True)
def test_type_validation(self):
class T:
first_lineno = 1
with self.assertRaises(TypeError):
bytecode.dump_bytecode(T())
class MiscTests(unittest.TestCase):
def skip_test_version(self):
import setup
self.assertEqual(bytecode.__version__, setup.VERSION)
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
299,546 |
verify access control list
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer, JMESPathCheck, NoneCheck,
api_version_constraint)
from azure.cli.core.profiles import ResourceType
from ..storage_test_util import StorageScenarioMixin
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2016-12-01')
class StorageAccessControlListTests(StorageScenarioMixin, ScenarioTest):
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_container_acl_scenarios(self, resource_group, storage_account):
account_info = self.get_account_info(resource_group, storage_account)
container = self.create_container(account_info)
self.METHOD_NAME(account_info, 'container', container)
# @ResourceGroupPreparer()
# @StorageAccountPreparer()
# def test_storage_share_acl_scenarios(self, resource_group, storage_account):
# account_info = self.get_account_info(resource_group, storage_account)
# share = self.create_share(account_info)
# self._verify_access_control_list(account_info, 'share', share)
def METHOD_NAME(self, account_info, service_type, container_name):
container_id_parameter = '--{}-name {}'.format(service_type, container_name)
self.storage_cmd('storage {} policy list {}', account_info, service_type,
container_id_parameter).assert_with_checks(NoneCheck())
self.storage_cmd('storage {} policy create {} -n test1 --permission l', account_info, service_type,
container_id_parameter)
self.storage_cmd('storage {} policy create {} -n test2 --start 2016-01-01T00:00Z', account_info, service_type,
container_id_parameter)
self.storage_cmd('storage {} policy create {} -n test3 --expiry 2018-01-01T00:00Z', account_info, service_type,
container_id_parameter)
self.storage_cmd('storage {} policy create {} -n test4 --permission rwdl '
'--start 2016-01-01T00:00Z --expiry 2016-05-01T00:00Z', account_info, service_type,
container_id_parameter)
acl = self.storage_cmd('storage {} policy list {}', account_info, service_type,
container_id_parameter).get_output_in_json().keys()
self.assertSetEqual(set(acl), set(['test1', 'test2', 'test3', 'test4']))
self.storage_cmd('storage {} policy show {} -n test1', account_info, service_type,
container_id_parameter).assert_with_checks(JMESPathCheck('permission', 'l'))
self.storage_cmd('storage {} policy show {} -n test2', account_info, service_type,
container_id_parameter).assert_with_checks(JMESPathCheck('start', '2016-01-01T00:00:00+00:00'))
self.storage_cmd('storage {} policy show {} -n test3', account_info, service_type,
container_id_parameter).assert_with_checks(
JMESPathCheck('expiry', '2018-01-01T00:00:00+00:00'))
self.storage_cmd('storage {} policy show {} -n test4', account_info, service_type,
container_id_parameter).assert_with_checks(JMESPathCheck('start', '2016-01-01T00:00:00+00:00'),
JMESPathCheck('expiry',
'2016-05-01T00:00:00+00:00'),
JMESPathCheck('permission', 'rwdl'))
self.storage_cmd('storage {} policy update {} -n test1 --permission r', account_info, service_type,
container_id_parameter)
self.storage_cmd('storage {} policy show {} -n test1', account_info, service_type,
container_id_parameter).assert_with_checks(JMESPathCheck('permission', 'r'))
self.storage_cmd('storage {} policy delete {} -n test1', account_info, service_type, container_id_parameter)
acl = self.storage_cmd('storage {} policy list {}', account_info, service_type,
container_id_parameter).get_output_in_json().keys()
self.assertSequenceEqual(set(acl), set(['test2', 'test3', 'test4']))
|
299,547 |
init
|
#
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from MDSplus import Device, Data, Action, Dispatch, Method
class CP7452(Device):
"""Adlink CP7452 DIO"""
parts = [{'path': ':NODE', 'type': 'text', 'options': ('noshot_write',)},
{'path': ':BOARD', 'type': 'numeric',
'value': 1, 'options': ('no_write_shot',)},
{'path': ':COMMENT', 'type': 'text'},
{'path': '.DIGITAL_OUTS', 'type': 'structure'}]
for i in range(4):
parts.append({'path': '.DIGITAL_OUTS:DO%d' % (
i,), 'type': 'numeric', 'value': 0, 'options': ('no_write_shot',)})
parts.append({'path': '.DIGITAL_INS', 'type': 'structure'})
for i in range(4):
parts.append({'path': '.DIGITAL_INS:DI%d' % (
i,), 'type': 'numeric', 'options': ('no_write_model', 'write_once')})
parts.append({'path': ':INIT_ACTION', 'type': 'action',
'valueExpr': "Action(Dispatch('CAMAC_SERVER','INIT',50,None),Method(None,'INIT',head))",
'options': ('no_write_shot',)})
parts.append({'path': ':STORE_ACTION', 'type': 'action',
'valueExpr': "Action(Dispatch('CAMAC_SERVER','STORE',50,None),Method(None,'STORE',head))",
'options': ('no_write_shot',)})
del i
def METHOD_NAME(self):
"""Initialize digital outputs of CP7452 cpci board.
Connects to the host and for each of the DIGITAL_OUTS nodes which are turned on, write the value to the digital output.
"""
import os
from MDSplus import Uint32
debug = os.getenv("DEBUG_DEVICES")
try:
host = str(self.node.record.data())
except:
host = 'local'
if Data.execute('mdsconnect($)', host) == 0:
raise Exception("Error connecting to host: "+host)
board = int(self.board.record)
for i in range(4):
do_nid = self.__getattr__('digital_outs_do%d' % (i,))
if do_nid.on:
try:
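                    # Build a TDI expression that, via MdsValue on the connected
                    # host, switches the driver to hex output format and writes
                    # this channel's value to /dev/cp7452.<board>/DO<i>.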
exp = 'MdsValue("_lun=fopen(\\\"/sys/module/cp7452_drv/parameters/format\\\",\\\"r+\\"); write(_lun,\\\"1\\\"); fclose(_lun);'
exp = exp+'_lun=fopen(\\\"/dev/cp7452.%d/DO%d\\\",\\\"r+\\\"); write(_lun,\\\"%x\\\"); fclose(_lun)")' % (
board, i, int(Uint32(do_nid.record.data()).data()))
if debug:
print(exp)
Data.execute(exp)
except Exception as e:
print("Error outputing to DO%d\n\t%s" % (i, str(e),))
return 1
INIT = METHOD_NAME
def store(self):
"""Stores the digital input values into the tree.
Connects to the host and for each of the DIGITAL_INS nodes which are turned on, read the digital input and store the value in the node.
"""
import os
debug = os.getenv("DEBUG_DEVICES")
try:
host = str(self.node.record.data())
except:
host = 'local'
if Data.execute('mdsconnect($)', host) == 0:
raise Exception("Error connecting to host: "+host)
board = int(self.board.record)
for i in range(4):
            di_nid = self.__getattr__('digital_ins_di%d' % (i,))
if di_nid.on:
try:
exp = 'MdsValue("_lun=fopen(\\\"/sys/module/cp7452_drv/parameters/format\\\",\\\"r+\\\"); write(_lun,\\\"1\\\"); fclose(_lun);'
exp = exp + \
'_lun=fopen(\\\"/dev/cp7452.%d/DI%d\\\",\\\"r\\\"); _ans=read(_lun); fclose(_lun),_ans")' % (
board, i)
if debug:
print(exp)
value = eval('0x'+str(Data.execute(exp)))
di_nid.record = value
except Exception as e:
print("Error inputting from DI%d\n\t%s" % (i, str(e),))
return 1
STORE = store
|
299,548 |
predict
|
from numpy import inf, nan
from sklearn.neighbors import RadiusNeighborsClassifier as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _RadiusNeighborsClassifierImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def METHOD_NAME(self, X):
return self._wrapped_model.METHOD_NAME(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for RadiusNeighborsClassifier Classifier implementing a vote among neighbors within a given radius",
"allOf": [
{
"type": "object",
"required": [
"radius",
"weights",
"algorithm",
"leaf_size",
"p",
"metric",
"outlier_label",
"metric_params",
"n_jobs",
],
"relevantToOptimizer": [
"weights",
"algorithm",
"leaf_size",
"p",
"metric",
"outlier_label",
],
"additionalProperties": False,
"properties": {
"radius": {
"type": "number",
"default": 1.0,
"description": "Range of parameter space to use by default for :meth:`radius_neighbors` queries.",
},
"weights": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": ["distance", "uniform"]},
],
"default": "uniform",
"description": "weight function used in prediction",
},
"algorithm": {
"enum": ["auto", "ball_tree", "kd_tree", "brute"],
"default": "auto",
"description": "Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDTree` - 'brute' will use a brute-force search",
},
"leaf_size": {
"type": "integer",
"minimumForOptimizer": 30,
"maximumForOptimizer": 31,
"distribution": "uniform",
"default": 30,
"description": "Leaf size passed to BallTree or KDTree",
},
"p": {
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 3,
"distribution": "uniform",
"default": 2,
"description": "Power parameter for the Minkowski metric",
},
"metric": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{
"enum": [
"euclidean",
"manhattan",
"minkowski",
"precomputed",
]
},
],
"default": "minkowski",
"description": "the distance metric to use for the tree",
},
"outlier_label": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{"enum": ["most_frequent"]},
{"enum": [None]},
],
"default": None,
"description": "Label, which is given for outlier samples (samples with no neighbors on given radius)",
},
"metric_params": {
"anyOf": [{"type": "object"}, {"enum": [None]}],
"default": None,
"description": "Additional keyword arguments for the metric function.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "The number of parallel jobs to run for neighbors search",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model using X as training data and y as target values",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "{array-like, sparse matrix, BallTree, KDTree}",
"description": "Training data",
},
"y": {
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "{array-like, sparse matrix}",
"description": "Target values of shape = [n_samples] or [n_samples, n_outputs]",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict the class labels for the provided data",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'",
"description": "Test samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Class labels for each data sample.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.neighbors.RadiusNeighborsClassifier#sklearn-neighbors-radiusneighborsclassifier",
"import_from": "sklearn.neighbors",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
RadiusNeighborsClassifier = make_operator(
_RadiusNeighborsClassifierImpl, _combined_schemas
)
set_docstrings(RadiusNeighborsClassifier)
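# A minimal usage sketch of the wrapped operator (an illustration, not part of the
# original module; the toy data below is made up):
if __name__ == "__main__":
    import numpy as np
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])
    trained = RadiusNeighborsClassifier(radius=1.5).fit(X, y)
    print(trained.predict(np.array([[1.1]])))  # every query point here has neighbors within 1.5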
|
299,549 |
run
|
"""distutils.command.upload
Implements the Distutils 'upload' subcommand (upload package to PyPI)."""
import os
import socket
import platform
from urllib2 import urlopen, Request, HTTPError
from base64 import standard_b64encode
import urlparse
import cStringIO as StringIO
from hashlib import md5
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils.core import PyPIRCCommand
from distutils.spawn import spawn
from distutils import log
class upload(PyPIRCCommand):
description = "upload binary package to PyPI"
user_options = PyPIRCCommand.user_options + [
('sign', 's',
'sign files to upload using gpg'),
('identity=', 'i', 'GPG identity used to sign files'),
]
boolean_options = PyPIRCCommand.boolean_options + ['sign']
def initialize_options(self):
PyPIRCCommand.initialize_options(self)
self.username = ''
self.password = ''
self.show_response = 0
self.sign = False
self.identity = None
def finalize_options(self):
PyPIRCCommand.finalize_options(self)
if self.identity and not self.sign:
raise DistutilsOptionError(
"Must use --sign for --identity to have meaning"
)
config = self._read_pypirc()
if config != {}:
self.username = config['username']
self.password = config['password']
self.repository = config['repository']
self.realm = config['realm']
# getting the password from the distribution
# if previously set by the register command
if not self.password and self.distribution.password:
self.password = self.distribution.password
def METHOD_NAME(self):
if not self.distribution.dist_files:
raise DistutilsOptionError("No dist file created in earlier command")
for command, pyversion, filename in self.distribution.dist_files:
self.upload_file(command, pyversion, filename)
def upload_file(self, command, pyversion, filename):
# Makes sure the repository URL is compliant
schema, netloc, url, params, query, fragments = \
urlparse.urlparse(self.repository)
if params or query or fragments:
raise AssertionError("Incompatible url %s" % self.repository)
if schema not in ('http', 'https'):
raise AssertionError("unsupported schema " + schema)
# Sign if requested
if self.sign:
gpg_args = ["gpg", "--detach-sign", "-a", filename]
if self.identity:
gpg_args[2:2] = ["--local-user", self.identity]
spawn(gpg_args,
dry_run=self.dry_run)
# Fill in the data - send all the meta-data in case we need to
# register a new release
f = open(filename,'rb')
try:
content = f.read()
finally:
f.close()
meta = self.distribution.metadata
data = {
# action
':action': 'file_upload',
'protcol_version': '1',
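            # NB: the misspelled 'protcol_version' key above is kept as-is; it is the
            # field name the distutils upload command has historically sent.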
# identify release
'name': meta.get_name(),
'version': meta.get_version(),
# file content
'content': (os.path.basename(filename),content),
'filetype': command,
'pyversion': pyversion,
'md5_digest': md5(content).hexdigest(),
# additional meta-data
'metadata_version' : '1.0',
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
comment = ''
if command == 'bdist_rpm':
dist, version, id = platform.dist()
if dist:
comment = 'built for %s %s' % (dist, version)
elif command == 'bdist_dumb':
comment = 'built for %s' % platform.platform(terse=1)
data['comment'] = comment
if self.sign:
data['gpg_signature'] = (os.path.basename(filename) + ".asc",
open(filename+".asc").read())
# set up the authentication
auth = "Basic " + standard_b64encode(self.username + ":" +
self.password)
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\r\n--' + boundary
end_boundary = sep_boundary + '--\r\n'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if not isinstance(value, list):
value = [value]
for value in value:
if isinstance(value, tuple):
fn = ';filename="%s"' % value[0]
value = value[1]
else:
fn = ""
body.write(sep_boundary)
body.write('\r\nContent-Disposition: form-data; name="%s"' % key)
body.write(fn)
body.write("\r\n\r\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body = body.getvalue()
self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)
# build the Request
headers = {'Content-type':
'multipart/form-data; boundary=%s' % boundary,
'Content-length': str(len(body)),
'Authorization': auth}
request = Request(self.repository, data=body,
headers=headers)
# send the data
try:
result = urlopen(request)
status = result.getcode()
reason = result.msg
if self.show_response:
msg = '\n'.join(('-' * 75, result.read(), '-' * 75))
self.announce(msg, log.INFO)
except socket.error, e:
self.announce(str(e), log.ERROR)
raise
except HTTPError, e:
status = e.code
reason = e.msg
if status == 200:
self.announce('Server response (%s): %s' % (status, reason),
log.INFO)
else:
msg = 'Upload failed (%s): %s' % (status, reason)
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
|
299,550 |
get owner
|
import csv
import io as StringIO
import urllib.parse
import offtrac
import requests
import typing_extensions
from bugwarrior import config
from bugwarrior.services import Issue, IssueService
import logging
log = logging.getLogger(__name__)
class TracConfig(config.ServiceConfig):
service: typing_extensions.Literal['trac']
base_uri: config.NoSchemeUrl
scheme: str = 'https'
no_xmlrpc: bool = False
username: str = ''
password: str = ''
class TracIssue(Issue):
SUMMARY = 'tracsummary'
URL = 'tracurl'
NUMBER = 'tracnumber'
COMPONENT = 'traccomponent'
UDAS = {
SUMMARY: {
'type': 'string',
'label': 'Trac Summary',
},
URL: {
'type': 'string',
'label': 'Trac URL',
},
NUMBER: {
'type': 'numeric',
'label': 'Trac Number',
},
COMPONENT: {
'type': 'string',
'label': 'Trac Component',
},
}
UNIQUE_KEY = (URL, )
PRIORITY_MAP = {
'trivial': 'L',
'minor': 'L',
'major': 'M',
'critical': 'H',
'blocker': 'H',
}
def to_taskwarrior(self):
return {
'project': self.extra['project'],
'priority': self.get_priority(),
'annotations': self.extra['annotations'],
self.URL: self.record['url'],
self.SUMMARY: self.record['summary'],
self.NUMBER: self.record['number'],
self.COMPONENT: self.record['component'],
}
def get_default_description(self):
if 'number' in self.record:
number = self.record['number']
else:
number = self.record['id']
return self.build_default_description(
title=self.record['summary'],
url=self.get_processed_url(self.record['url']),
number=number,
cls='issue'
)
def get_priority(self):
return self.PRIORITY_MAP.get(
self.record.get('priority'),
self.origin['default_priority']
)
class TracService(IssueService):
ISSUE_CLASS = TracIssue
CONFIG_SCHEMA = TracConfig
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
if self.config.username:
password = self.get_password('password', self.config.username)
auth = urllib.parse.quote_plus(
f'{self.config.username}:{password}@')
else:
auth = ''
self.trac = None
uri = f'{self.config.scheme}://{auth}{self.config.base_uri}/'
if self.config.no_xmlrpc:
self.uri = uri
else:
self.trac = offtrac.TracServer(uri + 'login/xmlrpc')
@staticmethod
def get_keyring_service(config):
return f"https://{config.username}@{config.base_uri}/"
def annotations(self, tag, issue, issue_obj):
annotations = []
# without offtrac, we can't get issue comments
if self.trac is None:
return annotations
changelog = self.trac.server.ticket.changeLog(issue['number'])
        for time, author, field, oldvalue, newvalue, permanent in changelog:
if field == 'comment':
annotations.append((author, newvalue, ))
url = issue_obj.get_processed_url(issue['url'])
return self.build_annotations(annotations, url)
def METHOD_NAME(self, issue):
tag, issue = issue
return issue.get('owner', None) or None
def issues(self):
base_url = "https://" + self.config.base_uri
if self.trac:
tickets = self.trac.query_tickets('status!=closed&max=0')
tickets = list(map(self.trac.get_ticket, tickets))
issues = [(self.target, ticket[3]) for ticket in tickets]
for i in range(len(issues)):
issues[i][1]['url'] = "%s/ticket/%i" % (base_url, tickets[i][0])
issues[i][1]['number'] = tickets[i][0]
else:
resp = requests.get(
self.uri + 'query',
params={
'status': '!closed',
'max': '0',
'format': 'csv',
'col': ['id', 'summary', 'owner', 'priority', 'component'],
})
if resp.status_code != 200:
raise RuntimeError("Trac responded with %s" % resp)
# strip Trac's bogus BOM
text = resp.text[1:].lstrip('\ufeff')
            tickets = list(csv.DictReader(StringIO.StringIO(text)))
issues = [(self.target, ticket) for ticket in tickets]
for i in range(len(issues)):
issues[i][1]['url'] = "%s/ticket/%s" % (base_url, tickets[i]['id'])
issues[i][1]['number'] = int(tickets[i]['id'])
log.debug(" Found %i total.", len(issues))
issues = list(filter(self.include, issues))
log.debug(" Pruned down to %i", len(issues))
for project, issue in issues:
issue_obj = self.get_issue_for_record(issue)
extra = {
'annotations': self.annotations(project, issue, issue_obj),
'project': project,
}
issue_obj.update_extra(extra)
yield issue_obj
|
299,551 |
get api version
|
# oxShibboleth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2020, Janssen
#
# Author: Yuriy Movchan
#
from io.jans.model.custom.script.type.idp import IdpType
from io.jans.util import StringHelper
from io.jans.idp.externalauth import AuthenticatedNameTranslator
from net.shibboleth.idp.authn.principal import UsernamePrincipal, IdPAttributePrincipal
from net.shibboleth.idp.authn import ExternalAuthentication
from net.shibboleth.idp.attribute import IdPAttribute, StringAttributeValue
from net.shibboleth.idp.authn.context import AuthenticationContext, ExternalAuthenticationContext
from net.shibboleth.idp.attribute.context import AttributeContext
from javax.security.auth import Subject
from java.util import Collections, HashMap, HashSet, ArrayList, Arrays
import java
class IdpExtension(IdpType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, customScript, configurationAttributes):
print "Idp extension. Initialization"
self.defaultNameTranslator = AuthenticatedNameTranslator()
return True
def destroy(self, configurationAttributes):
print "Idp extension. Destroy"
return True
def METHOD_NAME(self):
return 11
# Translate attributes from user profile
# context is io.jans.idp.externalauth.TranslateAttributesContext (https://github.com/JanssenFederation/shib-oxauth-authn3/blob/master/src/main/java/io.jans.idp/externalauth/TranslateAttributesContext.java)
# configurationAttributes is java.util.Map<String, SimpleCustomProperty>
def translateAttributes(self, context, configurationAttributes):
print "Idp extension. Method: translateAttributes"
# Return False to use default method
#return False
request = context.getRequest()
userProfile = context.getUserProfile()
principalAttributes = self.defaultNameTranslator.produceIdpAttributePrincipal(userProfile.getAttributes())
print "Idp extension. Converted user profile: '%s' to attribute principal: '%s'" % (userProfile, principalAttributes)
if not principalAttributes.isEmpty():
print "Idp extension. Found attributes from oxAuth. Processing..."
# Start: Custom part
# Add givenName attribute
givenNameAttribute = IdPAttribute("jansEnrollmentCode")
givenNameAttribute.setValues(ArrayList(Arrays.asList(StringAttributeValue("Dummy"))))
principalAttributes.add(IdPAttributePrincipal(givenNameAttribute))
print "Idp extension. Updated attribute principal: '%s'" % principalAttributes
# End: Custom part
principals = HashSet()
principals.addAll(principalAttributes)
principals.add(UsernamePrincipal(userProfile.getId()))
request.setAttribute(ExternalAuthentication.SUBJECT_KEY, Subject(False, Collections.singleton(principals),
Collections.emptySet(), Collections.emptySet()))
print "Created an IdP subject instance with principals containing attributes for: '%s'" % userProfile.getId()
if False:
idpAttributes = ArrayList()
for principalAttribute in principalAttributes:
idpAttributes.add(principalAttribute.getAttribute())
request.setAttribute(ExternalAuthentication.ATTRIBUTES_KEY, idpAttributes)
authenticationKey = context.getAuthenticationKey()
profileRequestContext = ExternalAuthentication.getProfileRequestContext(authenticationKey, request)
authContext = profileRequestContext.getSubcontext(AuthenticationContext)
extContext = authContext.getSubcontext(ExternalAuthenticationContext)
extContext.setSubject(Subject(False, Collections.singleton(principals), Collections.emptySet(), Collections.emptySet()));
extContext.getSubcontext(AttributeContext, True).setUnfilteredIdPAttributes(idpAttributes)
extContext.getSubcontext(AttributeContext).setIdPAttributes(idpAttributes)
else:
print "No attributes released from oxAuth. Creating an IdP principal for: '%s'" % userProfile.getId()
request.setAttribute(ExternalAuthentication.PRINCIPAL_NAME_KEY, userProfile.getId())
#Return True to specify that default method is not needed
return False
# Update attributes before releasing them
# context is io.jans.idp.consent.processor.PostProcessAttributesContext (https://github.com/JanssenFederation/shib-oxauth-authn3/blob/master/src/main/java/io.jans.idp/consent/processor/PostProcessAttributesContext.java)
# configurationAttributes is java.util.Map<String, SimpleCustomProperty>
def updateAttributes(self, context, configurationAttributes):
print "Idp extension. Method: updateAttributes"
attributeContext = context.getAttributeContext()
customAttributes = HashMap()
customAttributes.putAll(attributeContext.getIdPAttributes())
# Remove givenName attribute
customAttributes.remove("givenName")
# Update surname attribute
if customAttributes.containsKey("sn"):
customAttributes.get("sn").setValues(ArrayList(Arrays.asList(StringAttributeValue("Dummy"))))
# Set updated attributes
attributeContext.setIdPAttributes(customAttributes.values())
return True
|
299,552 |
creation date
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetServerKeyResult',
'AwaitableGetServerKeyResult',
'get_server_key',
'get_server_key_output',
]
@pulumi.output_type
class GetServerKeyResult:
"""
A server key.
"""
def __init__(__self__, METHOD_NAME=None, id=None, kind=None, location=None, name=None, server_key_type=None, subregion=None, thumbprint=None, type=None, uri=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'creation_date' to be a str")
pulumi.set(__self__, "creation_date", METHOD_NAME)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if server_key_type and not isinstance(server_key_type, str):
raise TypeError("Expected argument 'server_key_type' to be a str")
pulumi.set(__self__, "server_key_type", server_key_type)
if subregion and not isinstance(subregion, str):
raise TypeError("Expected argument 'subregion' to be a str")
pulumi.set(__self__, "subregion", subregion)
if thumbprint and not isinstance(thumbprint, str):
raise TypeError("Expected argument 'thumbprint' to be a str")
pulumi.set(__self__, "thumbprint", thumbprint)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if uri and not isinstance(uri, str):
raise TypeError("Expected argument 'uri' to be a str")
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="creationDate")
def METHOD_NAME(self) -> Optional[str]:
"""
The server key creation date.
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of encryption protector. This is metadata used for the Azure portal experience.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="serverKeyType")
def server_key_type(self) -> str:
"""
The server key type like 'ServiceManaged', 'AzureKeyVault'.
"""
return pulumi.get(self, "server_key_type")
@property
@pulumi.getter
def subregion(self) -> str:
"""
Subregion of the server key.
"""
return pulumi.get(self, "subregion")
@property
@pulumi.getter
def thumbprint(self) -> Optional[str]:
"""
Thumbprint of the server key.
"""
return pulumi.get(self, "thumbprint")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def uri(self) -> Optional[str]:
"""
The URI of the server key.
"""
return pulumi.get(self, "uri")
class AwaitableGetServerKeyResult(GetServerKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServerKeyResult(
METHOD_NAME=self.METHOD_NAME,
id=self.id,
kind=self.kind,
location=self.location,
name=self.name,
server_key_type=self.server_key_type,
subregion=self.subregion,
thumbprint=self.thumbprint,
type=self.type,
uri=self.uri)
def get_server_key(key_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServerKeyResult:
"""
Gets a server key.
:param str key_name: The name of the server key to be retrieved.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
__args__ = dict()
__args__['keyName'] = key_name
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:sql/v20150501preview:getServerKey', __args__, opts=opts, typ=GetServerKeyResult).value
return AwaitableGetServerKeyResult(
METHOD_NAME=pulumi.get(__ret__, 'creation_date'),
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
server_key_type=pulumi.get(__ret__, 'server_key_type'),
subregion=pulumi.get(__ret__, 'subregion'),
thumbprint=pulumi.get(__ret__, 'thumbprint'),
type=pulumi.get(__ret__, 'type'),
uri=pulumi.get(__ret__, 'uri'))
@_utilities.lift_output_func(get_server_key)
def get_server_key_output(key_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetServerKeyResult]:
"""
Gets a server key.
:param str key_name: The name of the server key to be retrieved.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
...
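# A minimal usage sketch inside a Pulumi program (illustrative only; the resource
# names below are hypothetical placeholders, not values from this module):
#   key = get_server_key(resource_group_name="my-rg", server_name="my-server",
#                        key_name="my-vault_my-key_0123456789abcdef")
#   pulumi.export("serverKeyType", key.server_key_type)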
|
299,553 |
test shape geo interface with fields
|
# Project: MapServer
# Purpose: xUnit style Python mapscript tests of Shape
# Author: Sean Gillies, [email protected]
#
# ===========================================================================
# Copyright (c) 2004, Sean Gillies
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ===========================================================================
import unittest
import mapscript
from .testing import MapTestCase, ShapeObjTestCase
class ShapePointTestCase(ShapeObjTestCase):
"""Test point type shapeObj in stand-alone mode"""
def setUp(self):
"""The test fixture is a shape of one point"""
self.points = (mapscript.pointObj(0.0, 1.0), )
self.lines = (mapscript.lineObj(), )
self.addPointToLine(self.lines[0], self.points[0])
self.shape = mapscript.shapeObj(mapscript.MS_SHAPE_POINT)
self.addLineToShape(self.shape, self.lines[0])
def testCreateShape(self):
"""the number of lines is correct"""
assert self.shape.numlines == 1
def testShapeClone(self):
"""test shape can be copied"""
s = self.shape.clone()
self.assertShapesEqual(self.shape, s)
class InlineFeatureTestCase(MapTestCase):
"""tests for issue http://mapserver.gis.umn.edu/bugs/show_bug.cgi?id=562"""
def testAddPointFeature(self):
"""adding a point to an inline feature works correctly"""
inline_layer = self.map.getLayerByName('INLINE')
assert inline_layer.connectiontype == mapscript.MS_INLINE
p = mapscript.pointObj(0.2, 51.5)
lyr = mapscript.lineObj()
self.addPointToLine(lyr, p)
shape = mapscript.shapeObj(inline_layer.type)
shape.classindex = 0
self.addLineToShape(shape, lyr)
inline_layer.addFeature(shape)
msimg = self.map.draw()
filename = 'testAddPointFeature.png'
msimg.save(filename)
def testGetShape(self):
"""returning the shape from an inline feature works"""
inline_layer = self.map.getLayerByName('INLINE')
inline_layer.open()
inline_layer.template = "FAKE"
inline_layer.queryByIndex(self.map, -1, 0)
res = inline_layer.getResult(0)
s = inline_layer.getShape(res)
lyr = self.getLineFromShape(s, 0)
p = self.getPointFromLine(lyr, 0)
self.assertAlmostEqual(p.x, -0.2)
self.assertAlmostEqual(p.y, 51.5)
def testGetNumFeatures(self):
"""the number of features in the inline layer is correct"""
inline_layer = self.map.getLayerByName('INLINE')
assert inline_layer.getNumFeatures() == 1
def testShapeGeoInterface(self):
"""return the shape using the __geo_interface__ protocol with no attribute names"""
layer = self.map.getLayerByName('POLYGON')
layer.open()
layer.template = "FAKE"
layer.queryByIndex(self.map, -1, 0)
res = layer.getResult(0)
s = layer.getShape(res)
assert s.__geo_interface__ == {
'geometry': {
'type': 'Polygon',
'coordinates': [[(-0.25, 51.227222, 0.0), (-0.25, 51.727222, 0.0), (0.25, 51.727222, 0.0),
(0.25, 51.227222, 0.0), (-0.25, 51.227222, 0.0)]]
},
'type': 'Feature',
'bbox': (-0.25, 51.227222, 0.25, 51.727222),
'properties': {'1': 'A Polygon', '0': '1'}
}
layer.close()
def METHOD_NAME(self):
"""return the shape using the __geo_interface__ protocol with attribute names"""
layer = self.map.getLayerByName('POINT')
layer.open()
layer.template = "FAKE"
layer.queryByIndex(self.map, -1, 0)
res = layer.getResult(0)
s = layer.getShape(res)
s.itemdefinitions = layer.getItemDefinitions()
assert s.__geo_interface__ == {
'geometry': {
'type': 'Point',
'coordinates': [[(0.0, 51.477222, 0.0)]]
},
'type': 'Feature',
'bbox': (0.0, 51.477222, 0.0, 51.477222),
'properties': {'FNAME': 'A Point', 'FID': 1}
}
layer.close()
class ShapeValuesTestCase(unittest.TestCase):
def testNullValue(self):
so = mapscript.shapeObj(mapscript.MS_SHAPE_POINT)
assert so.numvalues == 0
assert so.getValue(0) is None
def testSetValue(self):
so = mapscript.shapeObj(mapscript.MS_SHAPE_POINT)
so.initValues(4)
so.setValue(0, 'Foo')
assert so.numvalues == 4
assert so.getValue(0) == 'Foo'
assert so.getValue(1) == ''
# New class for testing the WKT stuff of RFC-2
class ShapeWKTTestCase(unittest.TestCase):
# define a pair of coords, and WKT as class data
point_xy = (-105.5000000000000000, 40.0000000000000000)
point_wkt = 'POINT (-105.5000000000000000 40.0000000000000000)'
def testSetPointWKT(self):
# Create new instance and set/init from WKT
so = mapscript.shapeObj.fromWKT(self.point_wkt)
# expect one line with one point
self.assertTrue(so.numlines == 1, so.numlines)
self.assertTrue(so.get(0).numpoints == 1, so.get(0).numpoints)
# expect shape's x and y values to be correct
po = so.get(0).get(0)
self.assertAlmostEqual(po.x, self.point_xy[0])
self.assertAlmostEqual(po.y, self.point_xy[1])
def testGetPointWKT(self):
# Create new instance from class data
po = mapscript.pointObj(self.point_xy[0], self.point_xy[1])
lo = mapscript.lineObj()
lo.add(po)
so = mapscript.shapeObj(mapscript.MS_SHAPE_POINT)
so.add(lo)
# test output WKT
wkt = so.toWKT()
self.assertTrue(wkt == self.point_wkt, wkt)
if __name__ == '__main__':
unittest.main()
|
299,554 |
hue
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import math
import numpy as np
from PIL import Image, ImageEnhance
def normalize(im, mean, std):
im = im / 255.0
im -= mean
im /= std
return im
def permute(im, to_bgr=False):
im = np.swapaxes(im, 1, 2)
im = np.swapaxes(im, 1, 0)
if to_bgr:
im = im[[2, 1, 0], :, :]
return im
def resize_long(im, long_size=224, interpolation=cv2.INTER_LINEAR):
value = max(im.shape[0], im.shape[1])
scale = float(long_size) / float(value)
resized_width = int(round(im.shape[1] * scale))
resized_height = int(round(im.shape[0] * scale))
im = cv2.resize(
im, (resized_width, resized_height), interpolation=interpolation)
return im
def resize(im, target_size=608, interp=cv2.INTER_LINEAR):
if isinstance(target_size, list) or isinstance(target_size, tuple):
w = target_size[0]
h = target_size[1]
else:
w = target_size
h = target_size
im = cv2.resize(im, (w, h), interpolation=interp)
return im
def random_crop(im,
crop_size=224,
lower_scale=0.08,
lower_ratio=3. / 4,
upper_ratio=4. / 3):
scale = [lower_scale, 1.0]
ratio = [lower_ratio, upper_ratio]
aspect_ratio = math.sqrt(np.random.uniform(*ratio))
w = 1. * aspect_ratio
h = 1. / aspect_ratio
bound = min((float(im.shape[0]) / im.shape[1]) / (h**2),
(float(im.shape[1]) / im.shape[0]) / (w**2))
scale_max = min(scale[1], bound)
scale_min = min(scale[0], bound)
target_area = im.shape[0] * im.shape[1] * np.random.uniform(scale_min,
scale_max)
target_size = math.sqrt(target_area)
w = int(target_size * w)
h = int(target_size * h)
i = np.random.randint(0, im.shape[0] - h + 1)
j = np.random.randint(0, im.shape[1] - w + 1)
im = im[i:i + h, j:j + w, :]
im = cv2.resize(im, (crop_size, crop_size))
return im
def center_crop(im, crop_size=224):
height, width = im.shape[:2]
w_start = (width - crop_size) // 2
h_start = (height - crop_size) // 2
w_end = w_start + crop_size
h_end = h_start + crop_size
im = im[h_start:h_end, w_start:w_end, :]
return im
def horizontal_flip(im):
if len(im.shape) == 3:
im = im[:, ::-1, :]
elif len(im.shape) == 2:
im = im[:, ::-1]
return im
def vertical_flip(im):
if len(im.shape) == 3:
im = im[::-1, :, :]
elif len(im.shape) == 2:
im = im[::-1, :]
return im
def bgr2rgb(im):
return im[:, :, ::-1]
def METHOD_NAME(im, hue_lower, hue_upper):
delta = np.random.uniform(hue_lower, hue_upper)
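    # The shift is applied in YIQ space: tyiq converts RGB->YIQ, bt rotates the
    # I/Q (chroma) plane by delta*pi radians, ityiq converts back to RGB, and the
    # composite matrix t is applied to every pixel.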
u = np.cos(delta * np.pi)
w = np.sin(delta * np.pi)
bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])
tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],
[0.211, -0.523, 0.311]])
ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],
[1.0, -1.107, 1.705]])
t = np.dot(np.dot(ityiq, bt), tyiq).T
im = np.dot(im, t)
return im
def saturation(im, saturation_lower, saturation_upper):
delta = np.random.uniform(saturation_lower, saturation_upper)
gray = im * np.array([[[0.299, 0.587, 0.114]]], dtype=np.float32)
gray = gray.sum(axis=2, keepdims=True)
gray *= (1.0 - delta)
im *= delta
im += gray
return im
def contrast(im, contrast_lower, contrast_upper):
delta = np.random.uniform(contrast_lower, contrast_upper)
im *= delta
return im
def brightness(im, brightness_lower, brightness_upper):
delta = np.random.uniform(brightness_lower, brightness_upper)
im += delta
return im
def rotate(im, rotate_lower, rotate_upper):
rotate_delta = np.random.uniform(rotate_lower, rotate_upper)
im = im.rotate(int(rotate_delta))
return im
def resize_padding(im, max_side_len=2400):
'''
resize image to a size multiple of 32 which is required by the network
:param im: the resized image
:param max_side_len: limit of max image size to avoid out of memory in gpu
:return: the resized image and the resize ratio
'''
h, w, _ = im.shape
resize_w = w
resize_h = h
# limit the max side
if max(resize_h, resize_w) > max_side_len:
ratio = float(
max_side_len) / resize_h if resize_h > resize_w else float(
max_side_len) / resize_w
else:
ratio = 1.
resize_h = int(resize_h * ratio)
resize_w = int(resize_w * ratio)
resize_h = resize_h if resize_h % 32 == 0 else (resize_h // 32 - 1) * 32
resize_w = resize_w if resize_w % 32 == 0 else (resize_w // 32 - 1) * 32
resize_h = max(32, resize_h)
resize_w = max(32, resize_w)
im = cv2.resize(im, (int(resize_w), int(resize_h)))
#im = cv2.resize(im, (512, 512))
ratio_h = resize_h / float(h)
ratio_w = resize_w / float(w)
_ratio = np.array([ratio_h, ratio_w]).reshape(-1, 2)
return im, _ratio
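# A minimal usage sketch of these helpers (an illustration, not part of the original
# module; "input.jpg" and the mean/std values are hypothetical placeholders):
if __name__ == "__main__":
    img = cv2.imread("input.jpg").astype("float32")
    img = resize_long(img, long_size=256)
    img = center_crop(img, crop_size=224)
    img = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    chw = permute(img)  # HWC -> CHW
    print(chw.shape)  # expected: (3, 224, 224)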
|
299,555 |
prepare export line
|
# Copyright 2019 Simone Rubino - Agile Business Group
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
from .intrastat_statement import format_9, format_x
class IntrastatStatementPurchaseSection4(models.Model):
_inherit = "account.intrastat.statement.purchase.section"
_name = "account.intrastat.statement.purchase.section4"
_description = "Intrastat Statement - Purchases Section 4"
intrastat_custom_id = fields.Many2one(
comodel_name="account.intrastat.custom", string="Customs Section"
)
month = fields.Integer(string="Ref. Month")
quarterly = fields.Integer(string="Ref. Quarter")
year_id = fields.Integer(string="Ref. Year")
protocol = fields.Integer(string="Protocol Number")
progressive_to_modify_id = fields.Many2one(
comodel_name="account.intrastat.statement.purchase.section1",
string="Progressive to Adjust ID",
)
progressive_to_modify = fields.Integer(string="Progressive to Adjust")
invoice_number = fields.Char(string="Invoice Number")
invoice_date = fields.Date(string="Invoice Date")
supply_method = fields.Selection(
selection=[("I", "Instant"), ("R", "Repeated")], string="Supply Method"
)
payment_method = fields.Selection(
selection=[("B", "Bank Transfer"), ("A", "Credit"), ("X", "Other")],
string="Payment Method",
)
country_payment_id = fields.Many2one(
comodel_name="res.country", string="Payment Country"
)
@api.model
def get_section_number(self):
return 4
@api.model
def _prepare_statement_line(self, inv_intra_line, statement_id=None):
res = super(IntrastatStatementPurchaseSection4, self)._prepare_statement_line(
inv_intra_line, statement_id
)
# Period Ref
ref_period = statement_id._get_period_ref()
res.update(
{
"month": ref_period.get("month"),
"quarterly": ref_period.get("quarterly"),
"year_id": ref_period.get("year_id"),
"invoice_number": inv_intra_line.invoice_number,
"invoice_date": inv_intra_line.invoice_date,
"supply_method": inv_intra_line.supply_method,
"payment_method": inv_intra_line.payment_method,
"country_payment_id": inv_intra_line.country_payment_id.id,
"intrastat_custom_id": statement_id.intrastat_custom_id.id,
}
)
return res
def _export_line_checks(self, section_label, section_number):
super(IntrastatStatementPurchaseSection4, self)._export_line_checks(
section_label, section_number
)
if not self.year_id:
raise ValidationError(
_("Missing reference year on 'Purchases - Section 4'")
)
if not self.intrastat_custom_id:
raise ValidationError(
_("Missing customs section on 'Purchases - Section 4'")
)
if not self.protocol:
raise ValidationError(
_("Missing protocol number on 'Purchases - Section 4'")
)
if not self.progressive_to_modify:
raise ValidationError(
_("Missing progressive to adjust on 'Purchases - Section 4'")
)
if not self.country_payment_id:
raise ValidationError(
_("Missing payment country on 'Purchases - Section 4'")
)
def METHOD_NAME(self):
self._export_line_checks(_("Purchase"), 4)
rcd = ""
# Codice della sezione doganale in cui è stato registrata la
# dichiarazione da rettificare
rcd += format_9(self.intrastat_custom_id.code, 6)
# Anno di registrazione della dichiarazione da rettificare
year = (self.year_id or 0) % 100
rcd += format_9(year, 2)
# Protocollo della dichiarazione da rettificare
rcd += format_9(self.protocol, 6)
# Progressivo della sezione 3 da rettificare
rcd += format_9(self.progressive_to_modify_id.sequence, 5)
# Codice dello Stato membro dell’acquirente
country_id = self.country_partner_id or self.partner_id.country_id
rcd += format_x(country_id.code, 2)
# Codice IVA dell’acquirente
rcd += format_x(self.vat_code.replace(" ", ""), 12)
# Ammontare delle operazioni in euro
rcd += format_9(self.amount_euro, 13)
# Ammontare delle operazioni in valuta
# >> da valorizzare solo per operazione Paesi non Euro
amount_currency = 0
if not (
self.invoice_id.company_id.currency_id.id == self.invoice_id.currency_id.id
):
amount_currency = self.amount_currency
rcd += format_9(amount_currency, 13)
# Numero Fattura
rcd += format_x(self.invoice_number, 15)
# Data Fattura
invoice_date_ddmmyy = False
if self.invoice_date:
invoice_date_ddmmyy = self.invoice_date.strftime("%d%m%y")
rcd += format_x(invoice_date_ddmmyy, 6)
# Codice del servizio
rcd += format_9(self.intrastat_code_id.name, 6)
# Modalità di erogazione
rcd += format_x(self.supply_method, 1)
# Modalità di incasso
rcd += format_x(self.payment_method, 1)
# Codice del paese di pagamento
rcd += format_x(self.country_payment_id.code, 2)
rcd += "\r\n"
return rcd
|
299,556 |
prune
|
# Copyright (c) 2018-2019, Arm Limited and affiliates.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
import platform
import logging
try:
from plistlib import loads
except ImportError:
from plistlib import readPlistFromString as loads
from xml.parsers.expat import ExpatError
from .base import StlinkDetectBase
LOG = logging.getLogger(__name__)
mbed_volume_name_match = re.compile(r"\b(mbed)\b", re.I)
def _plist_from_popen(popen):
out, _ = popen.communicate()
if not out:
return []
try:
return loads(out)
except ExpatError:
return []
def METHOD_NAME(current, keys):
""" Reduce the amount of data we have to sift through to only
include the specified keys, and children that contain the
specified keys
"""
pruned_current = {k: current[k] for k in keys if k in current}
pruned_children = list(
filter(
None, [METHOD_NAME(c, keys) for c in current.get("IORegistryEntryChildren", [])]
)
)
keep_current = any(k in current for k in keys) or pruned_children
if keep_current:
if pruned_children:
pruned_current["IORegistryEntryChildren"] = pruned_children
return pruned_current
else:
return {}
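# Illustrative example of the pruning above (values are made up): with
#   current = {"BSD Name": "disk2", "extra": 1, "IORegistryEntryChildren": [{"noise": True}]}
# and keys = ["BSD Name"], the child contains none of the requested keys and is
# dropped, so the result is {"BSD Name": "disk2"}.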
def _dfs_usb_info(obj, parents):
""" Find all of the usb info that we can from this particular IORegistry
tree with depth first search (and searching the parent stack....)
"""
output = {}
if (
"BSD Name" in obj
and obj["BSD Name"].startswith("disk")
and mbed_volume_name_match.search(obj["IORegistryEntryName"])
):
disk_id = obj["BSD Name"]
usb_info = {"serial": None}
for parent in [obj] + parents:
if "USB Serial Number" in parent:
usb_info["serial"] = parent["USB Serial Number"]
break
output[disk_id] = usb_info
for child in obj.get("IORegistryEntryChildren", []):
output.update(_dfs_usb_info(child, [obj] + parents))
return output
class StlinkDetectDarwin(StlinkDetectBase):
""" mbed-enabled platform detection on Mac OS X
"""
def __init__(self, **kwargs):
StlinkDetectBase.__init__(self, **kwargs)
self.mac_version = float(".".join(platform.mac_ver()[0].split(".")[:2]))
def find_candidates(self):
# {volume_id: {serial:, vendor_id:, product_id:, tty:}}
volumes = self._volumes()
# {volume_id: mount_point}
mounts = self._mount_points()
return [
{
"mount_point": mounts[v],
"target_id_usb_id": volumes[v].get("serial"),
"vendor_id": volumes[v].get("vendor_id"),
"product_id": volumes[v].get("product_id"),
}
            for v in set(volumes.keys()) & set(mounts.keys())
if v in mounts and v in volumes
]
def _mount_points(self):
""" Returns map {volume_id: mount_point} """
diskutil_ls = subprocess.Popen(
["diskutil", "list", "-plist"], stdout=subprocess.PIPE
)
disks = _plist_from_popen(diskutil_ls)
return {
disk["DeviceIdentifier"]: disk.get("MountPoint", None)
for disk in disks["AllDisksAndPartitions"]
}
def _volumes(self):
""" returns a map {volume_id: {serial:, vendor_id:, product_id:, tty:}"""
# to find all the possible mbed volumes, we look for registry entries
# under all possible USB tree which have a "BSD Name" that starts with
# "disk" # (i.e. this is a USB disk), and have a IORegistryEntryName that
# matches /\cmbed/
# Once we've found a disk, we can search up for a parent with a valid
# serial number, and then search down again to find a tty that's part
# of the same composite device
# ioreg -a -r -n <usb_controller_name> -l
usb_controllers = [
# Leaving these here for reference. The code nominally scanned each controller,
# but a bug (?) caused it to only pay attention to the last one. That seems to
# work fine, so the others are commented out.
# "AppleUSBXHCI",
# "AppleUSBUHCI",
# "AppleUSBEHCI",
# "AppleUSBOHCI",
"IOUSBHostDevice",
]
cmp_par = "-n"
# For El Captain we need to list all the instances of (-c) rather than
# compare names (-n)
if self.mac_version >= 10.11:
cmp_par = "-c"
for usb_controller in usb_controllers:
ioreg_usb = subprocess.Popen(
["ioreg", "-a", "-r", cmp_par, usb_controller, "-l"],
stdout=subprocess.PIPE,
)
usb_tree = _plist_from_popen(ioreg_usb)
r = {}
for name, obj in enumerate(usb_tree):
pruned_obj = METHOD_NAME(
obj,
[
"USB Serial Number",
"idVendor",
"BSD Name",
"IORegistryEntryName",
"idProduct",
],
)
r.update(_dfs_usb_info(pruned_obj, []))
return r
|
299,557 |
check
|
# Back In Time
# Copyright (C) 2008-2022 Oprea Dan, Bart de Koning, Richard Bailey, Germar Reitze
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
This module holds the ApplicationInstance class, used to handle
the one application instance mechanism
"""
import os
import fcntl
import logger
import tools
class ApplicationInstance:
"""
Class used to handle one application instance mechanism.
Args:
pidFile (str): full path of file used to save pid and procname
autoExit (bool): automatically call sys.exit if there is an other
instance running
flock (bool): use file-locks to make sure only one instance
is checking at the same time
"""
def __init__(self, pidFile, autoExit=True, flock=False):
self.pidFile = pidFile
self.pid = 0
self.procname = ''
self.flock = None
if flock:
self.flockExclusiv()
if autoExit:
if self.METHOD_NAME(True):
self.startApplication()
def __del__(self):
self.flockUnlock()
def METHOD_NAME(self, autoExit=False):
"""
Check if the current application is already running
Args:
autoExit (bool): automatically call sys.exit if there is an other
instance running
Returns:
bool: ``True`` if this is the only application instance
"""
# check if the pidfile exists
if not os.path.isfile(self.pidFile):
return True
self.pid, self.procname = self.readPidFile()
# check if the process with specified by pid exists
if 0 == self.pid:
return True
if not tools.processAlive(self.pid):
return True
# check if the process has the same procname
# check cmdline for backwards compatibility
if self.procname and \
self.procname != tools.processName(self.pid) and \
self.procname != tools.processCmdline(self.pid):
return True
if autoExit:
# exit the application
print("The application is already running !")
# exit raise an exception so don't put it in a try/except block
exit(0)
return False
def busy(self):
"""
Check if one application with this instance is currently running.
Returns:
bool: ``True`` if an other instance is currently running.
"""
return not self.METHOD_NAME()
def startApplication(self):
"""
Called when the single instance starts to save its pid
"""
pid = os.getpid()
procname = tools.processName(pid)
try:
with open(self.pidFile, 'wt') as f:
f.write('{}\n{}'.format(pid, procname))
except OSError as e:
logger.error(
'Failed to write PID file %s: [%s] %s'
% (e.filename, e.errno, e.strerror))
self.flockUnlock()
def exitApplication(self):
"""
Called when the single instance exit (remove pid file)
"""
try:
os.remove(self.pidFile)
except:
pass
def flockExclusiv(self):
"""
Create an exclusive lock to block a second instance while
the first instance is starting.
"""
try:
self.flock = open(self.pidFile + '.flock', 'w')
fcntl.flock(self.flock, fcntl.LOCK_EX)
except OSError as e:
logger.error('Failed to write flock file %s: [%s] %s'
% (e.filename, e.errno, e.strerror))
def flockUnlock(self):
"""
Remove the exclusive lock. Second instance can now continue
but should find it self to be obsolete.
"""
if self.flock:
            fcntl.flock(self.flock, fcntl.LOCK_UN)
self.flock.close()
try:
os.remove(self.flock.name)
except:
# an other instance was faster
# race condition while using 'if os.path.exists(...)'
pass
self.flock = None
def readPidFile(self):
"""
Read the pid and procname from the file
Returns:
tuple: tuple of (pid(int), procname(str))
"""
pid = 0
procname = ''
try:
with open(self.pidFile, 'rt') as f:
data = f.read()
data = data.split('\n', 1)
if data[0].isdigit():
pid = int(data[0])
if len(data) > 1:
procname = data[1].strip('\n')
except OSError as e:
logger.warning(
'Failed to read PID and process name from %s: [%s] %s'
% (e.filename, e.errno, e.strerror))
except ValueError as e:
logger.warning(
'Failed to extract PID and process name from %s: %s'
% (self.pidFile, str(e)))
return (pid, procname)
if __name__ == '__main__':
import time
# create application instance
appInstance = ApplicationInstance('/tmp/myapp.pid')
# do something here
print("Start MyApp")
time.sleep(5) # sleep 5 seconds
print("End MyApp")
# remove pid file
appInstance.exitApplication()
|
299,558 |
from column
|
# Copyright 2018-2023 contributors to the OpenLineage project
# SPDX-License-Identifier: Apache-2.0
from typing import Dict, List, Optional
from openlineage.client.facet import (
BaseFacet,
DataSourceDatasetFacet,
DocumentationDatasetFacet,
SchemaDatasetFacet,
SchemaField,
)
from openlineage.client.run import Dataset as OpenLineageDataset
from openlineage.client.run import InputDataset, OutputDataset
from openlineage.client.utils import RedactMixin
from openlineage.common.models import DbColumn, DbTableSchema
class Source(RedactMixin):
_skip_redact: List[str] = ['scheme', 'name']
def __init__(
self,
scheme: Optional[str] = None,
authority: Optional[str] = None,
connection_url: Optional[str] = None,
name: Optional[str] = None
):
self.scheme = scheme
self.authority = authority
self._name = name
self.connection_url = connection_url
if (scheme or authority) and name:
raise RuntimeError('scheme + authority and namespace are exclusive options')
def __eq__(self, other):
return self.name == other.name and \
self.connection_url == other.connection_url
def __repr__(self):
authority = '//' + self.authority if self.authority else ''
return f"Source({self.scheme!r}:{authority} - {self.connection_url!r})"
@property
def name(self) -> str:
if self._name:
return self._name
if self.authority:
return f'{self.scheme}://{self.authority}'
return f'{self.scheme}'
class Field(RedactMixin):
_skip_redact: List[str] = ['name', 'type', 'tags']
def __init__(self, name: str, type: str,
tags: Optional[List[str]] = None, description: Optional[str] = None):
self.name = name
self.type = type
self.tags = tags
self.description = description
if self.tags is None:
self.tags = []
@staticmethod
def METHOD_NAME(column: DbColumn):
return Field(
name=column.name,
type=column.type,
description=column.description
)
def __eq__(self, other):
return self.name == other.name and \
self.type == other.type and \
self.tags == other.tags and \
self.description == other.description
def __repr__(self):
return f"Field({self.name!r},{self.type!r}, \
{self.tags!r},{self.description!r})"
class Dataset(RedactMixin):
_skip_redact: List[str] = ['name']
def __init__(
self,
source: Source,
name: str, fields: Optional[List[Field]] = None,
description: Optional[str] = None,
custom_facets: Optional[Dict[str, BaseFacet]] = None,
input_facets: Optional[Dict[str, BaseFacet]] = None,
output_facets: Optional[Dict[str, BaseFacet]] = None
):
if fields is None:
fields = []
if custom_facets is None:
custom_facets = {}
if input_facets is None:
input_facets = {}
if output_facets is None:
output_facets = {}
self.source = source
self.name = name
self.fields = fields
self.description = description
self.custom_facets = custom_facets
self.input_facets = input_facets
self.output_facets = output_facets
@staticmethod
def from_table(source: Source,
table_name: str,
schema_name: Optional[str] = None,
database_name: Optional[str] = None):
return Dataset(
name=Dataset._to_name(
schema_name=schema_name,
table_name=table_name,
database_name=database_name
),
source=source
)
@staticmethod
def from_table_schema(
source: Source,
table_schema: DbTableSchema,
database_name: Optional[str] = None
):
return Dataset(
name=Dataset._to_name(
schema_name=table_schema.schema_name,
table_name=table_schema.table_name.name,
database_name=database_name
),
source=source,
fields=[
# We want to maintain column order using ordinal position.
Field.METHOD_NAME(column) for column in sorted(
table_schema.columns, key=lambda x: x.ordinal_position
)
]
)
@staticmethod
def _to_name(table_name: str, schema_name: Optional[str] = None, database_name: Optional[str] = None): # noqa
# Prefix the table name with database and schema name using
# the format: {database_name}.{table_schema}.{table_name}.
name = [table_name]
if schema_name is not None:
name = [schema_name] + name
if database_name is not None:
name = [database_name] + name
return ".".join(name)
def __eq__(self, other):
return self.source == other.source and \
self.name == other.name and \
self.fields == other.fields and \
self.description == other.description
def __repr__(self):
return f"Dataset({self.source!r},{self.name!r}, \
{self.fields!r},{self.description!r})"
def to_openlineage_dataset(self) -> OpenLineageDataset:
facets: Dict[str, BaseFacet] = {"dataSource": DataSourceDatasetFacet(
name=self.source.name,
uri=self.source.connection_url or "",
)}
if self.description:
facets["documentation"] = DocumentationDatasetFacet(
description=self.description
)
if self.fields is not None and len(self.fields):
facets["schema"] = SchemaDatasetFacet(
fields=[
SchemaField(field.name, field.type, field.description)
for field in self.fields
]
)
if self.custom_facets:
facets.update(self.custom_facets)
if len(self.input_facets):
return InputDataset(
namespace=self.source.name,
name=self.name,
facets=facets,
inputFacets=self.input_facets
)
if len(self.output_facets):
return OutputDataset(
namespace=self.source.name,
name=self.name,
facets=facets,
outputFacets=self.output_facets
)
return OpenLineageDataset(
namespace=self.source.name,
name=self.name,
facets=facets
)
|
299,559 |
get sf
|
import logging
import re
from collections import OrderedDict
from redash.query_runner import (
TYPE_BOOLEAN,
TYPE_DATE,
TYPE_DATETIME,
TYPE_FLOAT,
TYPE_INTEGER,
TYPE_STRING,
BaseQueryRunner,
register,
)
from redash.utils import json_dumps
logger = logging.getLogger(__name__)
try:
from simple_salesforce import Salesforce as SimpleSalesforce
from simple_salesforce import SalesforceError
from simple_salesforce.api import DEFAULT_API_VERSION
enabled = True
except ImportError:
enabled = False
# See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm
TYPES_MAP = dict(
id=TYPE_STRING,
string=TYPE_STRING,
currency=TYPE_FLOAT,
reference=TYPE_STRING,
double=TYPE_FLOAT,
picklist=TYPE_STRING,
date=TYPE_DATE,
url=TYPE_STRING,
phone=TYPE_STRING,
textarea=TYPE_STRING,
int=TYPE_INTEGER,
datetime=TYPE_DATETIME,
boolean=TYPE_BOOLEAN,
percent=TYPE_FLOAT,
multipicklist=TYPE_STRING,
masterrecord=TYPE_STRING,
location=TYPE_STRING,
JunctionIdList=TYPE_STRING,
encryptedstring=TYPE_STRING,
email=TYPE_STRING,
DataCategoryGroupReference=TYPE_STRING,
combobox=TYPE_STRING,
calculated=TYPE_STRING,
anyType=TYPE_STRING,
address=TYPE_STRING,
)
# Query Runner for Salesforce SOQL Queries
# For example queries, see:
# https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm
class Salesforce(BaseQueryRunner):
should_annotate_query = False
@classmethod
def enabled(cls):
return enabled
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
"token": {"type": "string", "title": "Security Token"},
"sandbox": {"type": "boolean"},
"api_version": {
"type": "string",
"title": "Salesforce API Version",
"default": DEFAULT_API_VERSION,
},
},
"required": ["username", "password"],
"secret": ["password", "token"],
}
def test_connection(self):
response = self.METHOD_NAME().describe()
if response is None:
raise Exception("Failed describing objects.")
pass
def METHOD_NAME(self):
sf = SimpleSalesforce(
username=self.configuration["username"],
password=self.configuration["password"],
security_token=self.configuration["token"],
sandbox=self.configuration.get("sandbox", False),
version=self.configuration.get("api_version", DEFAULT_API_VERSION),
client_id="Redash",
)
return sf
def _clean_value(self, value):
if isinstance(value, OrderedDict) and "records" in value:
value = value["records"]
for row in value:
row.pop("attributes", None)
return value
def _get_value(self, dct, dots):
for key in dots.split("."):
if dct is not None and key in dct:
dct = dct.get(key)
else:
dct = None
return dct
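    # e.g. _get_value({"Account": {"Name": "Acme"}}, "Account.Name") returns "Acme",
    # and returns None if any key along the dotted path is missing (values illustrative).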
def _get_column_name(self, key, parents=[]):
return ".".join(parents + [key])
def _build_columns(self, sf, child, parents=[]):
child_type = child["attributes"]["type"]
child_desc = sf.__getattr__(child_type).describe()
child_type_map = dict((f["name"], f["type"]) for f in child_desc["fields"])
columns = []
for key in child.keys():
if key != "attributes":
if isinstance(child[key], OrderedDict) and "attributes" in child[key]:
columns.extend(self._build_columns(sf, child[key], parents + [key]))
else:
column_name = self._get_column_name(key, parents)
key_type = child_type_map.get(key, "string")
column_type = TYPES_MAP.get(key_type, TYPE_STRING)
columns.append((column_name, column_type))
return columns
def _build_rows(self, columns, records):
rows = []
for record in records:
record.pop("attributes", None)
row = dict()
for column in columns:
key = column[0]
value = self._get_value(record, key)
row[key] = self._clean_value(value)
rows.append(row)
return rows
def run_query(self, query, user):
logger.debug("Salesforce is about to execute query: %s", query)
query = re.sub(r"/\*(.|\n)*?\*/", "", query).strip()
try:
columns = []
rows = []
sf = self.METHOD_NAME()
response = sf.query_all(query)
records = response["records"]
if response["totalSize"] > 0 and len(records) == 0:
columns = self.fetch_columns([("Count", TYPE_INTEGER)])
rows = [{"Count": response["totalSize"]}]
elif len(records) > 0:
cols = self._build_columns(sf, records[0])
rows = self._build_rows(cols, records)
columns = self.fetch_columns(cols)
error = None
data = {"columns": columns, "rows": rows}
json_data = json_dumps(data)
except SalesforceError as err:
error = err.content
json_data = None
return json_data, error
def get_schema(self, get_stats=False):
sf = self.METHOD_NAME()
response = sf.describe()
if response is None:
raise Exception("Failed describing objects.")
schema = {}
for sobject in response["sobjects"]:
table_name = sobject["name"]
if sobject["queryable"] is True and table_name not in schema:
desc = sf.__getattr__(sobject["name"]).describe()
fields = desc["fields"]
schema[table_name] = {
"name": table_name,
"columns": [f["name"] for f in fields],
}
return list(schema.values())
register(Salesforce)
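# Editor's note (illustrative, not part of the upstream runner): relationship queries
# such as SELECT Name, Account.Name FROM Contact come back as nested records, which
# _build_columns/_get_value flatten into dotted column names like "Account.Name".
# The totalSize-with-no-records branch in run_query appears to cover aggregate-only
# queries, e.g. SELECT COUNT() FROM Lead, which report only a row count.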
|
299,560 |
missed runs
|
import logging
import time
import yaml
from datetime import datetime
logger = logging.getLogger('awx.main.dispatch.periodic')
class ScheduledTask:
"""
Class representing a single schedule, very loosely modeled after the python schedule library's Job
the idea of this class is to:
- only deal in relative times (time since the scheduler global start)
- only deal in integer math for target runtimes, but float for current relative time
Missed schedule policy:
Invariant target times are maintained, meaning that if interval=10s offset=0
and it runs at t=7s, then it calls for next run in 3s.
However, if a complete interval has passed, that is counted as a missed run,
and missed runs are abandoned (no catch-up runs).
"""
def __init__(self, name: str, data: dict):
# parameters needed for schedule computation
self.interval = int(data['schedule'].total_seconds())
self.offset = 0 # offset relative to start time this schedule begins
self.index = 0 # number of periods of the schedule that have passed
# parameters that do not affect scheduling logic
self.last_run = None # time of last run, only used for debug
self.completed_runs = 0 # number of times schedule is known to run
self.name = name
self.data = data # used by caller to know what to run
@property
def next_run(self):
"Target time of the next run, with t=0 being the global_start of the scheduler class"
return (self.index + 1) * self.interval + self.offset
def due_to_run(self, relative_time):
return bool(self.next_run <= relative_time)
def expected_runs(self, relative_time):
return int((relative_time - self.offset) / self.interval)
def mark_run(self, relative_time):
self.last_run = relative_time
self.completed_runs += 1
new_index = self.expected_runs(relative_time)
if new_index > self.index + 1:
logger.warning(f'Missed {new_index - self.index - 1} schedules of {self.name}')
self.index = new_index
def METHOD_NAME(self, relative_time):
"Number of times the job was supposed to run but did not; only used for debug"
missed_ct = self.expected_runs(relative_time) - self.completed_runs
# if this is currently due to run do not count that as a missed run
if missed_ct and self.due_to_run(relative_time):
missed_ct -= 1
return missed_ct
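# Illustrative note (added for clarity, not part of the upstream module): with
# interval=10 and offset=0, a task whose index is 1 and whose mark_run() is next
# called at relative time t=35 sees expected_runs(35) == 3, so one missed run is
# logged, index jumps from 1 to 3, and the next target time becomes t=40.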
class Scheduler:
def __init__(self, schedule):
"""
Expects schedule in the form of a dictionary like
{
'job1': {'schedule': timedelta(seconds=50), 'other': 'stuff'}
}
Only the schedule value, truncated to whole seconds, is used for scheduling;
the rest of the data is for the caller to know what to run.
"""
self.jobs = [ScheduledTask(name, data) for name, data in schedule.items()]
min_interval = min(job.interval for job in self.jobs)
num_jobs = len(self.jobs)
# this is intentionally opinionated against spammy schedules
# a core goal is to spread out the scheduled tasks (for worker management)
# and high-frequency schedules just do not work with that
if num_jobs > min_interval:
raise RuntimeError(f'Number of schedules ({num_jobs}) is more than the shortest schedule interval ({min_interval} seconds).')
# evenly space out jobs over the base interval
for i, job in enumerate(self.jobs):
job.offset = (i * min_interval) // num_jobs
# internally times are all referenced relative to startup time, add grace period
self.global_start = time.time() + 2.0
def get_and_mark_pending(self):
relative_time = time.time() - self.global_start
to_run = []
for job in self.jobs:
if job.due_to_run(relative_time):
to_run.append(job)
logger.debug(f'scheduler found {job.name} to run, {relative_time - job.next_run} seconds after target')
job.mark_run(relative_time)
return to_run
def time_until_next_run(self):
relative_time = time.time() - self.global_start
next_job = min(self.jobs, key=lambda j: j.next_run)
delta = next_job.next_run - relative_time
if delta <= 0.1:
# careful not to give 0 or negative values to the select timeout, which has unclear interpretation
logger.warning(f'Scheduler next run of {next_job.name} is {-delta} seconds in the past')
return 0.1
elif delta > 20.0:
logger.warning(f'Scheduler next run unexpectedly over 20 seconds in future: {delta}')
return 20.0
logger.debug(f'Scheduler next run is {next_job.name} in {delta} seconds')
return delta
def debug(self, *args, **kwargs):
data = dict()
data['title'] = 'Scheduler status'
now = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S UTC')
start_time = datetime.fromtimestamp(self.global_start).strftime('%Y-%m-%d %H:%M:%S UTC')
relative_time = time.time() - self.global_start
data['started_time'] = start_time
data['current_time'] = now
data['current_time_relative'] = round(relative_time, 3)
data['total_schedules'] = len(self.jobs)
data['schedule_list'] = dict(
[
(
job.name,
dict(
last_run_seconds_ago=round(relative_time - job.last_run, 3) if job.last_run else None,
next_run_in_seconds=round(job.next_run - relative_time, 3),
offset_in_seconds=job.offset,
completed_runs=job.completed_runs,
METHOD_NAME=job.METHOD_NAME(relative_time),
),
)
for job in sorted(self.jobs, key=lambda job: job.interval)
]
)
return yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
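# Minimal usage sketch (added for illustration; not part of the upstream dispatcher,
# and the job names/intervals below are invented). It shows how jobs are spread over
# the shortest interval and how the scheduler reports its state.
if __name__ == "__main__":
    from datetime import timedelta

    demo = Scheduler(
        {
            "cleanup": {"schedule": timedelta(seconds=10)},
            "heartbeat": {"schedule": timedelta(seconds=15)},
        }
    )
    # offsets come out as 0 and 5, spreading the two jobs across the 10s base interval
    print(demo.debug())
    print("poll again in", demo.time_until_next_run(), "seconds")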
|
299,561 |
etag
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWorkspaceManagerMemberResult',
'AwaitableGetWorkspaceManagerMemberResult',
'get_workspace_manager_member',
'get_workspace_manager_member_output',
]
@pulumi.output_type
class GetWorkspaceManagerMemberResult:
"""
The workspace manager member
"""
def __init__(__self__, METHOD_NAME=None, id=None, name=None, system_data=None, target_workspace_id=None, target_workspace_tenant_id=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", METHOD_NAME)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if target_workspace_id and not isinstance(target_workspace_id, str):
raise TypeError("Expected argument 'target_workspace_id' to be a str")
pulumi.set(__self__, "target_workspace_id", target_workspace_id)
if target_workspace_tenant_id and not isinstance(target_workspace_tenant_id, str):
raise TypeError("Expected argument 'target_workspace_tenant_id' to be a str")
pulumi.set(__self__, "target_workspace_tenant_id", target_workspace_tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource Etag.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="targetWorkspaceId")
def target_workspace_id(self) -> str:
"""
Fully qualified resource ID of the target Sentinel workspace joining the given Sentinel workspace manager
"""
return pulumi.get(self, "target_workspace_id")
@property
@pulumi.getter(name="targetWorkspaceTenantId")
def target_workspace_tenant_id(self) -> str:
"""
Tenant id of the target Sentinel workspace joining the given Sentinel workspace manager
"""
return pulumi.get(self, "target_workspace_tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetWorkspaceManagerMemberResult(GetWorkspaceManagerMemberResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWorkspaceManagerMemberResult(
METHOD_NAME=self.METHOD_NAME,
id=self.id,
name=self.name,
system_data=self.system_data,
target_workspace_id=self.target_workspace_id,
target_workspace_tenant_id=self.target_workspace_tenant_id,
type=self.type)
def get_workspace_manager_member(resource_group_name: Optional[str] = None,
workspace_manager_member_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceManagerMemberResult:
"""
Gets a workspace manager member
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_manager_member_name: The name of the workspace manager member
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceManagerMemberName'] = workspace_manager_member_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230801preview:getWorkspaceManagerMember', __args__, opts=opts, typ=GetWorkspaceManagerMemberResult).value
return AwaitableGetWorkspaceManagerMemberResult(
METHOD_NAME=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
system_data=pulumi.get(__ret__, 'system_data'),
target_workspace_id=pulumi.get(__ret__, 'target_workspace_id'),
target_workspace_tenant_id=pulumi.get(__ret__, 'target_workspace_tenant_id'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_workspace_manager_member)
def get_workspace_manager_member_output(resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_manager_member_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkspaceManagerMemberResult]:
"""
Gets a workspace manager member
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_manager_member_name: The name of the workspace manager member
:param str workspace_name: The name of the workspace.
"""
...
|
299,562 |
visit
|
# Copyright HeteroCL authors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=dangerous-default-value
class DFGNode:
def __init__(self, tensor):
self.name = tensor.name
self.tensor = tensor
self.device = None
self.children = []
self.parents = []
self.states = []
self.base = None
def add_child(self, child):
if len(self.states) != 0:
self.states[-1].children.append(child)
else:
self.children.append(child)
def add_parent(self, parent):
if len(parent.states) != 0:
self.parents.append(parent.states[-1])
else:
self.parents.append(parent)
def add_state(self, state):
state.base = self
self.states.append(state)
def has_children(self):
return len(self.children) != 0
def set_device(self, device):
self.device = device
class DataflowGraph:
def __init__(self, name="", inputs=[]):
self.name = name
self.roots = []
self.leaves = []
self.node_map = {}
self.device_map = {}
for tensor in inputs:
self.roots.append(self.create_node(tensor))
self.subgraph = {"inputs": [], "outputs": []}
self.host_xcel_place = False
def has_host_xcel_place(self):
return self.host_xcel_place
def create_node(self, tensor):
name = tensor.name
if name in self.node_map:
node = self.node_map[name]
else:
node = DFGNode(tensor)
self.node_map[name] = node
return node
def add_edge(self, src, dst, stateful=False):
if src.name == dst.name:
return
src_node = self.create_node(src)
dst_node = self.create_node(dst)
src_node.add_child(dst_node)
dst_node.add_parent(src_node)
if stateful:
src_node.add_state(dst_node)
def add_edges(self, src_nodes, dst_nodes):
if not isinstance(src_nodes, list):
src_nodes = [src_nodes]
if not isinstance(dst_nodes, list):
dst_nodes = [dst_nodes]
for src in src_nodes:
for dst in dst_nodes:
self.add_edge(src, dst)
def METHOD_NAME(self, func):
visited = set()
for node in self.roots:
self._dfs(node, visited, func)
def _dfs(self, node, visited, func=None):
if node.name in visited:
return
visited.add(node.name)
for child in node.children:
func(node, child)
self._dfs(child, visited, func)
def dump(self):
print("Dataflow graph:")
def print_node(src, dst):
print(src.name, "->", dst.name)
self.METHOD_NAME(print_node)
def visualize(self):
import networkx as nx
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
plt.figure(figsize=(8, 5), dpi=200)
edges = []
def append_edge(src, dst):
edges.append((src.name, dst.name))
self.METHOD_NAME(append_edge)
graph_name = f"dfg_{self.name}"
nx_G = nx.from_edgelist(edges, create_using=nx.DiGraph)
write_dot(nx_G, f"{graph_name}.dot")
pos = graphviz_layout(nx_G, prog="dot")
color_map = []
for node in nx_G:
if self.node_map[node].device is None:
color_map.append("blue")
elif self.node_map[node].device in {"host", "CPU"}:
color_map.append("green")
elif self.node_map[node].device in {"device", "FPGA"}:
color_map.append("red")
else:
print(node, self.node_map[node].device)
raise RuntimeError("Incorrect devices")
nx.draw_networkx(nx_G, pos, node_color=color_map)
# nx.draw_networkx(nx_G, node_color=color_map)
for color, device in (("blue", "None"), ("green", "CPU"), ("red", "FPGA")):
plt.scatter([], [], c=color, label=device)
plt.legend(loc=1)
plt.savefig(f"{graph_name}.png", format="png", dpi=200)
def propagate_annotation(self, tensor, attr):
name = tensor.name
node = self.node_map[name]
# pylint: disable=unused-argument
def set_annotation(src, dst):
dst.set_device(attr)
if attr == "CPU":
node.set_device("FPGA")
elif attr == "FPGA":
node.set_device("CPU")
# set next stage on device
visited = set()
self._dfs(node, visited, set_annotation)
self.host_xcel_place = True
def create_device_map(self):
flag = True
has_xcel = False
def check_valid(src, dst):
nonlocal flag, has_xcel
self.device_map[src.name] = src.device
self.device_map[dst.name] = dst.device
if src.device is None or dst.device is None:
flag = False
if src.device not in ["CPU", None] or dst.device not in ["CPU", None]:
has_xcel = True
self.METHOD_NAME(check_valid)
if not has_xcel: # label all the graph nodes as CPU
def label_cpu(src, dst):
self.device_map[src.name] = "CPU"
self.device_map[dst.name] = "CPU"
src.device = "CPU"
dst.device = "CPU"
self.METHOD_NAME(label_cpu)
flag = True
return flag
def graph_partition(self, show_partition=False):
# first check if the requested data placement is valid
for node in self.roots:
if node.device is None:
node.device = "CPU"
if not self.create_device_map():
self.visualize()
raise RuntimeError("There exist DFG nodes that are not labeled with target devices")
def extract_subgraph(src, dst):
if src.device in {"host", "CPU"} and dst.device in {"device", "FPGA"}:
if src not in self.subgraph["inputs"]:
self.subgraph["inputs"].append(src)
elif src.device in {"device", "FPGA"} and dst.device in {"host", "CPU"}:
if src not in self.subgraph["outputs"]:
self.subgraph["outputs"].append(src)
else:
pass
self.METHOD_NAME(extract_subgraph)
for output in self.leaves:
if output.device in {"device", "FPGA"}:
self.subgraph["outputs"].append(output)
if show_partition:
self.visualize()
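# Flow note (editor's illustration, not upstream code): a typical placement pass is
#   graph.propagate_annotation(tensor, "FPGA")   # producer stays on CPU, downstream nodes go to FPGA
#   graph.graph_partition()                      # validates labels via create_device_map()
# after which graph.subgraph["inputs"] holds tensors crossing the CPU->FPGA boundary and
# graph.subgraph["outputs"] holds tensors crossing back FPGA->CPU (plus FPGA-side leaves).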
|
299,563 |
upload artifact to uri
|
"""
Utilities for dealing with artifacts in the context of a Run.
"""
import os
import pathlib
import posixpath
import tempfile
import urllib.parse
import uuid
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.store.artifact.artifact_repository_registry import get_artifact_repository
from mlflow.store.artifact.dbfs_artifact_repo import DbfsRestArtifactRepository
from mlflow.store.artifact.models_artifact_repo import ModelsArtifactRepository
from mlflow.tracking._tracking_service.utils import _get_store
from mlflow.utils.file_utils import path_to_local_file_uri
from mlflow.utils.uri import add_databricks_profile_info_to_artifact_uri, append_to_uri_path
def get_artifact_uri(run_id, artifact_path=None, tracking_uri=None):
"""
Get the absolute URI of the specified artifact in the specified run. If ``artifact_path`` is not specified,
the artifact root URI of the specified run will be returned; calls to ``log_artifact``
and ``log_artifacts`` write artifact(s) to subdirectories of the artifact root URI.
:param run_id: The ID of the run for which to obtain an absolute artifact URI.
:param artifact_path: The run-relative artifact path. For example,
``path/to/artifact``. If unspecified, the artifact root URI for the
specified run will be returned.
:param tracking_uri: The tracking URI from which to get the run and its artifact location. If
not given, the current default tracking URI is used.
:return: An *absolute* URI referring to the specified artifact or the specified run's artifact
root. For example, if an artifact path is provided and the specified run uses an
S3-backed store, this may be a uri of the form
``s3://<bucket_name>/path/to/artifact/root/path/to/artifact``. If an artifact path
is not provided and the specified run uses an S3-backed store, this may be a URI of
the form ``s3://<bucket_name>/path/to/artifact/root``.
"""
if not run_id:
raise MlflowException(
message="A run_id must be specified in order to obtain an artifact uri!",
error_code=INVALID_PARAMETER_VALUE,
)
store = _get_store(tracking_uri)
run = store.get_run(run_id)
# Maybe move this method to RunsArtifactRepository so the circular dependency is clearer.
assert urllib.parse.urlparse(run.info.artifact_uri).scheme != "runs" # avoid an infinite loop
if artifact_path is None:
return run.info.artifact_uri
else:
return append_to_uri_path(run.info.artifact_uri, artifact_path)
# TODO: This would be much simpler if artifact_repo.download_artifacts could take the absolute path
# or no path.
def _get_root_uri_and_artifact_path(artifact_uri):
"""
Parse the artifact_uri to get the root_uri and artifact_path.
:param artifact_uri: The *absolute* URI of the artifact.
"""
if os.path.exists(artifact_uri):
if os.name != "nt":
# If we're dealing with local files, just reference the direct pathing.
# non-nt-based file systems can directly reference path information, while nt-based
# systems need to url-encode special characters in directory listings to be able to
# resolve them (i.e., spaces converted to %20 within a file name or path listing)
root_uri = os.path.dirname(artifact_uri)
artifact_path = os.path.basename(artifact_uri)
return root_uri, artifact_path
else: # if we're dealing with nt-based systems, we need to utilize pathname2url to encode.
artifact_uri = path_to_local_file_uri(artifact_uri)
parsed_uri = urllib.parse.urlparse(str(artifact_uri))
prefix = ""
if parsed_uri.scheme and not parsed_uri.path.startswith("/"):
# relative path is a special case, urllib does not reconstruct it properly
prefix = parsed_uri.scheme + ":"
parsed_uri = parsed_uri._replace(scheme="")
# For models:/ URIs, it doesn't make sense to initialize a ModelsArtifactRepository with only
# the model name portion of the URI, then call download_artifacts with the version info.
if ModelsArtifactRepository.is_models_uri(artifact_uri):
root_uri, artifact_path = ModelsArtifactRepository.split_models_uri(artifact_uri)
else:
artifact_path = posixpath.basename(parsed_uri.path)
parsed_uri = parsed_uri._replace(path=posixpath.dirname(parsed_uri.path))
root_uri = prefix + urllib.parse.urlunparse(parsed_uri)
return root_uri, artifact_path
def _download_artifact_from_uri(artifact_uri, output_path=None):
"""
:param artifact_uri: The *absolute* URI of the artifact to download.
:param output_path: The local filesystem path to which to download the artifact. If unspecified,
a local output path will be created.
"""
root_uri, artifact_path = _get_root_uri_and_artifact_path(artifact_uri)
return get_artifact_repository(artifact_uri=root_uri).download_artifacts(
artifact_path=artifact_path, dst_path=output_path
)
def METHOD_NAME(local_path, artifact_uri):
"""
Uploads a local artifact (file) to a specified URI.
:param local_path: The local path of the file to upload.
:param artifact_uri: The *absolute* URI of the path to upload the artifact to.
"""
root_uri, artifact_path = _get_root_uri_and_artifact_path(artifact_uri)
get_artifact_repository(artifact_uri=root_uri).log_artifact(local_path, artifact_path)
def _upload_artifacts_to_databricks(
source, run_id, source_host_uri=None, target_databricks_profile_uri=None
):
"""
Copy the artifacts from ``source`` to the destination Databricks workspace (DBFS) given by
``databricks_profile_uri`` or the current tracking URI.
:param source: Source location for the artifacts to copy.
:param run_id: Run ID to associate the artifacts with.
:param source_host_uri: Specifies the source artifact's host URI (e.g. Databricks tracking URI)
if applicable. If not given, defaults to the current tracking URI.
:param target_databricks_profile_uri: Specifies the destination Databricks host. If not given,
defaults to the current tracking URI.
:return: The DBFS location in the target Databricks workspace the model files have been
uploaded to.
"""
with tempfile.TemporaryDirectory() as local_dir:
source_with_profile = add_databricks_profile_info_to_artifact_uri(source, source_host_uri)
_download_artifact_from_uri(source_with_profile, local_dir)
dest_root = "dbfs:/databricks/mlflow/tmp-external-source/"
dest_root_with_profile = add_databricks_profile_info_to_artifact_uri(
dest_root, target_databricks_profile_uri
)
dest_repo = DbfsRestArtifactRepository(dest_root_with_profile)
dest_artifact_path = run_id if run_id else uuid.uuid4().hex
# Allow uploading from the same run id multiple times by randomizing a suffix
if len(dest_repo.list_artifacts(dest_artifact_path)) > 0:
dest_artifact_path = dest_artifact_path + "-" + uuid.uuid4().hex[0:4]
dest_repo.log_artifacts(local_dir, artifact_path=dest_artifact_path)
dirname = pathlib.PurePath(source).name # innermost directory name
return posixpath.join(dest_root, dest_artifact_path, dirname) # new source
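# Illustrative note (added; not part of upstream MLflow): for a remote URI such as
# "s3://bucket/path/to/model" (bucket and path are placeholders), _get_root_uri_and_artifact_path
# returns ("s3://bucket/path/to", "model"), so the download goes through an artifact
# repository rooted one level above the requested artifact.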
|
299,564 |
test create default queues2
|
import unittest
from pathlib import Path
from depthai_sdk.managers import EncodingManager, PipelineManager
from depthai_sdk import Previews
import depthai as dai
import os
unittest.TestLoader.sortTestMethodsUsing = None
class TestEncodingManager(unittest.TestCase):
def test_Init1(self):
"""Testing init with an empty dict and a real path"""
test = EncodingManager(encodeConfig={}, encodeOutput=Path(""))
self.assertIsNotNone(test)
def test_Init2(self):
"""Testing init with an empty dict and a false path"""
with self.assertRaises(RuntimeError):
EncodingManager(encodeConfig={}, encodeOutput=Path("/NotARealPath"))
def test_Init3(self):
"""Testing that everything in init is stored correctly when used with every attribute"""
test = EncodingManager(encodeConfig={Previews.color.name: 30}, encodeOutput=Path(""))
self.assertDictEqual(test.encodeConfig, {Previews.color.name: 30})
self.assertEqual(test.encodeOutput, Path(""))
def test_CreateEncoders1(self):
"""Testing createEncoders with a valid pipeline"""
pm = PipelineManager()
pm.createColorCam()
test = EncodingManager({Previews.color.name: 30}, Path(""))
test.createEncoders(pm)
self.assertTrue("color" in test._encodingNodes)
def test_CreateEncoders2(self):
"""Testing createEncoders with a valid pipeline (all nodes)"""
pm = PipelineManager()
pm.createColorCam()
pm.createLeftCam()
pm.createRightCam()
test = EncodingManager({
Previews.color.name: 30,
Previews.left.name: 30,
Previews.right.name: 30}, Path(""))
test.createEncoders(pm)
self.assertTrue("color" in test._encodingNodes and
"left" in test._encodingNodes and
"right" in test._encodingNodes)
def test_CreateDefaultQueues1(self):
"""Testing createDefaultQueues with a valid pipeline"""
pm = PipelineManager()
pm.createColorCam()
test = EncodingManager({Previews.color.name: 30}, Path(""))
test.createEncoders(pm)
with dai.Device(pm.pipeline) as device:
test.createDefaultQueues(device)
self.assertEqual(len(test._encodingQueues), 1)
self.assertTrue("color" in test._encodingQueues)
self.assertTrue("color" in test._encodingFiles)
def METHOD_NAME(self):
"""Testing createDefaultQueues with a valid pipeline (all nodes)"""
pm = PipelineManager()
pm.createColorCam()
pm.createLeftCam()
pm.createRightCam()
test = EncodingManager({
Previews.color.name: 30,
Previews.left.name: 30,
Previews.right.name: 30}, Path(""))
test.createEncoders(pm)
with dai.Device(pm.pipeline) as device:
test.createDefaultQueues(device)
self.assertEqual(len(test._encodingQueues), 3)
self.assertTrue("color" in test._encodingQueues and
"left" in test._encodingQueues and
"right" in test._encodingQueues)
self.assertTrue("color" in test._encodingFiles and
"left" in test._encodingFiles and
"right" in test._encodingFiles)
def test_close1(self):
"""Testing close with a valid pipeline; if closed correctly, the recorded .h264 file exists and is removed afterwards"""
pm = PipelineManager()
pm.createColorCam()
test = EncodingManager({Previews.color.name: 30}, Path(""))
test.createEncoders(pm)
with dai.Device(pm.pipeline) as device:
test.createDefaultQueues(device)
test.close()
os.remove("color.h264")
def test_close2(self):
"""Testing close with a valid pipeline; if closed correctly, the recorded .h264 files exist and are removed afterwards"""
pm = PipelineManager()
pm.createColorCam()
pm.createLeftCam()
pm.createRightCam()
test = EncodingManager({
Previews.color.name: 30,
Previews.left.name: 30,
Previews.right.name: 30}, Path(""))
test.createEncoders(pm)
with dai.Device(pm.pipeline) as device:
test.createDefaultQueues(device)
test.close()
os.remove("color.h264")
os.remove("left.h264")
os.remove("right.h264")
|
299,565 |
test site configuration created with json file
|
"""
Tests for the create_or_update_site_configuration management command.
"""
import codecs
import json
import pytest
import ddt
from django.contrib.sites.models import Site
from django.core.management import call_command, CommandError
from django.test import TestCase
from path import Path
from openedx.core.djangoapps.site_configuration.models import SiteConfiguration
@ddt.ddt
class CreateOrUpdateSiteConfigurationTest(TestCase):
"""
Test for the create_or_update_site_configuration management command.
"""
command = 'create_or_update_site_configuration'
def setUp(self):
super().setUp()
self.site_id = 1
self.site_id_arg = ['--site-id', str(self.site_id)]
self.json_file_path = Path(__file__).parent / "fixtures/config1.json"
self.input_configuration = {
'FEATURE_FLAG': True,
'SERVICE_URL': 'https://foo.bar',
'ABC': 123,
}
@property
def site(self):
"""
Return the fixture site for this test class.
"""
return Site.objects.get(id=self.site_id)
def assert_site_configuration_does_not_exist(self):
"""
Assert that the site configuration for the fixture site does not exist.
"""
with pytest.raises(SiteConfiguration.DoesNotExist):
SiteConfiguration.objects.get(site=self.site)
def get_site_configuration(self):
"""
Return the site configuration for the fixture site.
"""
return SiteConfiguration.objects.get(site=self.site)
def create_fixture_site_configuration(self, enabled):
SiteConfiguration.objects.update_or_create(
site=self.site,
defaults={'enabled': enabled, 'site_values': {'ABC': 'abc', 'B': 'b'}}
)
def test_command_no_args(self):
"""
Verify the error on the command with no arguments.
"""
with pytest.raises(CommandError) as error:
call_command(self.command)
assert 'Error: one of the arguments --site-id domain is required' in str(error.value)
def test_site_created_when_site_id_non_existent(self):
"""
Verify that a new site is created when given a site ID that doesn't exist.
"""
non_existent_site_id = 999
with pytest.raises(Site.DoesNotExist):
Site.objects.get(id=non_existent_site_id)
call_command(self.command, '--site-id', non_existent_site_id)
Site.objects.get(id=non_existent_site_id)
def test_site_created_when_domain_non_existent(self):
"""
Verify that a new site is created when given a domain name that does not have an existing site.
"""
domain = 'nonexistent.com'
with pytest.raises(Site.DoesNotExist):
Site.objects.get(domain=domain)
call_command(self.command, domain)
Site.objects.get(domain=domain)
def test_both_site_id_domain_given(self):
"""
Verify that an error is thrown when both site_id and the domain name are provided.
"""
with pytest.raises(CommandError) as error:
call_command(self.command, 'domain.com', '--site-id', '1')
assert 'not allowed with argument' in str(error.value)
def test_site_configuration_created_when_non_existent(self):
"""
Verify that a SiteConfiguration instance is created if it doesn't exist.
"""
self.assert_site_configuration_does_not_exist()
call_command(self.command, *self.site_id_arg)
site_configuration = SiteConfiguration.objects.get(site=self.site)
assert not site_configuration.site_values
assert not site_configuration.enabled
def test_site_created_when_domain_longer_than_50_characters(self):
"""
Verify that a Site instance is created with its name trimmed
to 50 characters when the domain is longer than 50 characters
"""
self.assert_site_configuration_does_not_exist()
domain = "studio.newtestserverwithlongname.development.opencraft.hosting"
call_command(self.command, f"{domain}")
site = Site.objects.filter(domain=domain)
assert site.exists()
assert site[0].name == domain[:50]
def test_both_enabled_disabled_flags(self):
"""
Verify the error on providing both the --enabled and --disabled flags.
"""
with pytest.raises(CommandError) as error:
call_command(self.command, '--enabled', '--disabled', *self.site_id_arg)
assert 'argument --disabled: not allowed with argument --enabled' in str(error.value)
@ddt.data(('enabled', True),
('disabled', False))
@ddt.unpack
def test_site_configuration_enabled_disabled(self, flag, enabled):
"""
Verify that the SiteConfiguration instance is enabled/disabled as per the flag used.
"""
self.assert_site_configuration_does_not_exist()
call_command(self.command, f'--{flag}', *self.site_id_arg)
site_configuration = SiteConfiguration.objects.get(site=self.site)
assert not site_configuration.site_values
assert enabled == site_configuration.enabled
def test_site_configuration_created_with_parameters(self):
"""
Verify that a SiteConfiguration instance is created with the provided values if it does not exist.
"""
self.assert_site_configuration_does_not_exist()
call_command(self.command, '--configuration', json.dumps(self.input_configuration), *self.site_id_arg)
site_configuration = self.get_site_configuration()
self.assertDictEqual(site_configuration.site_values, self.input_configuration)
def METHOD_NAME(self):
"""
Verify that a SiteConfiguration instance is created with the values loaded from the JSON file if it does not exist.
"""
self.assert_site_configuration_does_not_exist()
call_command(self.command, '-f', str(self.json_file_path.abspath()), *self.site_id_arg)
site_configuration = self.get_site_configuration()
assert site_configuration.site_values == {'ABC': 123, 'XYZ': '789'}
@ddt.data(True, False)
def test_site_configuration_updated_with_parameters(self, enabled):
"""
Verify that the existing parameters are updated when provided in the command.
"""
self.create_fixture_site_configuration(enabled)
call_command(self.command, '--configuration', json.dumps(self.input_configuration), *self.site_id_arg)
site_configuration = self.get_site_configuration()
assert site_configuration.site_values ==\
{'ABC': 123, 'B': 'b', 'FEATURE_FLAG': True, 'SERVICE_URL': 'https://foo.bar'}
assert site_configuration.enabled == enabled
@ddt.data(True, False)
def test_site_configuration_updated_from_json_file(self, enabled):
"""
Verify that the existing parameters are updated when provided through a JSON file.
"""
self.create_fixture_site_configuration(enabled)
call_command(self.command, '-f', str(self.json_file_path.abspath()), *self.site_id_arg)
site_configuration = self.get_site_configuration()
expected_site_configuration = {'ABC': 'abc', 'B': 'b'}
with codecs.open(self.json_file_path, encoding='utf-8') as f:
expected_site_configuration.update(json.load(f))
assert site_configuration.site_values == expected_site_configuration
assert site_configuration.enabled == enabled
|
299,566 |
test sitemap projects
|
import pytest
from django.urls import reverse
from lxml import etree
from taiga.users.models import User
from tests import factories as f
from tests.utils import disconnect_signals, reconnect_signals
pytestmark = pytest.mark.django_db
NAMESPACES = {
"sitemapindex": "http://www.sitemaps.org/schemas/sitemap/0.9",
}
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
@pytest.fixture
def data():
m = type("InitialData", (object,), {})()
m.project1 = f.ProjectFactory.create(is_private=False,
is_epics_activated=True,
is_backlog_activated=True,
is_kanban_activated=True,
is_issues_activated=True,
is_wiki_activated=True)
m.project2 = f.ProjectFactory.create(is_private=True,
is_epics_activated=True,
is_backlog_activated=True,
is_kanban_activated=True,
is_issues_activated=True,
is_wiki_activated=True)
m.epic11 = f.EpicFactory(project=m.project1)
m.epic21 = f.EpicFactory(project=m.project2)
m.milestone11 = f.MilestoneFactory(project=m.project1)
m.milestone21 = f.MilestoneFactory(project=m.project2)
m.us11 = f.UserStoryFactory(project=m.project1)
m.us21 = f.UserStoryFactory(project=m.project2)
m.task11 = f.TaskFactory(project=m.project1)
m.task21 = f.TaskFactory(project=m.project2)
m.issue11 = f.IssueFactory(project=m.project1)
m.issue21 = f.IssueFactory(project=m.project2)
m.wikipage11 = f.WikiPageFactory(project=m.project1)
m.wikipage21 = f.WikiPageFactory(project=m.project2)
return m
def test_sitemaps_index(client):
url = reverse('front-sitemap-index')
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == 11 # ["/generics", "/projects", "/project_backlogs", "/project_kanbans", "/epics",
# "/milestones", "/userstories", "/tasks", "/issues", "/wikipages", "/users"]
def test_sitemap_generics(client, data):
url = reverse('front-sitemap', kwargs={"section": "generics"})
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == 5 # ["/", "/discover", "/login", "/register", "/forgot-password"]
def METHOD_NAME(client, data):
url = reverse('front-sitemap', kwargs={"section": "projects"})
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == 1
def test_sitemap_project_backlogs(client, data):
url = reverse('front-sitemap', kwargs={"section": "project-backlogs"})
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == 1
def test_sitemap_project_kanbans(client, data):
url = reverse('front-sitemap', kwargs={"section": "project-kanbans"})
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == 1
def test_sitemap_epics(client, data):
url = reverse('front-sitemap', kwargs={"section": "epics"})
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == 1
def test_sitemap_milestones(client, data):
url = reverse('front-sitemap', kwargs={"section": "milestones"})
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == 1
def test_sitemap_userstories(client, data):
url = reverse('front-sitemap', kwargs={"section": "userstories"})
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == 1
def test_sitemap_tasks(client, data):
url = reverse('front-sitemap', kwargs={"section": "tasks"})
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == 1
def test_sitemap_issues(client, data):
url = reverse('front-sitemap', kwargs={"section": "issues"})
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == 1
def test_sitemap_wikipages(client, data):
url = reverse('front-sitemap', kwargs={"section": "wikipages"})
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == 1
def test_sitemap_users(client, data):
url = reverse('front-sitemap', kwargs={"section": "users"})
response = client.get(url)
assert response.status_code == 200, response.data
tree = etree.fromstring(response.content)
urls = tree.xpath("//sitemapindex:loc/text()", namespaces=NAMESPACES)
assert len(urls) == User.objects.filter(is_active=True, is_system=False).count()
|
299,567 |
test regular file
|
import os
from unittest import mock
from pkgcheck import addons
from pkgcheck.checks import repo
from pkgcore.ebuild import atom
from pkgcore.test.misc import FakeRepo
from snakeoil.cli import arghparse
from snakeoil.fileutils import touch
from snakeoil.osutils import ensure_dirs, pjoin
from .. import misc
class TestRepoDirCheck(misc.Tmpdir, misc.ReportTestCase):
check_kls = repo.RepoDirCheck
def mk_check(self):
self.repo = FakeRepo(repo_id="repo", location=self.dir)
options = arghparse.Namespace(target_repo=self.repo, cache={"git": False}, gentoo_repo=True)
git_addon = addons.git.GitAddon(options)
return repo.RepoDirCheck(options, git_addon=git_addon)
def mk_pkg(self, cpvstr):
pkg = atom.atom(cpvstr)
filesdir = pjoin(self.repo.location, pkg.category, pkg.package, "files")
os.makedirs(filesdir, exist_ok=True)
return filesdir
def test_empty_repo(self):
self.assertNoReport(self.mk_check(), [])
def test_empty_file(self):
check = self.mk_check()
bin_path = pjoin(self.repo.location, "foo")
touch(bin_path)
self.assertNoReport(check, [])
def METHOD_NAME(self):
check = self.mk_check()
with open(pjoin(self.repo.location, "foo"), "w") as f:
f.write("bar")
self.assertNoReport(check, [])
def test_unreadable_file(self):
check = self.mk_check()
with open(pjoin(self.repo.location, "foo"), "w") as f:
f.write("bar")
with mock.patch("pkgcheck.open") as mocked_open:
mocked_open.side_effect = IOError("fake exception")
self.assertNoReport(check, [])
def test_ignored_root_dirs(self):
for d in self.check_kls.ignored_root_dirs:
check = self.mk_check()
bin_path = pjoin(self.repo.location, d, "foo")
os.makedirs(os.path.dirname(bin_path))
with open(bin_path, "wb") as f:
f.write(b"\xd3\xad\xbe\xef")
self.assertNoReport(check, [])
def test_null_bytes(self):
check = self.mk_check()
with open(pjoin(self.repo.location, "foo"), "wb") as f:
f.write(b"foo\x00\xffbar")
r = self.assertReport(check, [])
assert isinstance(r, repo.BinaryFile)
assert r.path == "foo"
assert "'foo'" in str(r)
def test_root_dir_binary(self):
check = self.mk_check()
bin_path = pjoin(self.repo.location, "foo")
with open(bin_path, "wb") as f:
f.write(b"\xd3\xad\xbe\xef")
r = self.assertReport(check, [])
assert isinstance(r, repo.BinaryFile)
assert r.path == "foo"
assert "'foo'" in str(r)
def test_ebuild_filesdir_binary(self):
check = self.mk_check()
filesdir = self.mk_pkg("dev-util/foo")
with open(pjoin(filesdir, "foo"), "wb") as f:
f.write(b"\xd3\xad\xbe\xef")
r = self.assertReport(check, [])
assert isinstance(r, repo.BinaryFile)
assert r.path == "dev-util/foo/files/foo"
assert "'dev-util/foo/files/foo'" in str(r)
def test_gitignore(self):
# distfiles located in the deprecated in-tree location are reported by default
check = self.mk_check()
distfiles = pjoin(self.repo.location, "distfiles")
os.mkdir(distfiles)
with open(pjoin(distfiles, "foo-0.tar.gz"), "wb") as f:
f.write(b"\xd3\xad\xbe\xef")
r = self.assertReport(check, [])
assert isinstance(r, repo.BinaryFile)
assert "distfiles/foo-0.tar.gz" in str(r)
# but results are suppressed if a matching git ignore entry exists
for ignore_file in (".gitignore", ".git/info/exclude"):
path = pjoin(self.repo.location, ignore_file)
ensure_dirs(os.path.dirname(path))
with open(path, "w") as f:
f.write("/distfiles/")
self.assertNoReport(self.mk_check(), [])
os.unlink(path)
def test_non_utf8_encodings(self):
# non-english languages courtesy of google translate mangling
langs = (
("example text that shouldn't trigger", "ascii"),
("نص المثال الذي لا ينبغي أن يؤدي", "cp1256"), # arabic
("пример текста, который не должен срабатывать", "koi8_r"), # russian
("उदाहरण पाठ जो ट्रिगर नहीं होना चाहिए", "utf-16"), # hindi
("مثال کے متن جو ٹرگر نہ ہوں۔", "utf-16"), # urdu
("ဖြစ်ပေါ်မပေးသင့်ကြောင်းဥပမာစာသား", "utf-32"), # burmese
("उदाहरण पाठ जुन ट्रिगर हुँदैन", "utf-32"), # nepali
("トリガーするべきではないテキストの例", "shift_jis"), # japanese
("트리거해서는 안되는 예제 텍스트", "cp949"), # korean
("不应触发的示例文本", "gb2312"), # simplified chinese
("不應觸發的示例文本", "gb18030"), # traditional chinese
)
for text, encoding in langs:
check = self.mk_check()
with open(pjoin(self.repo.location, "foo"), "wb") as f:
data = text.encode(encoding)
f.write(data)
self.assertNoReport(check, [])
|
299,568 |
visit assign
|
""" NormalizeTuples removes implicit variable -> tuple conversion. """
from pythran.analyses import Identifiers
from pythran.passmanager import Transformation
import gast as ast
from functools import reduce
from collections import OrderedDict
from copy import deepcopy
class ConvertToTuple(ast.NodeTransformer):
def __init__(self, tuple_id, renamings):
self.tuple_id = tuple_id
self.renamings = renamings
def visit_Name(self, node):
if node.id in self.renamings:
nnode = reduce(
lambda x, y: ast.Subscript(
x,
ast.Constant(y, None),
ast.Load()),
self.renamings[node.id],
ast.Name(self.tuple_id, ast.Load(), None, None)
)
nnode.ctx = node.ctx
return nnode
return node
class NormalizeTuples(Transformation):
"""
Remove implicit tuple -> variable conversion.
>>> import gast as ast
>>> from pythran import passmanager, backend
>>> node = ast.parse("def foo(): a=(1,2.) ; i,j = a")
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(NormalizeTuples, node)
>>> print(pm.dump(backend.Python, node))
def foo():
a = (1, 2.0)
i = a[0]
j = a[1]
"""
tuple_name = "__tuple"
def __init__(self):
Transformation.__init__(self)
def get_new_id(self):
i = 0
while 1:
new_id = "{}{}".format(NormalizeTuples.tuple_name, i)
if new_id not in self.ids:
self.ids.add(new_id)
return new_id
else:
i += 1
def traverse_tuples(self, node, state, renamings):
if isinstance(node, ast.Name):
if state:
renamings[node.id] = state
self.update = True
elif isinstance(node, ast.Tuple) or isinstance(node, ast.List):
[self.traverse_tuples(n, state + (i,), renamings)
for i, n in enumerate(node.elts)]
elif isinstance(node, (ast.Subscript, ast.Attribute)):
if state:
renamings[node] = state
self.update = True
else:
raise NotImplementedError
def visit_comprehension(self, node):
node = self.generic_visit(node)
renamings = OrderedDict()
self.traverse_tuples(node.target, (), renamings)
if renamings:
self.update = True
return self.get_new_id(), renamings
else:
return node
def visit_AnyComp(self, node, *fields):
for field in fields:
setattr(node, field, self.visit(getattr(node, field)))
generators = [self.visit(generator) for generator in node.generators]
nnode = node
for i, g in enumerate(generators):
if isinstance(g, tuple):
gtarget = "{0}{1}".format(g[0], i)
nnode.generators[i].target = ast.Name(
gtarget,
nnode.generators[i].target.ctx, None, None)
nnode = ConvertToTuple(gtarget, g[1]).visit(nnode)
self.update = True
for field in fields:
setattr(node, field, getattr(nnode, field))
node.generators = nnode.generators
return node
def visit_ListComp(self, node):
return self.visit_AnyComp(node, 'elt')
def visit_SetComp(self, node):
return self.visit_AnyComp(node, 'elt')
def visit_DictComp(self, node):
return self.visit_AnyComp(node, 'key', 'value')
def visit_GeneratorExp(self, node):
return self.visit_AnyComp(node, 'elt')
def visit_Lambda(self, node):
self.generic_visit(node)
for i, arg in enumerate(node.args.args):
renamings = OrderedDict()
self.traverse_tuples(arg, (), renamings)
if renamings:
nname = self.get_new_id()
node.args.args[i] = ast.Name(nname, ast.Param(), None, None)
node.body = ConvertToTuple(nname, renamings).visit(node.body)
return node
def METHOD_NAME(self, node):
self.generic_visit(node)
# if the rhs is an identifier, we don't need to duplicate it
# otherwise, better duplicate it...
no_tmp = isinstance(node.value, (ast.Name, ast.Attribute))
extra_assign = [] if no_tmp else [node]
for i, t in enumerate(node.targets):
if isinstance(t, ast.Tuple) or isinstance(t, ast.List):
renamings = OrderedDict()
self.traverse_tuples(t, (), renamings)
if renamings:
if no_tmp:
gstore = deepcopy(node.value)
else:
gstore = ast.Name(self.get_new_id(),
ast.Store(), None, None)
gload = deepcopy(gstore)
gload.ctx = ast.Load()
node.targets[i] = gstore
for rename, state in renamings.items():
nnode = reduce(
lambda x, y: ast.Subscript(
x,
ast.Constant(y, None),
ast.Load()),
state,
gload)
if isinstance(rename, str):
extra_assign.append(
ast.Assign(
[ast.Name(rename, ast.Store(),
None, None)],
nnode, None))
else:
extra_assign.append(ast.Assign([rename],
nnode, None))
return extra_assign or node
def visit_For(self, node):
target = node.target
if isinstance(target, ast.Tuple) or isinstance(target, ast.List):
renamings = OrderedDict()
self.traverse_tuples(target, (), renamings)
if renamings:
gtarget = self.get_new_id()
node.target = ast.Name(gtarget, node.target.ctx, None, None)
for rename, state in renamings.items():
nnode = reduce(
lambda x, y: ast.Subscript(
x,
ast.Constant(y, None),
ast.Load()),
state,
ast.Name(gtarget, ast.Load(), None, None))
if isinstance(rename, str):
node.body.insert(0,
ast.Assign(
[ast.Name(rename,
ast.Store(),
None, None)],
nnode, None)
)
else:
node.body.insert(0, ast.Assign([rename], nnode, None))
self.generic_visit(node)
return node
def visit_FunctionDef(self, node):
self.ids = self.gather(Identifiers, node)
return self.generic_visit(node)
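# Illustrative note (added for clarity, not upstream code): besides assignments,
# the same rewriting applies to for-loops; roughly,
#   for i, j in pairs: body
# becomes
#   for __tuple0 in pairs:
#       i = __tuple0[0]
#       j = __tuple0[1]
#       body
# (the exact temporary name and the ordering of the inserted assignments may differ).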
|
299,569 |
test read emep colocate emep tm5
|
import iris
import numpy as np
import pandas as pd
import pytest
from cf_units import Unit
from pyaerocom import GriddedData, const, helpers
from pyaerocom.colocateddata import ColocatedData
from pyaerocom.colocation import (
_colocate_site_data_helper,
_colocate_site_data_helper_timecol,
_regrid_gridded,
colocate_gridded_gridded,
colocate_gridded_ungridded,
)
from pyaerocom.config import ALL_REGION_NAME
from pyaerocom.exceptions import UnresolvableTimeDefinitionError
from pyaerocom.plugins.mscw_ctm.reader import ReadMscwCtm
from tests.conftest import TEST_RTOL, need_iris_32
from tests.fixtures.stations import create_fake_station_data
def test__regrid_gridded(data_tm5):
one_way = _regrid_gridded(data_tm5, "areaweighted", 5)
another_way = _regrid_gridded(data_tm5, "areaweighted", dict(lon_res_deg=5, lat_res_deg=5))
assert one_way.shape == another_way.shape
S1 = create_fake_station_data(
"concpm10",
{"concpm10": {"units": "ug m-3"}},
10,
"2010-01-01",
"2010-12-31",
"d",
{"ts_type": "daily"},
)
S1["concpm10"][10:20] = np.nan
S2 = create_fake_station_data(
"concpm10",
{"concpm10": {"units": "ug m-3"}},
10,
"2010-01-01",
"2010-12-31",
"d",
{"ts_type": "daily"},
)
S3 = create_fake_station_data(
"concpm10",
{"concpm10": {"units": "ug m-3"}},
10,
"2010-01-01",
"2010-12-31",
"13d",
{"ts_type": "13daily"},
)
S3["concpm10"][1] = np.nan
S3["concpm10"][3] = np.nan
S4 = create_fake_station_data(
"concpm10",
{"concpm10": {"units": "ug m-3"}},
10,
"2010-01-03",
"2011-12-31",
"d",
{"ts_type": "daily"},
)
S4["concpm10"][0:5] = range(5)
@pytest.mark.parametrize(
"stat_data,stat_data_ref,var,var_ref,ts_type,resample_how,min_num_obs, use_climatology_ref,num_valid",
[
(S4, S3, "concpm10", "concpm10", "monthly", "mean", {"monthly": {"daily": 25}}, False, 10),
(S3, S4, "concpm10", "concpm10", "monthly", "mean", {"monthly": {"daily": 25}}, False, 24),
(S1, S2, "concpm10", "concpm10", "monthly", "mean", 25, False, 12),
(S2, S1, "concpm10", "concpm10", "monthly", "mean", 25, False, 11),
],
)
def test__colocate_site_data_helper_timecol(
stat_data,
stat_data_ref,
var,
var_ref,
ts_type,
resample_how,
min_num_obs,
use_climatology_ref,
num_valid,
):
result = _colocate_site_data_helper_timecol(
stat_data,
stat_data_ref,
var,
var_ref,
ts_type,
resample_how,
min_num_obs,
use_climatology_ref,
)
assert isinstance(result, pd.DataFrame)
assert result.data.isnull().sum() == result.ref.isnull().sum()
assert len(result) - result.data.isnull().sum() == num_valid
def test__colocate_site_data_helper(aeronetsunv3lev2_subset):
var = "od550aer"
stat1 = aeronetsunv3lev2_subset.to_station_data(3, var)
stat2 = aeronetsunv3lev2_subset.to_station_data(4, var)
df = _colocate_site_data_helper(stat1, stat2, var, var, "daily", None, None, False)
assert isinstance(df, pd.DataFrame)
assert len(df) == 9483
assert df["data"].mean() == pytest.approx(0.31171085422102346, rel=TEST_RTOL)
assert df["ref"].mean() == pytest.approx(0.07752743643132792, rel=TEST_RTOL)
def test_colocate_gridded_ungridded_new_var(data_tm5, aeronetsunv3lev2_subset):
data = data_tm5.copy()
data.var_name = "od550bc"
coldata = colocate_gridded_ungridded(data, aeronetsunv3lev2_subset, var_ref="od550aer")
assert coldata.metadata["var_name"] == ["od550aer", "od550bc"]
@pytest.mark.parametrize(
"addargs,ts_type,shape,obsmean,modmean",
[
(
dict(
filter_name=f"{ALL_REGION_NAME}-noMOUNTAINS",
min_num_obs=const.OBS_MIN_NUM_RESAMPLE,
),
"monthly",
(2, 12, 8),
0.315930,
0.275671,
),
(
dict(filter_name=f"{ALL_REGION_NAME}-noMOUNTAINS"),
"monthly",
(2, 12, 8),
0.316924,
0.275671,
),
(
dict(
filter_name=f"{ALL_REGION_NAME}-wMOUNTAINS", min_num_obs=const.OBS_MIN_NUM_RESAMPLE
),
"monthly",
(2, 12, 11),
0.269707,
0.243861,
),
(
dict(
filter_name=f"{ALL_REGION_NAME}-noMOUNTAINS",
use_climatology_ref=True,
min_num_obs=const.OBS_MIN_NUM_RESAMPLE,
),
"monthly",
(2, 12, 13),
0.302636,
0.234147,
),
pytest.param(
dict(
filter_name=f"{ALL_REGION_NAME}-noMOUNTAINS",
regrid_res_deg=30,
min_num_obs=const.OBS_MIN_NUM_RESAMPLE,
),
"monthly",
(2, 12, 8),
0.31593,
# 0.1797,
0.169897,
marks=[need_iris_32],
),
(
dict(filter_name=f"{ALL_REGION_NAME}-noMOUNTAINS", ts_type="yearly"),
"yearly",
(2, 1, 8),
0.417676,
0.275671,
),
],
)
def test_colocate_gridded_ungridded(
data_tm5, aeronetsunv3lev2_subset, addargs, ts_type, shape, obsmean, modmean
):
coldata = colocate_gridded_ungridded(data_tm5, aeronetsunv3lev2_subset, **addargs)
assert isinstance(coldata, ColocatedData)
assert coldata.ts_type == ts_type
assert coldata.shape == shape
assert np.nanmean(coldata.data.data[0]) == pytest.approx(obsmean, rel=TEST_RTOL)
assert np.nanmean(coldata.data.data[1]) == pytest.approx(modmean, rel=TEST_RTOL)
def test_colocate_gridded_ungridded_nonglobal(aeronetsunv3lev2_subset):
times = [1, 2]
time_unit = Unit("days since 2010-1-1 0:0:0")
cubes = iris.cube.CubeList()
for time in times:
time_coord = iris.coords.DimCoord(time, units=time_unit, standard_name="time")
cube = helpers.make_dummy_cube_latlon(
lat_res_deg=1, lon_res_deg=1, lat_range=[30.05, 81.95], lon_range=[-29.5, 89.95]
)
cube.add_aux_coord(time_coord)
cubes.append(cube)
time_cube = cubes.merge_cube()
gridded = GriddedData(time_cube)
gridded.var_name = "od550aer"
gridded.units = Unit("1")
coldata = colocate_gridded_ungridded(gridded, aeronetsunv3lev2_subset, colocate_time=False)
assert isinstance(coldata, ColocatedData)
assert coldata.shape == (2, 2, 2)
def test_colocate_gridded_gridded_same_new_var(data_tm5):
data = data_tm5.copy()
data.var_name = "Blaaa"
coldata = colocate_gridded_gridded(data, data_tm5)
assert coldata.metadata["var_name"] == ["od550aer", "Blaaa"]
def test_colocate_gridded_gridded_same(data_tm5):
coldata = colocate_gridded_gridded(data_tm5, data_tm5)
assert isinstance(coldata, ColocatedData)
stats = coldata.calc_statistics()
# check mean value
assert stats["data_mean"] == pytest.approx(0.09825691)
# check that mean value is same as in input GriddedData object
assert stats["data_mean"] == pytest.approx(data_tm5.mean(areaweighted=False))
assert stats["refdata_mean"] == stats["data_mean"]
assert stats["nmb"] == 0
assert stats["mnmb"] == 0
assert stats["R"] == 1
assert stats["R_spearman"] == 1
@pytest.mark.xfail(raises=UnresolvableTimeDefinitionError)
def METHOD_NAME(data_tm5, path_emep):
reader = ReadMscwCtm(data_dir=path_emep["data_dir"])
data_emep = reader.read_var("concpm10", ts_type="monthly")
# Change units and year to match TM5 data
data_emep.change_base_year(2010)
data_emep.units = "1"
col = colocate_gridded_gridded(data_emep, data_tm5)
assert isinstance(col, ColocatedData)
|
299,570 |
compute job result logs
|
#
# Copyright 2023 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
import logging
from typing import Any, Dict, List, Optional, Type
from enforce_typing import enforce_types
from ocean_lib.agreements.consumable import AssetNotConsumable, ConsumableCodes
from ocean_lib.agreements.service_types import ServiceTypes
from ocean_lib.aquarius import Aquarius
from ocean_lib.assets.asset_downloader import is_consumable
from ocean_lib.assets.ddo import DDO
from ocean_lib.data_provider.data_service_provider import DataServiceProvider
from ocean_lib.models.compute_input import ComputeInput
from ocean_lib.services.service import Service
from ocean_lib.structures.algorithm_metadata import AlgorithmMetadata
logger = logging.getLogger("ocean")
class OceanCompute:
@enforce_types
def __init__(
self, config_dict: dict, data_provider: Type[DataServiceProvider]
) -> None:
"""Initialises OceanCompute class."""
self._config_dict = config_dict
self._data_provider = data_provider
@enforce_types
def start(
self,
consumer_wallet,
dataset: ComputeInput,
compute_environment: str,
algorithm: Optional[ComputeInput] = None,
algorithm_meta: Optional[AlgorithmMetadata] = None,
algorithm_algocustomdata: Optional[dict] = None,
additional_datasets: List[ComputeInput] = [],
) -> str:
metadata_cache_uri = self._config_dict.get("METADATA_CACHE_URI")
ddo = Aquarius.get_instance(metadata_cache_uri).get_ddo(dataset.did)
service = ddo.get_service_by_id(dataset.service_id)
assert (
ServiceTypes.CLOUD_COMPUTE == service.type
), "service at serviceId is not of type compute service."
consumable_result = is_consumable(
ddo,
service,
{"type": "address", "value": consumer_wallet.address},
with_connectivity_check=True,
)
if consumable_result != ConsumableCodes.OK:
raise AssetNotConsumable(consumable_result)
# Start compute job
job_info = self._data_provider.start_compute_job(
dataset_compute_service=service,
consumer=consumer_wallet,
dataset=dataset,
compute_environment=compute_environment,
algorithm=algorithm,
algorithm_meta=algorithm_meta,
algorithm_custom_data=algorithm_algocustomdata,
input_datasets=additional_datasets,
)
return job_info["jobId"]
@enforce_types
def status(self, ddo: DDO, service: Service, job_id: str, wallet) -> Dict[str, Any]:
"""
Gets job status.
:param ddo: DDO offering the compute service of this job
:param service: compute service of this job
:param job_id: str id of the compute job
:param wallet: Wallet instance
:return: dict the status for an existing compute job, keys are (ok, status, statusText)
"""
job_info = self._data_provider.compute_job_status(
ddo.did, job_id, service, wallet
)
job_info.update({"ok": job_info.get("status") not in (31, 32, None)})
return job_info
@enforce_types
def result(
self, ddo: DDO, service: Service, job_id: str, index: int, wallet
) -> Dict[str, Any]:
"""
Gets job result.
:param ddo: DDO offering the compute service of this job
:param service: compute service of this job
:param job_id: str id of the compute job
:param index: compute result index
:param wallet: Wallet instance
:return: dict the results/logs urls for an existing compute job, keys are (did, urls, logs)
"""
result = self._data_provider.compute_job_result(job_id, index, service, wallet)
return result
@enforce_types
def METHOD_NAME(
self,
ddo: DDO,
service: Service,
job_id: str,
wallet,
log_type="output",
) -> Dict[str, Any]:
"""
Gets job output if it exists.
:param ddo: DDO offering the compute service of this job
:param service: compute service of this job
:param job_id: str id of the compute job
:param wallet: Wallet instance
:return: dict the results/logs urls for an existing compute job, keys are (did, urls, logs)
"""
result = self._data_provider.METHOD_NAME(
ddo, job_id, service, wallet, log_type
)
return result
@enforce_types
def stop(self, ddo: DDO, service: Service, job_id: str, wallet) -> Dict[str, Any]:
"""
Attempt to stop the running compute job.
:param ddo: DDO offering the compute service of this job
:param job_id: str id of the compute job
:param wallet: Wallet instance
:return: dict the status for the stopped compute job, keys are (ok, status, statusText)
"""
job_info = self._data_provider.stop_compute_job(
ddo.did, job_id, service, wallet
)
job_info.update({"ok": job_info.get("status") not in (31, 32, None)})
return job_info
@enforce_types
def get_c2d_environments(self, service_endpoint: str, chain_id: int) -> str:
return DataServiceProvider.get_c2d_environments(service_endpoint, chain_id)
@enforce_types
def get_free_c2d_environment(self, service_endpoint: str, chain_id) -> str:
environments = self.get_c2d_environments(service_endpoint, chain_id)
return next(env for env in environments if float(env["priceMin"]) == float(0))
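# --- Illustrative usage sketch (comments only, not part of this module) ---
# A minimal, hypothetical flow through the methods above; `config`, `consumer_wallet`,
# `dataset_input`, `algorithm_input`, `ddo`, `compute_service` and `C2D_ENV` are
# placeholders assumed to be set up elsewhere.
#
# compute = OceanCompute(config_dict=config, data_provider=DataServiceProvider)
# job_id = compute.start(
#     consumer_wallet=consumer_wallet,
#     dataset=dataset_input,              # ComputeInput for the dataset asset
#     compute_environment=C2D_ENV,
#     algorithm=algorithm_input,          # ComputeInput for the algorithm asset
# )
# info = compute.status(ddo, compute_service, job_id, consumer_wallet)
# if info["ok"]:
#     outputs = compute.result(ddo, compute_service, job_id, index=0, wallet=consumer_wallet)
# else:
#     compute.stop(ddo, compute_service, job_id, consumer_wallet)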
|
299,571 |
create social auth entry
|
"""
Tests for `remove_social_auth_users` management command
"""
import sys
from contextlib import contextmanager
from uuid import uuid4
import pytest
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase, override_settings
from six import StringIO
from social_django.models import UserSocialAuth
from common.djangoapps.student.models import User
from common.djangoapps.student.tests.factories import UserFactory
from common.djangoapps.third_party_auth.management.commands import remove_social_auth_users
from common.djangoapps.third_party_auth.tests.factories import SAMLProviderConfigFactory
from openedx.core.djangolib.testing.utils import skip_unless_lms
FEATURES_WITH_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_ENABLED['ENABLE_ENROLLMENT_RESET'] = True
@skip_unless_lms
class TestRemoveSocialAuthUsersCommand(TestCase):
"""
Test django management command
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.command = remove_social_auth_users.Command()
def setUp(self):
super().setUp()
self.provider_hogwarts = SAMLProviderConfigFactory.create(slug='hogwarts', entity_id='entity-id-hogwarts')
self.provider_durmstrang = SAMLProviderConfigFactory.create(slug='durmstrang', entity_id='entity-id-durmstrang')
self.user_fleur = UserFactory(username='fleur') # no social auth
self.user_harry = UserFactory(username='harry') # social auth for Hogwarts
self.user_viktor = UserFactory(username='viktor') # social auth for Durmstrang
self.METHOD_NAME(self.user_harry, self.provider_hogwarts)
self.METHOD_NAME(self.user_viktor, self.provider_durmstrang)
@contextmanager
def _replace_stdin(self, text): # lint-amnesty, pylint: disable=missing-function-docstring
orig = sys.stdin
sys.stdin = StringIO(text)
yield
sys.stdin = orig
def METHOD_NAME(self, user, provider):
external_id = uuid4()
UserSocialAuth.objects.create(
user=user,
uid=f'{provider.slug}:{external_id}',
provider=provider.slug,
)
def find_user_social_auth_entry(self, username):
UserSocialAuth.objects.get(user__username=username)
@override_settings(FEATURES=FEATURES_WITH_ENABLED)
def test_remove_users(self):
call_command(self.command, self.provider_hogwarts.slug, force=True)
# user with input idp is removed, along with social auth entries
with pytest.raises(User.DoesNotExist):
User.objects.get(username='harry')
with pytest.raises(UserSocialAuth.DoesNotExist):
self.find_user_social_auth_entry('harry')
# other users intact
self.user_fleur.refresh_from_db()
self.user_viktor.refresh_from_db()
assert self.user_fleur is not None
assert self.user_viktor is not None
# other social auth intact
self.find_user_social_auth_entry(self.user_viktor.username)
@override_settings(FEATURES=FEATURES_WITH_ENABLED)
def test_invalid_idp(self):
invalid_slug = 'jedi-academy'
err_string = f'No SAML provider found for slug {invalid_slug}'
with self.assertRaisesRegex(CommandError, err_string):
call_command(self.command, invalid_slug)
@override_settings(FEATURES=FEATURES_WITH_ENABLED)
def test_confirmation_required(self):
""" By default this command will require user input to confirm """
with self._replace_stdin('confirm'):
call_command(self.command, self.provider_hogwarts.slug)
with pytest.raises(User.DoesNotExist):
User.objects.get(username='harry')
with pytest.raises(UserSocialAuth.DoesNotExist):
self.find_user_social_auth_entry('harry')
@override_settings(FEATURES=FEATURES_WITH_ENABLED)
def test_confirmation_failure(self):
err_string = 'User confirmation required. No records have been modified'
with self.assertRaisesRegex(CommandError, err_string):
with self._replace_stdin('no'):
call_command(self.command, self.provider_hogwarts.slug)
# no users should be removed
assert len(User.objects.all()) == 3
assert len(UserSocialAuth.objects.all()) == 2
def test_feature_default_disabled(self):
""" By default this command should not be enabled """
err_string = 'ENABLE_ENROLLMENT_RESET feature not enabled on this enviroment'
with self.assertRaisesRegex(CommandError, err_string):
call_command(self.command, self.provider_hogwarts.slug, force=True)
|
299,572 |
chunk indices
|
# Author: Niels Nuyttens <[email protected]>
#
# License: Apache Software License 2.0
from __future__ import annotations
import sys
import typing
from collections import namedtuple
from enum import Enum
from typing import Dict, List, Optional, Union # noqa: TYP001
if typing.TYPE_CHECKING:
from typing_extensions import Protocol
else:
Protocol = object
if sys.version_info >= (3, 10):
from typing import ParamSpec, TypeGuard # noqa: F401
else:
from typing_extensions import ParamSpec, TypeGuard # noqa: F401
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
import pandas as pd
from nannyml.exceptions import InvalidArgumentsException
from nannyml.plots import Figure
Key = namedtuple('Key', 'properties display_names')
class Result(Protocol):
"""The data that was calculated or estimated."""
data: pd.DataFrame
@property
def empty(self) -> bool:
...
@property
def chunk_keys(self) -> pd.Series:
...
@property
def chunk_start_dates(self) -> pd.Series:
...
@property
def chunk_end_dates(self) -> pd.Series:
...
@property
def chunk_start_indices(self) -> pd.Series:
...
@property
def chunk_end_indices(self) -> pd.Series:
...
@property
def METHOD_NAME(self) -> pd.Series:
...
@property
def chunk_periods(self) -> pd.Series:
...
def keys(self) -> List[Key]:
...
def values(self, key: Key) -> Optional[pd.Series]:
...
def alerts(self, key: Key) -> Optional[pd.Series]:
...
def upper_thresholds(self, key: Key) -> Optional[pd.Series]:
...
def lower_thresholds(self, key: Key) -> Optional[pd.Series]:
...
def upper_confidence_bounds(self, key: Key) -> Optional[pd.Series]:
...
def lower_confidence_bounds(self, key: Key) -> Optional[pd.Series]:
...
def sampling_error(self, key: Key) -> Optional[pd.Series]:
...
def filter(self, period: str = 'all', metrics: Optional[Union[str, List[str]]] = None, *args, **kwargs) -> Result:
...
def to_df(self, multilevel: bool = True) -> pd.DataFrame:
...
def plot(self, *args, **kwargs) -> Figure:
...
class Metric(Protocol):
"""Represents any kind of metric (or method) that can be calculated or estimated."""
@property
def display_name(self) -> str:
...
@property
def column_name(self) -> str:
...
class Calculator(Protocol):
"""Calculator base class."""
def fit(self, reference_data: pd.DataFrame, *args, **kwargs) -> Self:
"""Fits the calculator on reference data."""
def calculate(self, data: pd.DataFrame, *args, **kwargs) -> Result:
"""Perform a calculation based on analysis data."""
class Estimator(Protocol):
"""Estimator base class."""
def fit(self, reference_data: pd.DataFrame, *args, **kwargs) -> Self:
"""Fits the estimator on reference data."""
def estimate(self, data: pd.DataFrame, *args, **kwargs) -> Result:
"""Perform an estimation based on analysis data."""
ModelOutputsType = Union[str, Dict[str, str]]
def model_output_column_names(model_outputs: ModelOutputsType) -> List[str]:
"""Get model output column nanmes from inputs."""
if model_outputs is None:
return []
if isinstance(model_outputs, str):
return [model_outputs]
elif isinstance(model_outputs, Dict):
return [column_name for label, column_name in model_outputs.items()]
else:
raise InvalidArgumentsException(
f"received object of type {type(model_outputs)}. ModelOutputsType should be "
f"either a 'str' or a 'Dict[str, str]'"
)
def class_labels(model_outputs: ModelOutputsType) -> List[str]:
if isinstance(model_outputs, Dict):
return sorted(list(model_outputs.keys()))
else:
raise InvalidArgumentsException(
f"received object of type {type(model_outputs)}. Multiclass ModelOutputsType should be a 'Dict[str, str]'"
)
class ProblemType(str, Enum):
"""Use cases NannyML supports."""
CLASSIFICATION_BINARY = 'classification_binary'
CLASSIFICATION_MULTICLASS = 'classification_multiclass'
REGRESSION = 'regression'
@staticmethod
def parse(problem_type: str):
if problem_type in 'classification_binary':
return ProblemType.CLASSIFICATION_BINARY
elif problem_type in 'classification_multiclass':
return ProblemType.CLASSIFICATION_MULTICLASS
elif problem_type in 'regression':
return ProblemType.REGRESSION
else:
raise InvalidArgumentsException(
f"unknown value for problem_type '{problem_type}'. Value should be one of "
f"{[pt.value for pt in ProblemType]}"
)
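# --- Illustrative usage sketch (comments only, not part of this module) ---
# model_output_column_names accepts either a single column name or a mapping of
# class label -> column name; ProblemType.parse resolves the string form of a use case.
#
# model_output_column_names("y_pred_proba")
# # -> ["y_pred_proba"]
# model_output_column_names({"upmarket": "y_pred_proba_upmarket", "regular": "y_pred_proba_regular"})
# # -> ["y_pred_proba_upmarket", "y_pred_proba_regular"]
# class_labels({"upmarket": "y_pred_proba_upmarket", "regular": "y_pred_proba_regular"})
# # -> ["regular", "upmarket"]   (labels sorted alphabetically)
# ProblemType.parse("regression")
# # -> ProblemType.REGRESSION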
|
299,573 |
absent
|
"""
Manage Grafana v4.0 data sources
.. versionadded:: 2017.7.0
:configuration: This state requires a configuration profile to be configured
in the minion config, minion pillar, or master config. The module will use
the 'grafana' key by default, if defined.
Example configuration using basic authentication:
.. code-block:: yaml
grafana:
grafana_url: http://grafana.localhost
grafana_user: admin
grafana_password: admin
grafana_timeout: 3
Example configuration using token based authentication:
.. code-block:: yaml
grafana:
grafana_url: http://grafana.localhost
grafana_token: token
grafana_timeout: 3
The behavior of this module is to create data sources if they do not exist, and
to update data sources if they already exist.
.. code-block:: yaml
Ensure influxdb data source is present:
grafana4_datasource.present:
- name: influxdb
- type: influxdb
- url: http://localhost:8086
- access: proxy
- basic_auth: true
- basic_auth_user: myuser
- basic_auth_password: mypass
- is_default: true
"""
from salt.utils.dictdiffer import deep_diff
def __virtual__():
"""Only load if grafana4 module is available"""
if "grafana4.get_datasource" in __salt__:
return True
return (False, "grafana4 module could not be loaded")
def present(
name,
type,
url,
access=None,
user=None,
password=None,
database=None,
basic_auth=None,
basic_auth_user=None,
basic_auth_password=None,
tls_auth=None,
json_data=None,
is_default=None,
with_credentials=None,
type_logo_url=None,
orgname=None,
profile="grafana",
):
"""
Ensure that a data source is present.
name
Name of the data source.
type
Type of the datasource ('graphite', 'influxdb' etc.).
access
Use proxy or direct. Default: proxy
url
The URL to the data source API.
user
Optional - user to authenticate with the data source.
password
Optional - password to authenticate with the data source.
database
Optional - database to use with the data source.
basic_auth
Optional - set to True to use HTTP basic auth to authenticate with the
data source.
basic_auth_user
Optional - HTTP basic auth username.
basic_auth_password
Optional - HTTP basic auth password.
json_data
Optional - additional json data to post (eg. "timeInterval").
is_default
Optional - set data source as default.
with_credentials
Optional - Whether credentials such as cookies or auth headers should
be sent with cross-site requests.
type_logo_url
Optional - Logo to use for this datasource.
orgname
Name of the organization in which the data source should be present.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
"""
if isinstance(profile, str):
profile = __salt__["config.option"](profile)
ret = {"name": name, "result": None, "comment": None, "changes": {}}
datasource = __salt__["grafana4.get_datasource"](name, orgname, profile)
data = _get_json_data(
name=name,
type=type,
url=url,
access=access,
user=user,
password=password,
database=database,
basicAuth=basic_auth,
basicAuthUser=basic_auth_user,
basicAuthPassword=basic_auth_password,
tlsAuth=tls_auth,
jsonData=json_data,
isDefault=is_default,
withCredentials=with_credentials,
typeLogoUrl=type_logo_url,
defaults=datasource,
)
if not datasource:
if __opts__["test"]:
ret["comment"] = "Datasource {} will be created".format(name)
return ret
__salt__["grafana4.create_datasource"](profile=profile, **data)
datasource = __salt__["grafana4.get_datasource"](name, profile=profile)
ret["result"] = True
ret["comment"] = "New data source {} added".format(name)
ret["changes"] = data
return ret
# At this stage, the datasource exists; however, the object provided by
# Grafana may lack some null keys compared to our "data" dict:
for key in data:
if key not in datasource:
datasource[key] = None
if data == datasource:
ret["comment"] = "Data source {} already up-to-date".format(name)
return ret
if __opts__["test"]:
ret["comment"] = "Datasource {} will be updated".format(name)
return ret
__salt__["grafana4.update_datasource"](datasource["id"], profile=profile, **data)
ret["result"] = True
ret["changes"] = deep_diff(datasource, data, ignore=["id", "orgId", "readOnly"])
ret["comment"] = "Data source {} updated".format(name)
return ret
def METHOD_NAME(name, orgname=None, profile="grafana"):
"""
Ensure that a data source is absent.
name
Name of the data source to remove.
orgname
Name of the organization from which the data source should be absent.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
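Example (illustrative, mirroring the module-level usage above):

.. code-block:: yaml

    Ensure influxdb data source is absent:
      grafana4_datasource.absent:
        - name: influxdb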
"""
if isinstance(profile, str):
profile = __salt__["config.option"](profile)
ret = {"name": name, "result": None, "comment": None, "changes": {}}
datasource = __salt__["grafana4.get_datasource"](name, orgname, profile)
if not datasource:
ret["result"] = True
ret["comment"] = "Data source {} already absent".format(name)
return ret
if __opts__["test"]:
ret["comment"] = "Datasource {} will be deleted".format(name)
return ret
__salt__["grafana4.delete_datasource"](datasource["id"], profile=profile)
ret["result"] = True
ret["changes"][name] = "Absent"
ret["comment"] = "Data source {} was deleted".format(name)
return ret
def _get_json_data(defaults=None, **kwargs):
if defaults is None:
defaults = {}
for k, v in kwargs.items():
if v is None:
kwargs[k] = defaults.get(k)
return kwargs
|
299,574 |
from string
|
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import json
import sys
from types import ModuleType
from typing import Callable, Union
import mlrun.errors
from .utils import SKLearnTypes
class Metric:
"""
A metric handling class to call a metric with additional keyword arguments later during a run and log the results to
MLRun.
"""
def __init__(
self,
metric: Union[Callable, str],
name: str = None,
additional_arguments: dict = None,
need_probabilities: bool = False,
):
"""
Initialize a metric object to be used with the MLRun logger.
:param metric: The metric to use. Can be passed as a string of an imported function or a full
module path to import from.
:param name: The metric name to use for logging it to MLRun.
:param additional_arguments: Additional arguments to pass for the metric function when calculating it.
:param need_probabilities: Whether this metric expects 'y_pred' to be from the 'predict_proba' method or from
'predict'.
"""
self._metric = (
self.METHOD_NAME(metric=metric) if isinstance(metric, str) else metric
)
self._arguments = {} if additional_arguments is None else additional_arguments
self._need_probabilities = need_probabilities
self._name = name if name is not None else self._get_default_name()
self._result = None # type: Union[float, None]
def __call__(
self,
y_true: SKLearnTypes.DatasetType,
y_pred: SKLearnTypes.DatasetType = None,
model: SKLearnTypes.ModelType = None,
x: SKLearnTypes.DatasetType = None,
) -> float:
"""
Call the metric function on the provided y_true and y_pred values using the stored additional arguments.
:param y_true: The ground truth values.
:param y_pred: The model predictions. If not provided, 'model' and 'x' are used to compute it.
:param model: The model to use for predicting when 'y_pred' is not given.
:param x: The input dataset to pass to the model when 'y_pred' is not given.
:return: The metric result.
"""
# Run a prediction if 'y_pred' was not given:
if y_pred is None:
if model is None or x is None:
raise mlrun.errors.MLRunInvalidArgumentError(
"Calculating a metric requires the model's predictions / probabilities (y_pred) or the model "
"itself and an input (x) to run 'predict' / 'predict_proba'."
)
y_pred = (
model.predict_proba(x) if self._need_probabilities else model.predict(x)
)
# Calculate the result and return:
self._result = self._metric(y_true, y_pred, **self._arguments)
return self._result
@property
def name(self) -> str:
"""
Get the name of the metric.
:return: The name of the metric.
"""
return self._name
@property
def need_probabilities(self) -> bool:
"""
Return whether this metric expects 'y_pred' to be from the 'model.predict_proba' method or from 'model.predict'.
:return: True if probabilities are required and False if not.
"""
return self._need_probabilities
@property
def result(self) -> Union[float, None]:
"""
Get the metric result. If the metric was not calculated, None will be returned.
:return: The metric result.
"""
return self._result
def _get_default_name(self) -> str:
"""
Get the default name for this metric by the following rules:
* If metric is a function, the function's name.
* If metric is a callable object, the object's class name.
:return: The metric default name.
"""
# Function object have __name__ where classes instances have __class__.__name__:
name = getattr(self._metric, "__name__", None)
return name if name is not None else self._metric.__class__.__name__
def display(self, full: bool = False):
"""
Display the metric and its result.
:param full: Whether to print a full display of the metric, including the metric arguments. Default: False.
"""
result = self._result if self._result is not None else "?"
print(f"{self._name} = {result}")
if full:
print(f"Arguments: {json.dumps(self._arguments, indent=4)}")
def _repr_pretty_(self, p, cycle: bool):
"""
A pretty representation of the metric. Will be called by the IPython kernel. This method will call the metric
display method.
:param p: A RepresentationPrinter instance.
:param cycle: If a cycle is detected to prevent infinite loop.
"""
self.display()
@staticmethod
def METHOD_NAME(metric: str) -> Callable:
"""
Look for the metric by name in the globally imported objects. If the given metric is a full module path, it will
be imported from the path.
:param metric: The metric name or a full module path to import the metric.
:return: The imported metric function.
:raise MLRunInvalidArgumentError: If the metric name was not found within the global imports.
"""
# Check if the metric is inside a module path:
module = None # type: Union[ModuleType, str, None]
if "." in metric:
module, metric = metric.rsplit(".", 1)
# Look for the metric in the globals dictionary (it was imported before):
if metric in globals():
return globals()[metric]
# Import the metric from the given module:
if module is not None:
# Check if the module is already imported:
if module in sys.modules:
# It is already imported:
module = sys.modules[module]
else:
# Import the module:
module = importlib.import_module(module)
imported_metric = getattr(module, metric)
globals().update({metric: imported_metric})
return imported_metric
# Metric string was not provided properly:
raise mlrun.errors.MLRunInvalidArgumentError(
f"The metric {metric} was not found in the global imports dictionary meaning it was not "
f"imported. In order to import it during the run, please provide the full module path to the"
f"metric. For example: 'module.sub_module.metric' will be parsed as "
f"from module.sub_module import metric."
)
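# --- Illustrative usage sketch (comments only, not part of this module) ---
# Shows the two ways of constructing a Metric: from an already imported callable and
# from a full module path string. scikit-learn and the sample values are assumptions
# used only for illustration.
#
# from sklearn.metrics import accuracy_score
#
# acc = Metric(metric=accuracy_score)
# f1 = Metric(metric="sklearn.metrics.f1_score", additional_arguments={"average": "macro"})
#
# y_true = [0, 1, 1, 0]
# y_pred = [0, 1, 0, 0]
# acc(y_true, y_pred)   # -> 0.75
# acc.display()         # prints "accuracy_score = 0.75"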
|
299,575 |
inp boxes
|
# Copyright (c) Alibaba, Inc. and its affiliates.
import copy
import random
import decord
import numpy as np
import torch
from detectron2.data.transforms import (ExtentTransform, RandomBrightness,
RandomFlip, ResizeShortestEdge)
from detectron2.structures import Boxes, Instances
from scipy.interpolate import interp1d
def METHOD_NAME(boxes: dict, start, end):
idxs = sorted([int(i) for i in boxes.keys()])
bbox = [boxes[str(i)] for i in idxs]
new_bboxes = []
for i in range(4):
f = interp1d(idxs, [b[i] for b in bbox])
new_b = f(list(range(start, end + 1)))
new_bboxes.append(new_b)
new_bboxes = np.stack(new_bboxes, axis=1)
return new_bboxes
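# Illustrative example (comments only): with boxes = {"0": [0, 0, 10, 10], "4": [4, 0, 14, 10]},
# start=0 and end=4, each coordinate is interpolated linearly over frames 0..4, giving
# x1 = [0, 1, 2, 3, 4], y1 = [0, 0, 0, 0, 0], x2 = [10, 11, 12, 13, 14], y2 = [10, 10, 10, 10, 10],
# stacked into an array of shape (5, 4).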
def assign_label(start, end, data_dict):
"""
Assign bbox detection labels to this short video segment, based on the segment's
start/end positions and the annotated actions. Method: take the temporal
intersection; if the intersection covers more than 70% of the sample or of the
annotated action, assign that action's label to the sample.
:param start: start frame index (inclusive)
:param end: end frame index (inclusive)
:param data_dict: annotation dict containing the labeled actions (labels as strings)
:return: [[action, x1, y1, x2, y2], ...]
"""
if 'actions' not in data_dict:
return []
scale = data_dict['scale']
gt_labels = []
for action in data_dict['actions']:
low = max(int(action['start']), start)
high = min(int(action['end']), end)
inter = 0 if low > high else high - low
if inter > (end - start) * 0.7 or inter > (action['end']
- action['start']) * 0.7:
boxes = METHOD_NAME(action['boxes'], low, high)
box = boxes.mean(axis=0) / scale
label = [action['label']] + box.tolist()
gt_labels.append(label)
return gt_labels
class VideoDetMapper:
def __init__(self,
classes_id_map,
used_seconds=2,
input_frames=4,
is_train=True,
tile=False):
self.classes_id = classes_id_map
self.is_train = is_train
self.used_seconds = used_seconds
self.input_frames = input_frames
self.tile = tile
self.trans = [RandomBrightness(0.5, 1.5)]
self.tfm_gens = [
ResizeShortestEdge((480, 512, 544, 576, 608, 640, 672, 704, 736,
768) if is_train else 512,
1280 if is_train else 896, 'choice')
]
if is_train:
self.tfm_gens.append(RandomFlip())
def __call__(self, data_dict):
data_dict = copy.deepcopy(data_dict)
try:
data_dict = self._call(data_dict)
except Exception as e:
print(data_dict['path:FILE'], e)
data_dict = None
return data_dict
def _call(self, data_dict):
video_name = data_dict['path:FILE']
if data_dict['actions'] is not None:
data_dict['actions'] = eval(data_dict['actions'])
else:
data_dict['actions'] = []
v = decord.VideoReader(video_name, ctx=decord.cpu(0))
num_frames = len(v)
used_frames = max(int((1 + random.random()) * v.get_avg_fps()), 1)
if self.is_train:
start_idx = random.randint(0, max(0, num_frames - used_frames))
else:
start_idx = max(0, num_frames - used_frames) // 2
idxs = np.linspace(start_idx, min(start_idx + used_frames, num_frames) - 1, self.input_frames) \
.round().astype('int32').tolist()
imgs = v.get_batch(idxs).asnumpy()
del v
labels = assign_label(idxs[0], idxs[-1] + 1, data_dict)
bboxes = np.array([label[-4:] for label in labels])
if self.is_train:
if self.tile:
imgs, labels, bboxes = self.random_tile(
video_name, imgs, labels, bboxes, pos_choices=[1, 1, 2, 4])
else:
imgs, labels, bboxes = self.random_tile(
video_name, imgs, labels, bboxes, pos_choices=[1])
for g in self.trans:
tfm = g.get_transform(imgs)
imgs = tfm.apply_image(imgs)
imgs, bboxes = self.random_extent(imgs, bboxes)
for trans in self.tfm_gens:
tfm = trans.get_transform(imgs[0])
imgs = np.stack([tfm.apply_image(img) for img in imgs])
bboxes = tfm.apply_box(bboxes)
_, h, w, c = imgs.shape
data_dict['height'] = h
data_dict['width'] = w
gt_boxes = Boxes(torch.from_numpy(bboxes)) # XYXY_ABS
gt_classes = [self.classes_id[label[0]]
for label in labels] # N is background
instances = Instances((data_dict['height'], data_dict['width']))
instances.set('gt_boxes', gt_boxes)
instances.set('gt_classes',
torch.as_tensor(gt_classes, dtype=torch.int64))
data_dict['instances'] = instances
data_dict['frames'] = torch.as_tensor(
np.ascontiguousarray(imgs.transpose([3, 0, 1, 2])))
return data_dict
def random_tile(self, name, imgs, labels, bboxes,
pos_choices=(1, 1, 2, 4)):
_, h, w, c = imgs.shape
bboxes = bboxes.tolist()
if len(labels) == 0: # negative sample: 1/2, 1, 2, 4
ratio = random.choice([0, 1, 2, 4])
if ratio == 0: # randomly crop a sub-region
h0, w0 = random.randint(0, h // 2), random.randint(0, w // 2)
imgs = imgs[:, h0:h0 + h // 2, w0:w0 + h // 2]
elif ratio == 2:
imgs = np.tile(imgs,
(1, 1, 2,
1)) if h > w else np.tile(imgs, (1, 2, 1, 1))
elif ratio == 4:
imgs = np.tile(imgs, (1, 2, 2, 1))
else: # positive sample: 1, 2, 4
ratio = random.choice(pos_choices)
if ratio == 2:
labels = labels * 2
if h >= w: # tile side by side (left-right)
imgs = np.tile(imgs, (1, 1, 2, 1))
bbox2 = [[x1 + w, y1, x2 + w, y2]
for x1, y1, x2, y2 in bboxes]
else: # tile vertically (top-bottom)
imgs = np.tile(imgs, (1, 2, 1, 1))
bbox2 = [[x1, y1 + h, x2, y2 + h]
for x1, y1, x2, y2 in bboxes]
bboxes = bboxes + bbox2
elif ratio == 4:
labels = labels * 4
imgs = np.tile(imgs, (1, 2, 2, 1))
bbox2 = [[x1 + w, y1, x2 + w, y2] for x1, y1, x2, y2 in bboxes] + \
[[x1, y1 + h, x2, y2 + h] for x1, y1, x2, y2 in bboxes] + \
[[x1 + w, y1 + h, x2 + w, y2 + h] for x1, y1, x2, y2 in bboxes]
bboxes = bboxes + bbox2
bboxes = np.array(bboxes)
return imgs.copy(), labels, bboxes
def random_extent(self, imgs, bboxes):
t, h, w, c = imgs.shape
r_h, r_w = int(h * 0.1), int(w * 0.1)
x0, y0 = random.randint(-r_w, r_w), random.randint(-r_h, r_h)
x1, y1 = random.randint(w - r_w,
w + r_w), random.randint(h - r_h, h + r_h)
tfm = ExtentTransform((x0, y0, x1, y1), output_size=(y1 - y0, x1 - x0))
imgs = np.stack([tfm.apply_image(img) for img in imgs])
bboxes = tfm.apply_box(bboxes)
return imgs, bboxes
|
299,576 |
format header
|
import re
import torch._C as C
"""
PythonDispatcher class is a thin python-binding to C++ dispatcher and it
is designed to show how dispatcher precompute works. In particular,
it shows for a certain op `foo`, what the computed dispatch table looks
like after users register their kernels to certain dispatch keys.
In the real C++ dispatcher we support many dispatch keys for different
functionalities. For simplicity PythonDispatcher only supports dispatch
keys for a single example of each use case. These use cases are listed below:
- CPU/AutogradCPU: represents in-tree backends for which we usually have dedicated inference &
autograd kernels in the pytorch core library.
E.g. CPU, CUDA
- FPGA/AutogradOther: represents in-tree backends for which we usually have backend-specific
inference kernels, but which share the same autograd kernel specified in AutogradOther.
E.g. FPGA, SparseCsrCPU
- XLA/AutogradXLA: represents out-of-tree backends for which we don't have either inference or autograd
kernels defined in the pytorch core library. The backend owner is responsible for registering both
inference & autograd kernels in their extensions (e.g. torch-xla) for the operators they support.
E.g. XLA, XPU, MPS
- CompositeExplicitAutograd: alias key mapped to inference kernels of all backends like CPU, CUDA, XLA etc.
Kernels registered to this key MUST work for inference for all backends.
- Autograd: alias key mapped to autograd of all backends like AutogradCPU, AutogradXLA, AutogradOther.
Kernels registered to this key MUST work for autograd for all backends.
- CompositeImplicitAutograd: alias key CompositeImplicitAutograd = CompositeExplicitAutograd + Autograd
Kernels registered to this key MUST work for both inference + autograd for all backends.
Note we only allow registrations to alias keys inside pytorch core library. E.g
you shouldn't register a CompositeImplicitAutograd or CompositeExplicitAutograd
kernel from torch-xla extension, instead you should upstream the kernel into
pytorch/pytorch repo so that it's available for all backends and continuously
tested even without the extension.
Usage:
dispatcher = PythonDispatcher()
dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"])
print(dispatcher.dispatchTable()) # This tells you exactly which kernel is used for certain backend.
# For more debugging information
# print(dispatcher.keys())
# print(dispatcher.registrations())
# print(dispatcher.rawRegistrations())
# print(dispatcher.rawDispatchTable())
PythonDispatcher calls the C++ dispatcher under the hood to precompute the dispatch table.
This file only provides the simplified API for developers, relevant test code is located in
test/test_dispatch.py
"""
class PythonDispatcher:
namespace = "__test__"
name = "foo"
# fmt: off
runtime_keys = [
"CPU", "AutogradCPU",
"FPGA", "AutogradOther",
"XLA", "AutogradXLA",
"Lazy", "AutogradLazy",
]
# fmt: on
alias_keys = [
"CompositeExplicitAutograd",
"Autograd",
"CompositeImplicitAutograd",
]
supported_keys = runtime_keys + alias_keys
def __init__(self):
C._dispatch_check_invariants(self.name) # type: ignore[attr-defined]
self.ref = C._dispatch_library("FRAGMENT", self.namespace, "")
self.ref.def_("foo(Tensor x) -> Tensor")
"""
Returns a list of dispatch keys supported by PythonDispatcher.
You can register kernels to these keys.
"""
def keys(self):
return self.supported_keys
"""
Register kernels to the target dispatchKeys.
dispatchKeys(list[str]): a list of dispatch keys that you want to register
your own kernel for. Note that you don't need to write the kernel yourself in
this PythonDispatcher. E.g. for the CPU key, a kernel (e.g. fn_CPU) is
automatically generated and registered.
"""
def register(self, dispatchKeys):
# Overriding is not supported and triggers a warning in the C++ dispatcher.
if len(set(dispatchKeys)) != len(dispatchKeys):
raise RuntimeError(
f"Overriden is not allowed but found duplicates in {dispatchKeys}."
)
# We currently forbid this in codegen instead of C++ dispatcher.
if (
"CompositeImplicitAutograd" in dispatchKeys
and "CompositeExplicitAutograd" in dispatchKeys
):
raise RuntimeError(
"Registration to both CompositeImplicitAutograd and CompositeExplicitAutograd is not allowed."
)
for key in dispatchKeys:
if key not in self.supported_keys:
raise RuntimeError(
f"{key} is not supported, please select a dispatch key in {self.supported_keys}."
)
self.ref.impl_t_t("foo", dispatch=key, debug="fn_" + key)
"""
Helper function to format (key, kernel).
"""
def _format_line(self, key, kernel):
return f"{key:<15} {kernel}\n"
"""
Helper function to print a table header.
"""
def METHOD_NAME(self, header):
s = f"""
{header}
"""
s += self._format_line("key", "kernel")
s += "---------------------------\n"
return s
"""
Returns raw output of all registration info for debugging only.
Use registrations() for a simplified version.
"""
def rawRegistrations(self):
return C._dispatch_dump(f"{self.namespace}::{self.name}") # type: ignore[attr-defined]
"""
Returns raw output of computed dispatch table for debugging only.
Use dispatchTable() for a simplified version.
"""
def rawDispatchTable(self):
return C._dispatch_dump_table(f"{self.namespace}::{self.name}") # type: ignore[attr-defined]
"""
Returns a table(str) including all the registrations from users.
Note this includes registrations to both runtime keys and alias keys.
"""
def registrations(self):
output = self.METHOD_NAME("Registered Kernels")
state = self.rawRegistrations()
state_entries = state.split("\n")
for line in state_entries:
first = line.split(":")[0]
if any(first.startswith(k) for k in self.supported_keys):
kernel = line.split("::")[0].split(" ")[1]
output += self._format_line(first, kernel)
return output
"""
Returns the computed dispatch table(str). Note this only include
runtime keys, registrations to alias keys have been decoded to their
mapped runtime keys.
"""
def dispatchTable(self):
output = self.METHOD_NAME("Computed Dispatch Table")
table = self.rawDispatchTable()
table_entries = table.split("\n")
regex = re.compile(r"registered at .*FallbackKernel\.cpp.*(\[)")
for line in table_entries:
k = line.split(":")[0]
if k in self.runtime_keys:
entry = regex.sub("[", line)
output += self._format_line(k, entry.split(": ")[1])
return output
|
299,577 |
check value
|
# coding: utf-8
# Copyright (C) 1994-2021 Altair Engineering, Inc.
# For more information, contact Altair at www.altair.com.
#
# This file is part of both the OpenPBS software ("OpenPBS")
# and the PBS Professional ("PBS Pro") software.
#
# Open Source License Information:
#
# OpenPBS is free software. You can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# OpenPBS is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Commercial License Information:
#
# PBS Pro is commercially licensed software that shares a common core with
# the OpenPBS software. For a copy of the commercial license terms and
# conditions, go to: (http://www.pbspro.com/agreement.html) or contact the
# Altair Legal Department.
#
# Altair's dual-license business model allows companies, individuals, and
# organizations to create proprietary derivative works of OpenPBS and
# distribute them - whether embedded or bundled with other software -
# under a commercial license agreement.
#
# Use of Altair's trademarks, including but not limited to "PBS™",
# "OpenPBS®", "PBS Professional®", and "PBS Pro™" and Altair's logos is
# subject to Altair's trademark licensing policies.
import math
from math import sqrt
from ptl.utils.pbs_testsuite import *
import statistics
class TestPerformance(PBSTestSuite):
"""
Base test suite for Performance tests
"""
def METHOD_NAME(self, res):
if isinstance(res, list):
for val in res:
if not isinstance(val, (int, float)):
raise self.failureException(
"Test result list must be int or float")
else:
if not isinstance(res, (int, float)):
raise self.failureException("Test result must be int or float")
def perf_test_result(self, result, test_measure, unit):
"""
Add test results to json file. If a multiple trial values are passed
calculate mean,std_dev,min,max for the list.
"""
self.METHOD_NAME(result)
if isinstance(result, list) and len(result) > 1:
mean_res = statistics.mean(result)
stddev_res = statistics.stdev(result)
lowv = mean_res - (stddev_res * 2)
uppv = mean_res + (stddev_res * 2)
new_result = [x for x in result if x > lowv and x < uppv]
if len(new_result) == 0:
new_result = result
max_res = round(max(new_result), 2)
min_res = round(min(new_result), 2)
mean_res = statistics.mean(new_result)
mean_res = round(mean_res, 2)
trial_no = 1
trial_data = []
for trial_result in result:
trial_result = round(trial_result, 2)
trial_data.append(
{"trial_no": trial_no, "value": trial_result})
trial_no += 1
test_data = {"test_measure": test_measure,
"unit": unit,
"test_data": {"mean": mean_res,
"std_dev": stddev_res,
"minimum": min_res,
"maximum": max_res,
"trials": trial_data,
"samples_considered": len(new_result),
"total_samples": len(result)}}
return self.set_test_measurements(test_data)
else:
variance = 0
if isinstance(result, list):
result = result[0]
if isinstance(result, float):
result = round(result, 2)
testdic = {"test_measure": test_measure, "unit": unit,
"test_data": {"mean": result,
"std_dev": variance,
"minimum": result,
"maximum": result}}
return self.set_test_measurements(testdic)
pass
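# --- Illustrative worked example (comments only, values are made up) ---
# perf_test_result() drops trial values outside mean +/- 2 * stdev before reporting.
# For ten trials [10.0] * 9 + [50.0]:
#   mean = 14.0, sample stdev ~ 12.65, accepted band ~ (-11.3, 39.3)
#   the 50.0 outlier is excluded, so the reported mean/min/max come from the nine
#   remaining samples (mean = 10.0), with samples_considered = 9 and total_samples = 10.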
|
299,578 |
region
|
#!/usr/bin/env python
""" This program adds single detector hdf trigger files together.
"""
import numpy, argparse, h5py, logging
import pycbc.version
def changes(arr):
l = numpy.where(arr[:-1] != arr[1:])[0]
l = numpy.concatenate(([0], l+1, [len(arr)]))
return numpy.unique(l)
def collect(key, files):
data = []
for fname in files:
with h5py.File(fname, 'r') as fin:
if key in fin:
data += [fin[key][:]]
return numpy.concatenate(data)
def METHOD_NAME(f, key, boundaries, ids):
dset = f[key]
refs = []
for j in range(len(boundaries) - 1):
l, r = boundaries[ids[j]], boundaries[ids[j]+1]
refs.append(dset.regionref[l:r])
f.create_dataset(key+'_template', data=refs,
dtype=h5py.special_dtype(ref=h5py.RegionReference))
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version',
version=pycbc.version.git_verbose_msg)
parser.add_argument('--trigger-files', nargs='+')
parser.add_argument('--output-file', required=True)
parser.add_argument('--bank-file', required=True)
parser.add_argument('--compression-level', type=int, default=6,
help='Set HDF compression level in the output file '
'(default 6)')
parser.add_argument('--verbose', '-v', action='count')
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.INFO)
f = h5py.File(args.output_file, 'w')
logging.info("getting the list of columns from a representative file")
trigger_columns = []
for fname in args.trigger_files:
try:
f2 = h5py.File(fname, 'r')
except IOError as e:
logging.error("Cannot open %s" % fname)
raise e
ifo = tuple(f2.keys())[0]
if len(f2[ifo].keys()) > 0:
k = f2[ifo].keys()
trigger_columns = list(f2[ifo].keys())
if not 'template_hash' in trigger_columns:
f2.close()
continue
trigger_columns.remove('search')
trigger_columns.remove('template_hash')
if 'gating' in trigger_columns:
trigger_columns.remove('gating')
f2.close()
break
f2.close()
for col in trigger_columns:
logging.info("trigger column: %s", col)
logging.info('reading the metadata from the files')
tpc = numpy.array([], dtype=numpy.float64)
frpc = numpy.array([], dtype=numpy.float64)
stf = numpy.array([], dtype=numpy.float64)
rtime = numpy.array([], dtype=numpy.float64)
starts = numpy.array([], dtype=numpy.float64)
ends = numpy.array([], dtype=numpy.float64)
gating = {}
for filename in args.trigger_files:
try:
data = h5py.File(filename, 'r')
except IOError as e:
logging.error('Cannot open %s', filename)
raise e
ifo_data = data[ifo]
starts = numpy.append(starts, ifo_data['search/start_time'][:])
ends = numpy.append(ends, ifo_data['search/end_time'][:])
if 'templates_per_core' in ifo_data['search'].keys():
tpc = numpy.append(tpc, ifo_data['search/templates_per_core'][:])
if 'filter_rate_per_core' in ifo_data['search'].keys():
frpc = numpy.append(frpc, ifo_data['search/filter_rate_per_core'][:])
if 'setup_time_fraction' in ifo_data['search'].keys():
stf = numpy.append(stf, ifo_data['search/setup_time_fraction'][:])
if 'run_time' in ifo_data['search'].keys():
rtime = numpy.append(rtime, ifo_data['search/run_time'][:])
if 'gating' in ifo_data:
gating_keys = []
ifo_data['gating'].visit(gating_keys.append)
for gk in gating_keys:
gk_data = ifo_data['gating/' + gk]
if isinstance(gk_data, h5py.Dataset):
if not gk in gating:
gating[gk] = numpy.array([], dtype=numpy.float64)
gating[gk] = numpy.append(gating[gk], gk_data[:])
data.close()
# store de-duplicated segments sorted by start time
starts, uindex = numpy.unique(starts, return_index=True)
ends = ends[uindex]
sort = starts.argsort()
f['%s/search/start_time' % ifo] = starts[sort]
f['%s/search/end_time' % ifo] = ends[sort]
if len(tpc) > 0:
f['%s/search/templates_per_core' % ifo] = tpc
if len(frpc) > 0:
f['%s/search/filter_rate_per_core' % ifo] = frpc
if len(stf) > 0:
f['%s/search/setup_time_fraction' % ifo] = stf
if len(rtime) > 0:
f['%s/search/run_time' % ifo] = rtime
for gk, gv in gating.items():
f[ifo + '/gating/' + gk] = gv
logging.info('set up sorting of triggers and template ids')
# For fast lookup we need the templates in hash order
hashes = h5py.File(args.bank_file, 'r')['template_hash'][:]
bank_tids = hashes.argsort()
unsort = bank_tids.argsort()
hashes = hashes[bank_tids]
trigger_hashes = collect('%s/template_hash' % ifo, args.trigger_files)
trigger_sort = trigger_hashes.argsort()
trigger_hashes = trigger_hashes[trigger_sort]
template_boundaries = changes(trigger_hashes)
template_ids = bank_tids[numpy.searchsorted(hashes, trigger_hashes[template_boundaries[:-1]])]
full_boundaries = numpy.searchsorted(trigger_hashes, hashes)
full_boundaries = numpy.concatenate([full_boundaries, [len(trigger_hashes)]])
# get the full boundaries in hash order
del trigger_hashes
idlen = (template_boundaries[1:] - template_boundaries[:-1])
f.create_dataset('%s/template_id' % ifo,
data=numpy.repeat(template_ids, idlen),
compression='gzip', shuffle=True,
compression_opts=args.compression_level)
f['%s/template_boundaries' % ifo] = full_boundaries[unsort]
logging.info('reading the trigger columns from the input files')
for col in trigger_columns:
key = '%s/%s' % (ifo, col)
logging.info('reading %s', col)
data = collect(key, args.trigger_files)[trigger_sort]
logging.info('writing %s to file', col)
dset = f.create_dataset(key, data=data, compression='gzip',
compression_opts=args.compression_level,
shuffle=True)
del data
METHOD_NAME(f, key, full_boundaries, unsort)
f.close()
logging.info('done')
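# --- Illustrative invocation (hypothetical script and file names) ---
# python <this_script> \
#     --trigger-files H1-TRIGGERS-1.hdf H1-TRIGGERS-2.hdf \
#     --bank-file H1-BANK.hdf \
#     --output-file H1-MERGED-TRIGGERS.hdf \
#     --compression-level 6 --verbose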
|
299,579 |
heaviside with power
|
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class ArcCosineFeatureMap(ModelLayer):
"""
A general version of the arc-cosine kernel feature map (s = 1 restores
the original arc-cosine kernel feature map).
Applies H(x) * x^s, where H is the Heaviside step function and x is the
input after applying FC (such that x = w * x_orig + b).
For more information, see the original paper:
http://cseweb.ucsd.edu/~saul/papers/nips09_kernel.pdf
Inputs :
output_dims -- dimensions of the output vector
s -- degree to raise transformed features
scale -- amount to scale the standard deviation
weight_init -- initialization distribution for weight parameter
bias_init -- initialization distribution for bias parameter
weight_optim -- optimizer for weight params; None for random features
bias_optim -- optimizer for bias param; None for random features
set_weight_as_global_constant -- if True, initialized random parameters
will be constant across all distributed
instances of the layer
initialize_output_schema -- if True, initialize output schema as Scalar
from Arc Cosine; else output schema is None
"""
def __init__(
self,
model,
input_record,
output_dims,
s=1,
scale=1.0,
weight_init=None,
bias_init=None,
weight_optim=None,
bias_optim=None,
set_weight_as_global_constant=False,
initialize_output_schema=True,
name='arc_cosine_feature_map',
**kwargs):
super().__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
self.params = []
self.model = model
self.set_weight_as_global_constant = set_weight_as_global_constant
self.input_dims = input_record.field_type().shape[0]
assert self.input_dims >= 1, "Expected input dimensions >= 1, got %s" \
% self.input_dims
if initialize_output_schema:
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
model.net.NextScopedBlob(name + '_output')
)
self.output_dims = output_dims
assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
% self.output_dims
self.s = s
assert (self.s >= 0), "Expected s >= 0, got %s" % self.s
assert isinstance(self.s, int), "Expected s to be type int, got type %s" \
% type(self.s)
assert (scale > 0.0), "Expected scale > 0, got %s" % scale
self.stddev = scale * np.sqrt(1.0 / self.input_dims)
# Initialize train_init_net parameters
# Random Parameters
if set_weight_as_global_constant:
w_init = np.random.normal(scale=self.stddev,
size=(self.output_dims, self.input_dims))
b_init = np.random.uniform(low=-0.5 * self.stddev,
high=0.5 * self.stddev,
size=self.output_dims)
self.random_w = self.model.add_global_constant(
name=self.name + "_fixed_rand_W",
array=w_init
)
self.random_b = self.model.add_global_constant(
name=self.name + "_fixed_rand_b",
array=b_init
)
else:
(self.random_w, self.random_b) = self._initialize_params(
'random_w',
'random_b',
w_init=weight_init,
b_init=bias_init,
w_optim=weight_optim,
b_optim=bias_optim
)
def _initialize_params(self, w_name, b_name, w_init=None, b_init=None,
w_optim=None, b_optim=None):
"""
Initializes the Layer Parameters for weight and bias terms for features
Inputs :
w_name -- name of the parameter blob that will contain the w values
b_name -- name of the parameter blob that will contain the b values
w_init -- initialization distribution for weight parameter
b_init -- initialization distribution for bias parameter
w_optim -- optimizer to use for w; if None, then will use no optimizer
b_optim -- optimizer to use for b; if None, then will use no optimizer
"""
w_init = w_init if w_init else (
'GaussianFill', {'mean': 0.0, 'std': self.stddev}
)
w_optim = w_optim if w_optim else self.model.NoOptim
b_init = b_init if b_init else (
'UniformFill', {'min': -0.5 * self.stddev, 'max': 0.5 * self.stddev}
)
b_optim = b_optim if b_optim else self.model.NoOptim
w_param = self.create_param(param_name=w_name,
shape=(self.output_dims, self.input_dims),
initializer=w_init,
optimizer=w_optim)
b_param = self.create_param(param_name=b_name,
shape=[self.output_dims],
initializer=b_init,
optimizer=b_optim)
return [w_param, b_param]
def METHOD_NAME(self, net, input_features, output_blob, s):
"""
Applies Heaviside step function and Relu / exponentiation to features
depending on the value of s.
Inputs:
net -- net with operators
input_features -- features to processes
output_blob -- output blob reference
s -- degree to raise the transformed features
"""
if s == 0:
softsign_features = net.Softsign([input_features],
net.NextScopedBlob('softsign'))
return net.Relu(softsign_features, output_blob)
elif s == 1:
return net.Relu([input_features],
output_blob)
else:
relu_features = net.Relu([input_features],
net.NextScopedBlob('relu_rand'))
pow_features = net.Pow([input_features],
net.NextScopedBlob('pow_rand'),
exponent=float(s - 1))
return net.Mul([relu_features, pow_features],
output_blob)
def add_ops(self, net):
input_blob = self.input_record.field_blobs()
# Random features: wx + b
random_features = net.FC(input_blob + [self.random_w, self.random_b],
net.NextScopedBlob('random_features'))
# Process random features
self.METHOD_NAME(net,
random_features,
self.output_schema.field_blobs(),
self.s)
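# --- Standalone numpy sketch (comments only, not part of the layer) ---
# Reproduces the arc-cosine feature-map math outside of caffe2 for s >= 2:
# x = W @ x_orig + b followed by H(x) * x**s (equivalently relu(x) * x**(s - 1)).
# Dimensions and the scale factor mirror the constructor above; all values are made up.
#
# import numpy as np
#
# input_dims, output_dims, s, scale = 8, 16, 2, 1.0
# stddev = scale * np.sqrt(1.0 / input_dims)
# W = np.random.normal(scale=stddev, size=(output_dims, input_dims))
# b = np.random.uniform(-0.5 * stddev, 0.5 * stddev, size=output_dims)
#
# x_orig = np.random.randn(input_dims)
# x = W @ x_orig + b
# features = np.where(x > 0, x ** s, 0.0)   # H(x) * x^s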
|
299,580 |
add name
|
# SPDX-License-Identifier: Apache-2.0
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Tensor(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Tensor()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsTensor(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def TensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# Tensor
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Tensor
def Shape(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# Tensor
def ShapeAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# Tensor
def ShapeLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Tensor
def ShapeIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
# Tensor
def Type(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# Tensor
def Buffer(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Tensor
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Tensor
def Quantization(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from tf2onnx.tflite.QuantizationParameters import QuantizationParameters
obj = QuantizationParameters()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Tensor
def IsVariable(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# Tensor
def Sparsity(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from tf2onnx.tflite.SparsityParameters import SparsityParameters
obj = SparsityParameters()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Tensor
def ShapeSignature(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# Tensor
def ShapeSignatureAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# Tensor
def ShapeSignatureLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Tensor
def ShapeSignatureIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
return o == 0
def Start(builder): builder.StartObject(8)
def TensorStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
def TensorAddShape(builder, shape):
"""This method is deprecated. Please switch to AddShape."""
return AddShape(builder, shape)
def StartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def TensorStartShapeVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartShapeVector(builder, numElems)
def AddType(builder, type): builder.PrependInt8Slot(1, type, 0)
def TensorAddType(builder, type):
"""This method is deprecated. Please switch to AddType."""
return AddType(builder, type)
def AddBuffer(builder, buffer): builder.PrependUint32Slot(2, buffer, 0)
def TensorAddBuffer(builder, buffer):
"""This method is deprecated. Please switch to AddBuffer."""
return AddBuffer(builder, buffer)
def METHOD_NAME(builder, name): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def TensorAddName(builder, name):
"""This method is deprecated. Please switch to AddName."""
return METHOD_NAME(builder, name)
def AddQuantization(builder, quantization): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(quantization), 0)
def TensorAddQuantization(builder, quantization):
"""This method is deprecated. Please switch to AddQuantization."""
return AddQuantization(builder, quantization)
def AddIsVariable(builder, isVariable): builder.PrependBoolSlot(5, isVariable, 0)
def TensorAddIsVariable(builder, isVariable):
"""This method is deprecated. Please switch to AddIsVariable."""
return AddIsVariable(builder, isVariable)
def AddSparsity(builder, sparsity): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(sparsity), 0)
def TensorAddSparsity(builder, sparsity):
"""This method is deprecated. Please switch to AddSparsity."""
return AddSparsity(builder, sparsity)
def AddShapeSignature(builder, shapeSignature): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(shapeSignature), 0)
def TensorAddShapeSignature(builder, shapeSignature):
"""This method is deprecated. Please switch to AddShapeSignature."""
return AddShapeSignature(builder, shapeSignature)
def StartShapeSignatureVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def TensorStartShapeSignatureVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartShapeSignatureVector(builder, numElems)
def End(builder): return builder.EndObject()
def TensorEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder)
|
299,581 |
get credentials
|
# Copyright Swiss Data Science Center (SDSC). A partnership between
# École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""External dataset provider."""
from __future__ import annotations
import urllib
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Tuple, cast
from renku.core import errors
from renku.core.dataset.providers.api import (
AddProviderInterface,
ProviderApi,
ProviderCredentials,
ProviderPriority,
StorageProviderInterface,
)
from renku.core.dataset.providers.common import get_metadata
from renku.core.dataset.providers.models import DatasetAddAction
from renku.core.interface.storage import IStorage
from renku.core.util.os import get_absolute_path
from renku.core.util.urls import get_scheme
from renku.domain_model.project_context import project_context
from renku.infrastructure.storage.factory import StorageFactory
if TYPE_CHECKING:
from renku.core.dataset.providers.models import DatasetAddMetadata
from renku.domain_model.dataset import Dataset
class ExternalProvider(ProviderApi, StorageProviderInterface, AddProviderInterface):
"""External provider for remote filesystem."""
priority = ProviderPriority.HIGHEST
name = "External"
is_remote = True
def __init__(self, uri: str):
super().__init__(uri=get_uri_absolute_path(uri).rstrip("/"))
@staticmethod
def supports(uri: str) -> bool:
"""External doesn't support any URI for addition. It's only for storage backends."""
return False
@staticmethod
def supports_storage(uri: str) -> bool:
"""Whether or not this provider supports a given URI storage."""
return get_scheme(uri) in ("file", "")
@property
def path(self) -> str:
"""Return External path."""
return self.uri
def get_metadata(
self, uri: str, destination: Path, dataset_add_action: DatasetAddAction = DatasetAddAction.NONE, **_
) -> List["DatasetAddMetadata"]:
"""Get metadata of files that will be added to a dataset."""
files = get_metadata(provider=self, uri=uri, destination=destination, dataset_add_action=dataset_add_action)
for file in files:
if file.url and not file.url.startswith("file:"):
file.url = f"file://{file.url}"
if file.based_on:
file.based_on.url = file.url
return files
def convert_to_storage_uri(self, uri: str) -> str:
"""Convert backend-specific URI to a URI that is usable by the IStorage implementation."""
return f"file://{get_uri_absolute_path(uri=uri)}"
def METHOD_NAME(self) -> "ExternalCredentials":
"""Return an instance of provider's credential class."""
return ExternalCredentials(provider=self)
def get_storage(self, credentials: Optional["ProviderCredentials"] = None) -> "IStorage":
"""Return the storage manager for the provider."""
external_configuration = {
"type": "local",
}
if not credentials:
credentials = self.METHOD_NAME()
return StorageFactory.get_storage(
storage_scheme="file",
provider=self,
credentials=credentials,
configuration=external_configuration,
)
def on_create(self, dataset: "Dataset") -> None:
"""Hook to perform provider-specific actions when creating a dataset."""
storage = self.get_storage(credentials=None)
        # NOTE: The underlying rclone tool cannot tell if a directory within an external bucket exists or not
if not storage.exists(self.uri):
            raise errors.ParameterError(f"External path '{self.path}' doesn't exist.")
project_context.repository.add_ignored_pattern(pattern=str(dataset.get_datadir()))
class ExternalCredentials(ProviderCredentials):
"""External-specific credentials."""
def __init__(self, provider: ExternalProvider):
super().__init__(provider=provider)
@staticmethod
def get_credentials_names() -> Tuple[str, ...]:
"""Return a tuple of the required credentials for a provider."""
return tuple()
@property
def provider(self) -> ExternalProvider:
"""Return the associated provider instance."""
return cast(ExternalProvider, self._provider)
def get_credentials_section_name(self) -> str:
"""Get section name for storing credentials.
        NOTE: This method should be overridden by subclasses to allow multiple credentials per provider if needed.
"""
return self.provider.uri
def get_uri_absolute_path(uri: str) -> str:
"""Return absolute path to the external directory without resolving symlinks.
    Supported formats are ``file://<path>``, ``file:<path>`` or just ``<path>``.
Args:
uri(str): URI to get path from.
Returns:
str: Expanded/non-expanded URI's absolute path.
"""
return get_absolute_path(urllib.parse.urlparse(uri).path, expand=True)
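# Illustrative sketch (not part of the module): expected behaviour of the URI
# helpers above. Paths and the home directory are hypothetical.
#
#   get_uri_absolute_path("file:///data/sets")    # -> "/data/sets"
#   get_uri_absolute_path("~/data/sets")          # -> "/home/<user>/data/sets" (tilde expanded)
#   provider = ExternalProvider("file:///data/sets/")
#   provider.convert_to_storage_uri(provider.uri) # -> "file:///data/sets"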
|
299,582 |
launch wizard generate depreciations
|
# Author(s): Silvio Gregorini ([email protected])
# Copyright 2019 Openforce Srls Unipersonale (www.openforce.it)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class Asset(models.Model):
_name = "asset.asset"
_description = "Assets"
_inherit = ["mail.thread", "mail.activity.mixin", "portal.mixin"]
_order = "purchase_date desc, name asc"
@api.model
def get_default_company_id(self):
return self.env.user.company_id
asset_accounting_info_ids = fields.One2many(
"asset.accounting.info", "asset_id", string="Accounting Info"
)
category_id = fields.Many2one(
"asset.category",
required=True,
string="Category",
)
code = fields.Char(
default="",
string="Code",
)
company_id = fields.Many2one(
"res.company",
default=get_default_company_id,
required=True,
string="Company",
tracking=True,
)
currency_id = fields.Many2one(
"res.currency",
required=True,
string="Currency",
)
customer_id = fields.Many2one("res.partner", string="Customer")
depreciation_ids = fields.One2many(
"asset.depreciation",
"asset_id",
string="Depreciations",
)
name = fields.Char(
required=True,
string="Name",
tracking=True,
)
purchase_amount = fields.Monetary(
string="Purchase Value",
tracking=True,
)
purchase_date = fields.Date(
default=fields.Date.today(),
string="Purchase Date",
tracking=True,
)
purchase_move_id = fields.Many2one("account.move", string="Purchase Move")
sale_amount = fields.Monetary(
string="Sale Value",
)
sale_date = fields.Date(string="Sale Date")
sale_move_id = fields.Many2one("account.move", string="Sale Move")
sold = fields.Boolean(string="Sold")
state = fields.Selection(
[
("non_depreciated", "Non Depreciated"),
("partially_depreciated", "Partially Depreciated"),
("totally_depreciated", "Depreciated"),
],
compute="_compute_state",
default="non_depreciated",
store=True,
string="State",
)
supplier_id = fields.Many2one("res.partner", string="Supplier")
supplier_ref = fields.Char(string="Supplier Ref.")
used = fields.Boolean(
string="Used",
)
@api.model
def create(self, vals):
# Add depreciation if it's missing while category is set
create_deps_from_categ = False
if vals.get("category_id") and not vals.get("depreciation_ids"):
create_deps_from_categ = True
if vals.get("code"):
vals["code"] = " ".join(vals.get("code").split())
asset = super().create(vals)
if create_deps_from_categ:
asset.onchange_category_id()
return asset
def write(self, vals):
if vals.get("code"):
vals["code"] = " ".join(vals.get("code").split())
return super().write(vals)
def unlink(self):
self.mapped("asset_accounting_info_ids").unlink()
self.mapped("depreciation_ids").unlink()
return super().unlink()
def name_get(self):
return [(asset.id, asset.make_name()) for asset in self]
@api.constrains("company_id")
def check_company(self):
for asset in self:
comp = asset.get_linked_aa_info_records().mapped("company_id")
if len(comp) > 1 or (comp and comp != asset.company_id):
raise ValidationError(
_(
"`{}`: cannot change asset's company once it's already"
" related to accounting info."
).format(asset.make_name())
)
@api.depends("depreciation_ids", "depreciation_ids.state")
def _compute_state(self):
for asset in self:
asset.state = asset.get_asset_state()
@api.onchange("category_id")
def onchange_category_id(self):
# Do not allow category changes if any depreciation line is already
# linked to an account move
if any(self.depreciation_ids.mapped("line_ids.move_id")):
raise ValidationError(
_(
"Cannot change category for an asset that's already been"
" depreciated."
)
)
if self.category_id:
# Remove depreciation lines
self.depreciation_ids = False
# Set new lines
vals = self.category_id.get_depreciation_vals(self.purchase_amount)
self.depreciation_ids = [(0, 0, v) for v in vals]
self.onchange_purchase_amount()
self.onchange_purchase_date()
@api.onchange("company_id")
def onchange_company_currency(self):
if self.company_id:
self.currency_id = self.company_id.currency_id
@api.onchange("purchase_amount")
def onchange_purchase_amount(self):
if self.purchase_amount:
for dep in self.depreciation_ids:
dep.amount_depreciable = self.purchase_amount * dep.base_coeff
if self.depreciation_ids.mapped("line_ids").filtered(
lambda l: l.move_type == "depreciated"
):
title = _("Warning!")
msg = _(
"Current asset has already been depreciated. Changes upon"
" its purchase value will not be automatically reflected"
" upon depreciation lines, which will have to be updated"
" manually."
)
return {"warning": {"title": title, "message": msg}}
@api.onchange("purchase_date")
def onchange_purchase_date(self):
if self.purchase_date:
for dep in self.depreciation_ids:
dep.date_start = self.purchase_date
def METHOD_NAME(self):
self.ensure_one()
xmlid = "assets_management.action_wizard_asset_generate_depreciation"
[act] = self.env.ref(xmlid).read()
ctx = dict(self._context)
ctx.update(
{
"default_asset_ids": [(6, 0, self.ids)],
"default_category_ids": [(6, 0, self.category_id.ids)],
"default_company_id": self.company_id.id,
"default_date": fields.Date.today(),
"default_type_ids": [
(6, 0, self.depreciation_ids.mapped("type_id").ids)
],
}
)
act["context"] = ctx
return act
def get_asset_state(self):
self.ensure_one()
if not self.depreciation_ids:
return "non_depreciated"
states = tuple(set(self.depreciation_ids.mapped("state")))
if not states:
return "non_depreciated"
elif len(states) == 1:
return states[0]
else:
return "partially_depreciated"
def get_linked_aa_info_records(self):
self.ensure_one()
return self.env["asset.accounting.info"].search(
[
"|",
("asset_id", "=", self.id),
("dep_line_id.asset_id", "=", self.id),
]
)
def make_name(self):
self.ensure_one()
name = self.name.strip()
if self.code:
return "[{}] {}".format(self.code.strip(), name)
return name
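# Illustrative note (not part of the module): the tuples used above are Odoo's
# x2many write commands -- ``(0, 0, vals)`` creates a new line in the one2many
# (see ``onchange_category_id``), while ``(6, 0, ids)`` replaces the relation
# with exactly the given ids (see the wizard context defaults). A minimal sketch:
#
#   self.depreciation_ids = [(0, 0, {"amount_depreciable": 100.0})]  # hypothetical vals
#   ctx["default_asset_ids"] = [(6, 0, self.ids)]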
|
299,583 |
convert to store
|
from io import BytesIO
import pytest
from translate.convert import mozlang2po, test_convert
class TestLang2PO:
ConverterClass = mozlang2po.lang2po
def _convert(
self,
input_string,
template_string=None,
blank_msgstr=False,
duplicate_style="msgctxt",
encoding="utf-8",
success_expected=True,
):
"""Helper that converts to target format without using files."""
input_file = BytesIO(input_string.encode())
output_file = BytesIO()
template_file = None
if template_string:
template_file = BytesIO(template_string.encode())
expected_result = 1 if success_expected else 0
converter = self.ConverterClass(
input_file,
output_file,
template_file,
blank_msgstr,
duplicate_style,
encoding,
)
assert converter.run() == expected_result
return converter.target_store, output_file
def METHOD_NAME(self, *args, **kwargs):
"""Helper that converts to target format store without using files."""
return self._convert(*args, **kwargs)[0]
def _convert_to_string(self, *args, **kwargs):
"""Helper that converts to target format string without using files."""
return self._convert(*args, **kwargs)[1].getvalue().decode("utf-8")
@staticmethod
def _single_element(po_store):
"""Helper to check store has one non-header unit, and return it."""
assert len(po_store.units) == 2
assert po_store.units[0].isheader()
return po_store.units[1]
@staticmethod
def _count_elements(po_store):
"""Helper that counts the number of non-header units."""
assert po_store.units[0].isheader()
return len(po_store.units) - 1
def test_convert_empty(self):
"""Check converting empty file returns no output."""
assert self._convert_to_string("", success_expected=False) == ""
def test_simple_string(self):
"""Check that a simple lang string converts correctly."""
input_string = """;One
Een
"""
expected_output = """
msgid "One"
msgstr "Een"
"""
assert expected_output in self._convert_to_string(input_string)
def test_merge(self):
"""Check converter doesn't merge."""
with pytest.raises(NotImplementedError):
self.METHOD_NAME("this", "cannot be", "blank", success_expected=False)
def test_simple_entry(self):
"""Check that a simple lang entry converts properly to a po entry."""
input_string = """;One
Een
"""
target_store = self.METHOD_NAME(input_string)
target_unit = self._single_element(target_store)
assert target_unit.source == "One"
assert target_unit.target == "Een"
def test_simple_comment(self):
"""Check handling of simple comments."""
input_string = """# Comment
;One
Een
"""
target_store = self.METHOD_NAME(input_string)
target_unit = self._single_element(target_store)
assert target_unit.source == "One"
assert target_unit.target == "Een"
assert target_unit.getnotes() == "Comment"
def test_meta_tags(self):
"""Check meta tags are not extracted."""
input_string = """## tag
# Comment
;One
Een
"""
target_store = self.METHOD_NAME(input_string)
target_unit = self._single_element(target_store)
assert "tag" not in target_unit.getnotes()
def test_keep_duplicates(self):
"""Check converting keeps duplicates."""
input_string = """
;One
Un
;One
Dous
"""
target_store = self.METHOD_NAME(input_string, duplicate_style="msgctxt")
assert self._count_elements(target_store) == 2
assert target_store.units[1].source == "One"
assert target_store.units[1].target == "Un"
assert target_store.units[2].source == "One"
assert target_store.units[2].target == "Dous"
def test_drop_duplicates(self):
"""Check converting drops duplicates."""
input_string = """
;One
Un
;One
Dous
"""
target_store = self.METHOD_NAME(input_string, duplicate_style="merge")
assert self._count_elements(target_store) == 1
assert target_store.units[1].source == "One"
assert target_store.units[1].target == "Un"
class TestLang2POCommand(test_convert.TestConvertCommand, TestLang2PO):
"""Tests running actual lang2po commands on files"""
convertmodule = mozlang2po
defaultoptions = {"progress": "none"}
expected_options = [
"-P, --pot",
"--encoding=ENCODING",
"--duplicates=DUPLICATESTYLE",
]
|
299,584 |
filter dicts keys
|
import functools
from typing import Any
#
# Decorators
#
def filter_dicts_from_results(func: callable) -> callable:
""" Filter a list of dicts.
Based on a key-value pair within the dict. A single key-value pair can be
given through the argument `filter_` OR a list of key-value pairs can be
given through the argument `filters`. Note that if the key is not present
the key is ignored completely.
Parameters
----------
func : callable
The function that returns a list of dicts.
Returns
-------
callable
The function that returns a list of dicts, filtered by the specified
filters.
"""
@functools.wraps(func)
def wrapper_filter(
*args, filter_: tuple[Any, Any] = None,
filters: list[tuple[Any, Any]] = None, **kwargs
) -> list[dict]:
"""
Apply filters to the results of the function.
Parameters
----------
*args
Positional arguments for the function.
filter_ : tuple[Any, Any], optional
A single key-value pair to filter on, by default None
filters : list[tuple[Any, Any]], optional
A list of key-value pairs to filter on, by default None
Returns
-------
list[dict]
The filtered list of dicts.
"""
dicts = func(*args, **kwargs)
if filter_:
return filter_dicts_by_values(dicts, [filter_])
return filter_dicts_by_values(dicts, filters)
return wrapper_filter
def filter_keys_from_result(func: callable) -> callable:
""" Wrapper to filter key-value pairs from a dict.
    Keeps only the key-value pairs whose keys are specified. Keys that are not
    present in the dict are ignored.
Parameters
----------
func : callable
The function that returns a dict.
Returns
-------
callable
The function that returns a dict, with only the specified keys kept.
"""
@functools.wraps(func)
def wrapper_filter(*args, field: Any = None, fields: list[Any] = None,
**kwargs) -> dict:
"""
Apply filters to the results of the function. If no filters are given,
the function returns the original dict.
Parameters
----------
*args
Positional arguments for the function.
field : Any, optional
A single key to filter the dictionary on, by default None
fields : list[Any], optional
A list of keys to filter the dictionary on, by default None
Returns
-------
dict
The filtered dictionary.
"""
dict_ = func(*args, **kwargs)
if field:
return filter_dict_keys(dict_, [field])
return filter_dict_keys(dict_, fields)
return wrapper_filter
def filter_keys_from_results(func: callable) -> callable:
""" Remove key-value pairs from a list of dicts.
    Keeps only the specified keys in all dictionaries in the list. Keys that
    are not present in a dict are ignored.
Parameters
----------
func : callable
The function that returns a list of dicts.
Returns
-------
callable
        The function that returns a list of dicts, with only the specified keys kept.
"""
@functools.wraps(func)
def wrapper_filter(*args, field: Any = None, fields: list[Any] = None,
**kwargs) -> list[dict]:
"""
Apply filters to the results of the function. If no filters are given,
the function returns the list of dicts.
Parameters
----------
*args
Positional arguments for the function.
field : Any, optional
A single key to filter the dictionaries on, by default None
fields : list[Any], optional
A list of keys to filter the dictionary on, by default None
Returns
-------
list[dict]
The filtered list of dicts.
"""
dicts = func(*args, **kwargs)
if field:
return METHOD_NAME(dicts, [field])
return METHOD_NAME(dicts, fields)
return wrapper_filter
def post_filtering(iterable: bool = True) -> callable:
"""Decorator to add filtering of dictornaries from the result.
Depending on whether this is a list of or a single dictionairy, the
decorator adds the arguments field, fields, filter, filters.
This is a wrapper for the other decorators. Note that the order of
`filter_keys_from_results` and `filter_dicts_from_results` is important as
you do want to first select the dicts that you need and then filter their
keys. This way you can filter on key-value pairs that you do not want in
your output.
Parameters
----------
iterable : bool, optional
Whether the result is a list of dicts or a single dict, by default True
Returns
-------
callable
The original function with the added decorators that filter the output
of the function by specified fields and keys.
"""
def decorator(func):
if iterable:
@functools.wraps(func)
@filter_keys_from_results
@filter_dicts_from_results
def wrapper_filter(*args, **kwargs):
return func(*args, **kwargs)
else:
@functools.wraps(func)
@filter_keys_from_result
def wrapper_filter(*args, **kwargs):
return func(*args, **kwargs)
return wrapper_filter
return decorator
#
# Helper functions
#
def filter_dicts_on_values(
dicts: list[dict], filters: list[tuple[Any, Any]]
) -> list[dict]:
"""
Filter a list of dicts on the specified key-value pairs.
Parameters
----------
dicts : list[dict]
The list of dicts to filter.
filters : list[tuple[Any, Any]]
A list of key-value pairs to filter on.
Returns
-------
list[dict]
The filtered list of dicts.
"""
filtered_dicts = []
for dict_ in dicts:
if all([dict_[filter_[0]] == filter_[1] for filter_ in filters]):
filtered_dicts.append(dict_)
return filtered_dicts
def filter_dicts_by_values(
dicts: list[dict], filters: list[tuple[Any, Any]]
) -> list[dict]:
"""
Filter a list of dicts on the specified key-value pairs.
Parameters
----------
dicts : list[dict]
The list of dicts to filter.
filters : list[tuple[Any, Any]]
A list of key-value pairs to filter on.
Returns
-------
list[dict]
The filtered list of dicts.
"""
if filters:
return filter_dicts_on_values(dicts, filters)
return dicts
def METHOD_NAME(dicts: list[dict], keys: list[str]) -> list[dict]:
"""
Filter a list of dicts on the specified keys. If no keys are given, the
original list of dicts is returned.
Parameters
----------
dicts : list[dict]
The list of dicts to filter.
keys : list[str]
A list of keys to keep in the dictionaries
Returns
-------
list[dict]
The filtered list of dicts.
"""
if keys:
return [filter_dict_keys(adict, keys) for adict in dicts]
return dicts
def filter_dict_keys(dict_: dict, keys: list[str]) -> dict:
"""
Filter a dict on the specified keys. If no keys are given, the original
dict is returned.
Parameters
----------
dict_ : dict
The dict to filter.
keys : list[str]
A list of keys to keep in the dictionary
Returns
-------
dict
The filtered dict.
"""
return {k: dict_[k] for k in keys if k in dict_} if keys else dict_
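# Illustrative sketch (not part of the module): how ``post_filtering`` adds the
# ``field``/``fields`` and ``filter_``/``filters`` keyword arguments to a
# function returning a list of dicts. The sample data is hypothetical.
if __name__ == "__main__":
    @post_filtering(iterable=True)
    def list_users() -> list[dict]:
        return [
            {"id": 1, "name": "alice", "role": "admin"},
            {"id": 2, "name": "bob", "role": "user"},
        ]

    # keep only the admins, then keep only the "name" key of each dict
    print(list_users(filter_=("role", "admin"), fields=["name"]))  # [{'name': 'alice'}]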
|
299,585 |
latest k checkpoint
|
"""checkpoint manager """
import logging
import os
import stat
import numpy as np
import mindspore as ms
_logger = logging.getLogger(__name__)
class CheckpointManager:
"""
Manage checkpoint files according to ckpt_save_policy of checkpoint.
Args:
        ckpt_save_policy (str): Checkpoint saving strategy. The optional values are None, "top_k" or "latest_k".
            None means to save every checkpoint, "top_k" means to keep the K checkpoints with the highest accuracy,
            and "latest_k" means to keep the latest K checkpoints. Default: None.
"""
def __init__(self, ckpt_save_policy=None):
self._ckpoint_filelist = []
self.ckpt_save_policy = ckpt_save_policy
@property
def ckpoint_filelist(self):
"""Get all the related checkpoint files managed here."""
return self._ckpoint_filelist
@property
def ckpoint_num(self):
"""Get the number of the related checkpoint files managed here."""
return len(self._ckpoint_filelist)
def update_ckpoint_filelist(self, directory, prefix):
"""Update the checkpoint file list."""
self._ckpoint_filelist = []
files = os.listdir(directory)
for filename in files:
if os.path.splitext(filename)[-1] == ".ckpt" and filename.startswith(prefix + "-"):
mid_name = filename[len(prefix) : -5]
                flag = not any(char.isalpha() for char in mid_name)
if flag:
self._ckpoint_filelist.append(os.path.join(directory, filename))
def remove_ckpoint_file(self, file_name):
"""Remove the specified checkpoint file from this checkpoint manager and also from the directory."""
try:
os.chmod(file_name, stat.S_IWRITE)
os.remove(file_name)
except OSError:
_logger.warning("OSError, failed to remove the older ckpt file %s.", file_name)
except ValueError:
_logger.warning("ValueError, failed to remove the older ckpt file %s.", file_name)
def remove_oldest_ckpoint_file(self):
"""Remove the oldest checkpoint file from this checkpoint manager and also from the directory."""
ckpoint_files = sorted(self._ckpoint_filelist, key=os.path.getmtime)
self.remove_ckpoint_file(ckpoint_files[0])
self._ckpoint_filelist.remove(ckpoint_files[0])
def keep_one_ckpoint_per_minutes(self, minutes, cur_time):
"""Only keep the latest one ckpt file per minutes, remove other files generated in [last_time, cur_time]."""
del_list = []
oldest_file = ""
oldest_time = cur_time
for ck_file in self._ckpoint_filelist:
modify_time = os.path.getmtime(ck_file)
if cur_time - modify_time < 60 * minutes:
del_list.append(ck_file)
if modify_time < oldest_time:
oldest_time = modify_time
oldest_file = ck_file
for mv_file in del_list:
if mv_file == oldest_file:
continue
self.remove_ckpoint_file(mv_file)
def top_K_checkpoint(self, network, K=10, metric=None, save_path=""):
"""Save and return Top K checkpoint address and accuracy."""
last_file = self._ckpoint_filelist[-1] if self._ckpoint_filelist else None
if isinstance(metric, ms.Tensor):
metric = metric.asnumpy()
if self.ckpoint_num < K or np.greater(metric, last_file[1]):
if self.ckpoint_num >= K:
delete = K - 1
if delete < 0 or self.ckpoint_num <= delete:
return
to_delete = self._ckpoint_filelist[delete:]
for d in to_delete:
self.remove_ckpoint_file(d[0])
self._ckpoint_filelist = self._ckpoint_filelist[:delete]
ms.save_checkpoint(network, save_path, async_save=True)
self._ckpoint_filelist.append((save_path, float(metric)))
self._ckpoint_filelist = sorted(self._ckpoint_filelist, key=lambda x: x[1], reverse=True)
def METHOD_NAME(self, network, K=10, save_path=""):
"""Save latest K checkpoint."""
if K and 0 < K <= self.ckpoint_num:
self.remove_oldest_ckpoint_file()
ms.save_checkpoint(network, save_path, async_save=True)
self._ckpoint_filelist.append(save_path)
def save_ckpoint(self, network, num_ckpt=10, metric=None, save_path=""):
"""Save checkpoint according to different save strategy."""
if self.ckpt_save_policy is None:
ms.save_checkpoint(network, save_path, async_save=True)
elif self.ckpt_save_policy == "top_k":
if metric is None:
raise ValueError(f"The expected 'metric' is not None, but got: {metric}.")
self.top_K_checkpoint(network, K=num_ckpt, metric=metric, save_path=save_path)
_logger.info(
"Top-k accuracy checkpoints:\n" + "\n".join(f"{ckpt}\t{acc}" for ckpt, acc in self._ckpoint_filelist)
)
return self._ckpoint_filelist
elif self.ckpt_save_policy == "latest_k":
self.METHOD_NAME(network, K=num_ckpt, save_path=save_path)
return self._ckpoint_filelist
else:
raise ValueError(
f"The expected 'ckpt_save_policy' is None, top_k or latest_k, but got: {self.ckpt_save_policy}."
)
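# Illustrative sketch (not part of the module): typical use of the manager with
# the "latest_k" policy inside a training loop; ``network`` is a hypothetical
# mindspore Cell and only the newest 5 checkpoints are kept on disk.
#
#   manager = CheckpointManager(ckpt_save_policy="latest_k")
#   for epoch in range(num_epochs):
#       ...  # train one epoch
#       manager.save_ckpoint(network, num_ckpt=5, save_path=f"./ckpt/net-{epoch}.ckpt")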
|
299,586 |
ensure minimum size
|
# Authors: see git history
#
# Copyright (c) 2010 Authors
# Licensed under the GNU GPL version 3.0 or later. See the file LICENSE for details.
import logging
from copy import copy
import inkex
from shapely.geometry import LinearRing, MultiPolygon, Polygon
from shapely.ops import polygonize, unary_union
from ..elements import EmbroideryElement
from ..i18n import _
from ..svg import get_correction_transform
from ..svg.tags import SVG_PATH_TAG
from .base import InkstitchExtension
class BreakApart(InkstitchExtension):
'''
This will break apart fill areas into separate elements.
'''
def __init__(self, *args, **kwargs):
InkstitchExtension.__init__(self, *args, **kwargs)
self.arg_parser.add_argument("-m", "--method", type=int, default=1, dest="method")
self.minimum_size = 5
def effect(self): # noqa: C901
if not self.svg.selection:
inkex.errormsg(_("Please select one or more fill areas to break apart."))
return
elements = []
nodes = self.get_nodes()
for node in nodes:
if node.tag in SVG_PATH_TAG:
elements.append(EmbroideryElement(node))
for element in elements:
if not element.get_style("fill", "black"):
continue
# we don't want to touch valid elements
paths = element.flatten(element.parse_path())
try:
paths.sort(key=lambda point_list: Polygon(point_list).area, reverse=True)
polygon = MultiPolygon([(paths[0], paths[1:])])
if self.geom_is_valid(polygon) and Polygon(paths[-1]).area > self.minimum_size:
continue
except ValueError:
pass
polygons = self.break_apart_paths(paths)
if self.options.method == 1:
polygons = self.combine_overlapping_polygons(polygons)
polygons = self.recombine_polygons(polygons)
if polygons:
self.polygons_to_nodes(polygons, element)
def break_apart_paths(self, paths):
polygons = []
for path in paths:
if len(path) < 3:
continue
linearring = LinearRing(path)
if not linearring.is_simple:
linearring = unary_union(linearring)
for polygon in polygonize(linearring):
polygons.append(polygon)
else:
polygon = Polygon(path).buffer(0)
polygons.append(polygon)
return polygons
def combine_overlapping_polygons(self, polygons):
for polygon in polygons:
for other in polygons:
if polygon == other:
continue
if polygon.overlaps(other):
diff = polygon.symmetric_difference(other)
if diff.geom_type == 'MultiPolygon':
polygons.remove(other)
polygons.remove(polygon)
for p in diff.geoms:
polygons.append(p)
                        # it is possible that a polygon overlaps with multiple
                        # polygons; in that case we need to start all over again
polygons = self.combine_overlapping_polygons(polygons)
return polygons
return polygons
def geom_is_valid(self, geom):
# Don't complain about invalid shapes, we just want to know
logger = logging.getLogger('shapely.geos')
level = logger.level
logger.setLevel(logging.CRITICAL)
valid = geom.is_valid
logger.setLevel(level)
return valid
    def METHOD_NAME(self, polygons, size):
        # filter in place; removing items while iterating over the same list would skip elements
        polygons[:] = [polygon for polygon in polygons if polygon.area >= size]
        return polygons
def recombine_polygons(self, polygons):
polygons.sort(key=lambda polygon: polygon.area, reverse=True)
multipolygons = []
holes = []
self.METHOD_NAME(polygons, self.minimum_size)
for polygon in polygons:
if polygon in holes:
continue
polygon_list = [polygon]
for other in polygons:
if polygon == other:
continue
if polygon.contains(other) and other not in holes:
if any(p.contains(other) or p.intersects(other) for p in polygon_list[1:]):
continue
holes.append(other)
# if possible let's make the hole a tiny little bit smaller, just in case, it hits the edge
# and would lead therefore to an invalid shape
o = other.buffer(-0.01)
if not o.is_empty and o.geom_type == 'Polygon':
other = o
polygon_list.append(other)
multipolygons.append(polygon_list)
return multipolygons
def polygons_to_nodes(self, polygon_list, element):
# reverse the list of polygons, we don't want to cover smaller shapes
polygon_list = polygon_list[::-1]
index = element.node.getparent().index(element.node)
for polygons in polygon_list:
if polygons[0].area < 5:
continue
el = copy(element.node)
# Set fill-rule to evenodd
style = el.get('style', ' ').split(';')
style = [s for s in style if not s.startswith('fill-rule')]
style.append('fill-rule:evenodd;')
style = ';'.join(style)
el.set('style', style)
# update element id
if len(polygon_list) > 1:
node_id = self.uniqueId(el.get('id') + '_')
el.set('id', node_id)
# Set path
d = ""
for polygon in polygons:
d += "M"
for x, y in polygon.exterior.coords:
d += "%s,%s " % (x, y)
d += " "
d += "Z"
el.set('d', d)
el.set('transform', get_correction_transform(element.node))
element.node.getparent().insert(index, el)
element.node.getparent().remove(element.node)
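# Illustrative sketch (not part of the extension): what ``break_apart_paths``
# does with a self-intersecting outline. A figure-eight ring is not "simple",
# so it is noded with ``unary_union`` and polygonized into separate polygons,
# mirroring the logic above. Coordinates are hypothetical.
#
#   eight = LinearRing([(0, 0), (2, 2), (2, 0), (0, 2), (0, 0)])
#   eight.is_simple                       # False
#   list(polygonize(unary_union(eight)))  # two triangles, one per lobe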
|
299,587 |
count binary1s
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009 Robert Webb and Luis Pedro Coelho <[email protected]>
# Copyright (C) 2011-2013 Luis Pedro Coelho <[email protected]>
#
# License: MIT (see COPYING file)
import numpy as np
from ..histogram import fullhistogram
__all__ = [
'lbp',
'lbp_transform',
]
def lbp_transform(image, radius, points, ignore_zeros=False, preserve_shape=True):
'''
    transformed = lbp_transform(image, radius, points, ignore_zeros=False, preserve_shape=True)
    Compute Local Binary Pattern Transform
    The return value is an array of the transformed pixel values, i.e. the LBP
    code of each pixel. The codes are compressed so that impossible codes are
    not used.
Parameters
----------
image : ndarray
input image (2-D numpy ndarray)
radius : number (integer or floating point)
radius (in pixels)
points : integer
nr of points to consider
ignore_zeros : boolean, optional
whether to ignore zeros. Note that if you set this to ``True``, you
will need to set ``preserve_shape`` to False. (default: False)
preserve_shape : boolean, optional
whether to return an array with the same shape as ``image``. (default:
True)
Returns
-------
    transformed : numpy ndarray
        array of LBP codes; has the same shape as ``image`` when
        ``preserve_shape`` is True, otherwise it is 1-D.
References
----------
Gray Scale and Rotation Invariant Texture Classification with Local Binary Patterns
Ojala, T. Pietikainen, M. Maenpaa, T. Lecture Notes in Computer Science (Springer)
2000, ISSU 1842, pages 404-420
'''
from ..interpolate import shift
from mahotas.features import _lbp
if ignore_zeros and preserve_shape:
raise ValueError('mahotas.features.lbp_transform: *ignore_zeros* and *preserve_shape* cannot both be used together')
image = np.asanyarray(image, dtype=np.float64)
if image.ndim != 2:
raise ValueError('mahotas.features.lbp_transform: This function is only defined for two dimensional images')
if ignore_zeros:
Y,X = np.nonzero(image)
def select(im):
return im[Y,X].ravel()
else:
select = np.ravel
pixels = select(image)
angles = np.linspace(0, 2*np.pi, points+1)[:-1]
data = []
for dy,dx in zip(np.sin(angles), np.cos(angles)):
data.append(
select(shift(image, [radius*dy,radius*dx], order=1)))
data = np.array(data)
codes = (data > pixels).astype(np.int32)
codes *= (2**np.arange(points)[:,np.newaxis])
codes = codes.sum(0)
codes = _lbp.map(codes.astype(np.uint32), points)
if preserve_shape:
codes = codes.reshape(image.shape)
return codes
def METHOD_NAME(array):
'''
one_count = count_binary1s(array)
Count the number of 1s in the binary representation of integer values
Definition::
one_count.flat[i] == nr_of_1s_in_binary_representation_of(array.flat[i])
Parameters
----------
array : ndarray
input array
Returns
-------
one_count : ndarray
output array of same type & shape as array
'''
from ..internal import _verify_is_integer_type
array = np.array(array)
_verify_is_integer_type(array, 'mahotas.features.lbp.count_binary1s')
maxv = 1+int(np.log2(1+array.max()))
counts = np.zeros_like(array)
for _ in range(maxv):
counts += (array & 1)
array >>= 1
return counts
def lbp(image, radius, points, ignore_zeros=False):
'''
features = lbp(image, radius, points, ignore_zeros=False)
    Compute Local Binary Patterns
The return value is a **histogram** of feature counts, where position ``i``
corresponds to the number of pixels that had code ``i``. The codes are
compressed so that impossible codes are not used. Therefore, this is the
``i``th feature, not just the feature with binary code ``i``.
Parameters
----------
image : ndarray
input image (2-D numpy ndarray)
radius : number (integer or floating point)
radius (in pixels)
points : integer
nr of points to consider
ignore_zeros : boolean, optional
whether to ignore zeros (default: False)
Returns
-------
features : 1-D numpy ndarray
histogram of features. See above for a caveat on the interpretation of
these.
References
----------
Gray Scale and Rotation Invariant Texture Classification with Local Binary Patterns
Ojala, T. Pietikainen, M. Maenpaa, T. Lecture Notes in Computer Science (Springer)
2000, ISSU 1842, pages 404-420
'''
from mahotas.features import _lbp
codes = lbp_transform(image, radius, points, ignore_zeros=ignore_zeros, preserve_shape=False)
final = fullhistogram(codes.astype(np.uint32))
codes = np.arange(2**points, dtype=np.uint32)
iters = codes.copy()
codes = _lbp.map(codes.astype(np.uint32), points)
pivots = (codes == iters)
npivots = np.sum(pivots)
compressed = final[pivots[:len(final)]]
compressed = np.append(compressed, np.zeros(npivots - len(compressed)))
return compressed
def lbp_names(radius, points):
'''Return list of names (string) for LBP features
Parameters
----------
radius : number (integer or floating point)
radius (in pixels)
points : integer
nr of points to consider
Returns
-------
names : list of str
See Also
--------
lbp : function
Compute LBP features
'''
from mahotas.features import _lbp
codes = np.arange(2**points, dtype=np.uint32)
iters = codes.copy()
codes = _lbp.map(codes.astype(np.uint32), points)
pivots = (codes == iters)
npivots = np.sum(pivots)
return ['lbp_r{}_p{}_{}'.format(radius, points, i) for i in range(npivots)]
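# Illustrative sketch (not part of the module): computing LBP features for a
# hypothetical grayscale image and pairing them with their names.
#
#   import numpy as np
#   img = (np.random.rand(64, 64) * 255).astype(np.uint8)  # stand-in image
#   feats = lbp(img, radius=3, points=8)
#   names = lbp_names(radius=3, points=8)
#   assert len(feats) == len(names)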
|
299,588 |
test list diff
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.listdiff_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
_TYPES = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string
]
class ListDiffTest(test.TestCase):
def METHOD_NAME(self, x, y, out, idx):
for dtype in _TYPES:
if dtype == dtypes.string:
x = [compat.as_bytes(str(a)) for a in x]
y = [compat.as_bytes(str(a)) for a in y]
out = [compat.as_bytes(str(a)) for a in out]
for diff_func in [array_ops.setdiff1d]:
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session() as sess:
x_tensor = ops.convert_to_tensor(x, dtype=dtype)
y_tensor = ops.convert_to_tensor(y, dtype=dtype)
out_tensor, idx_tensor = diff_func(x_tensor, y_tensor,
index_dtype=index_dtype)
tf_out, tf_idx = self.evaluate([out_tensor, idx_tensor])
self.assertAllEqual(tf_out, out)
self.assertAllEqual(tf_idx, idx)
self.assertEqual(1, out_tensor.get_shape().ndims)
self.assertEqual(1, idx_tensor.get_shape().ndims)
def testBasic1(self):
x = [1, 2, 3, 4]
y = [1, 2]
out = [3, 4]
idx = [2, 3]
self.METHOD_NAME(x, y, out, idx)
def testBasic2(self):
x = [1, 2, 3, 4]
y = [2]
out = [1, 3, 4]
idx = [0, 2, 3]
self.METHOD_NAME(x, y, out, idx)
def testBasic3(self):
x = [1, 4, 3, 2]
y = [4, 2]
out = [1, 3]
idx = [0, 2]
self.METHOD_NAME(x, y, out, idx)
def testDuplicates(self):
x = [1, 2, 4, 3, 2, 3, 3, 1]
y = [4, 2]
out = [1, 3, 3, 3, 1]
idx = [0, 3, 5, 6, 7]
self.METHOD_NAME(x, y, out, idx)
def testRandom(self):
num_random_tests = 10
int_low = -7
int_high = 8
max_size = 50
for _ in xrange(num_random_tests):
x_size = np.random.randint(max_size + 1)
x = np.random.randint(int_low, int_high, size=x_size)
y_size = np.random.randint(max_size + 1)
y = np.random.randint(int_low, int_high, size=y_size)
out_idx = [(entry, pos) for pos, entry in enumerate(x) if entry not in y]
if out_idx:
out, idx = map(list, zip(*out_idx))
else:
out = []
idx = []
self.METHOD_NAME(list(x), list(y), out, idx)
def testFullyOverlapping(self):
x = [1, 2, 3, 4]
y = [1, 2, 3, 4]
out = []
idx = []
self.METHOD_NAME(x, y, out, idx)
def testNonOverlapping(self):
x = [1, 2, 3, 4]
y = [5, 6]
out = x
idx = np.arange(len(x))
self.METHOD_NAME(x, y, out, idx)
def testEmptyX(self):
x = []
y = [1, 2]
out = []
idx = []
self.METHOD_NAME(x, y, out, idx)
def testEmptyY(self):
x = [1, 2, 3, 4]
y = []
out = x
idx = np.arange(len(x))
self.METHOD_NAME(x, y, out, idx)
def testEmptyXY(self):
x = []
y = []
out = []
idx = []
self.METHOD_NAME(x, y, out, idx)
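  # Illustrative note (not part of the test): the semantics exercised above.
  # setdiff1d(x, y) keeps the elements of x that are not in y, preserving order
  # and duplicates, and also returns their indices into x:
  #
  #   out, idx = array_ops.setdiff1d([1, 2, 4, 3, 2, 3, 3, 1], [4, 2])
  #   # out -> [1, 3, 3, 3, 1], idx -> [0, 3, 5, 6, 7]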
if __name__ == "__main__":
test.main()
|
299,589 |
test implicit head
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.views
~~~~~~~~~~~~~~~~~~~~~
Pluggable views.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import flask.views
import unittest
from flask.testsuite import FlaskTestCase
from werkzeug.http import parse_set_header
class ViewTestCase(FlaskTestCase):
def common_test(self, app):
c = app.test_client()
self.assert_equal(c.get('/').data, b'GET')
self.assert_equal(c.post('/').data, b'POST')
self.assert_equal(c.put('/').status_code, 405)
meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow'])
self.assert_equal(sorted(meths), ['GET', 'HEAD', 'OPTIONS', 'POST'])
def test_basic_view(self):
app = flask.Flask(__name__)
class Index(flask.views.View):
methods = ['GET', 'POST']
def dispatch_request(self):
return flask.request.method
app.add_url_rule('/', view_func=Index.as_view('index'))
self.common_test(app)
def test_method_based_view(self):
app = flask.Flask(__name__)
class Index(flask.views.MethodView):
def get(self):
return 'GET'
def post(self):
return 'POST'
app.add_url_rule('/', view_func=Index.as_view('index'))
self.common_test(app)
def test_view_patching(self):
app = flask.Flask(__name__)
class Index(flask.views.MethodView):
def get(self):
1 // 0
def post(self):
1 // 0
class Other(Index):
def get(self):
return 'GET'
def post(self):
return 'POST'
view = Index.as_view('index')
view.view_class = Other
app.add_url_rule('/', view_func=view)
self.common_test(app)
def test_view_inheritance(self):
app = flask.Flask(__name__)
class Index(flask.views.MethodView):
def get(self):
return 'GET'
def post(self):
return 'POST'
class BetterIndex(Index):
def delete(self):
return 'DELETE'
app.add_url_rule('/', view_func=BetterIndex.as_view('index'))
c = app.test_client()
meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow'])
self.assert_equal(sorted(meths), ['DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST'])
def test_view_decorators(self):
app = flask.Flask(__name__)
def add_x_parachute(f):
def new_function(*args, **kwargs):
resp = flask.make_response(f(*args, **kwargs))
resp.headers['X-Parachute'] = 'awesome'
return resp
return new_function
class Index(flask.views.View):
decorators = [add_x_parachute]
def dispatch_request(self):
return 'Awesome'
app.add_url_rule('/', view_func=Index.as_view('index'))
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.headers['X-Parachute'], 'awesome')
self.assert_equal(rv.data, b'Awesome')
def METHOD_NAME(self):
app = flask.Flask(__name__)
class Index(flask.views.MethodView):
def get(self):
return flask.Response('Blub', headers={
'X-Method': flask.request.method
})
app.add_url_rule('/', view_func=Index.as_view('index'))
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.data, b'Blub')
self.assert_equal(rv.headers['X-Method'], 'GET')
rv = c.head('/')
self.assert_equal(rv.data, b'')
self.assert_equal(rv.headers['X-Method'], 'HEAD')
def test_explicit_head(self):
app = flask.Flask(__name__)
class Index(flask.views.MethodView):
def get(self):
return 'GET'
def head(self):
return flask.Response('', headers={'X-Method': 'HEAD'})
app.add_url_rule('/', view_func=Index.as_view('index'))
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.data, b'GET')
rv = c.head('/')
self.assert_equal(rv.data, b'')
self.assert_equal(rv.headers['X-Method'], 'HEAD')
def test_endpoint_override(self):
app = flask.Flask(__name__)
app.debug = True
class Index(flask.views.View):
methods = ['GET', 'POST']
def dispatch_request(self):
return flask.request.method
app.add_url_rule('/', view_func=Index.as_view('index'))
with self.assert_raises(AssertionError):
app.add_url_rule('/', view_func=Index.as_view('index'))
# But these tests should still pass. We just log a warning.
self.common_test(app)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ViewTestCase))
return suite
|
299,590 |
test report state of the web json
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from server import app, talisman
import pytest
# Create test client without https redirect
# (normally taken care of by running in debug)
@pytest.fixture
def client():
with app.test_client() as client:
talisman.force_https = False
yield client
# Add a function to test routes with optional location
def assert_route(client, path, status, location=None):
response = client.get(path)
redirect_loc = response.location
if redirect_loc:
redirect_loc = redirect_loc.replace("http://localhost", "")
if location is not None:
assert response.status_code == status and redirect_loc == location
else:
assert response.status_code == status
def test_index(client):
assert_route(client, "/", 200)
def test_reports(client):
assert_route(client, "/reports", 200)
def test_reports_with_slash(client):
assert_route(client, "/reports/", 301, "/reports")
def test_report_state_of_the_web(client):
assert_route(client, "/reports/state-of-the-web", 200)
def test_report_state_of_the_web_with_slash(client):
assert_route(client, "/reports/state-of-the-web/", 301, "/reports/state-of-the-web")
def test_reports_www(client):
assert_route(
client,
"https://www.httparchive.org/reports",
301,
"https://httparchive.org/reports",
)
def test_reports_beta(client):
assert_route(
client,
"https://beta.httparchive.org/reports",
301,
"https://httparchive.org/reports",
)
def test_reports_legacy(client):
assert_route(
client,
"https://legacy.httparchive.org/reports",
301,
"https://httparchive.org/reports",
)
def test_report_state_of_the_web_lens(client):
response = client.get("/reports/state-of-the-web?lens=top1k")
assert (
response.status_code == 200
and '<option value="top1k" selected>' in response.get_data(as_text=True)
)
def test_reports_json(client):
response = client.get("/reports?f=json")
assert response.status_code == 200 and "application/json" in response.headers.get(
"Content-Type"
)
def METHOD_NAME(client):
response = client.get("/reports/state-of-the-web?f=json")
assert response.status_code == 200 and "application/json" in response.headers.get(
"Content-Type"
)
def test_invalid_report(client):
assert_route(client, "/reports/test", 404)
def test_report_invalid_start_date(client):
assert_route(
client, "/reports/state-of-the-web?start=1900_05_15&end=latest&view=list", 400
)
def test_report_invalid_end_date(client):
assert_route(
client, "/reports/state-of-the-web?start=earliest&end=1900_05_15&view=list", 400
)
def test_report_crux_max_date(client):
assert_route(client, "/reports/chrome-ux-report", 200)
def test_report_latest(client):
assert_route(client, "/reports/state-of-the-web?end=latest&view=list", 200)
def test_report_earliest(client):
assert_route(client, "/reports/state-of-the-web?start=earliest&view=list", 200)
def test_report_earliest_end(client):
assert_route(
client, "/reports/state-of-the-web?start=earliest&end=earliest&view=list", 200
)
def test_about(client):
assert_route(client, "/about", 200)
def test_about_with_slash(client):
assert_route(client, "/about/", 301, "/about")
def test_faq(client):
assert_route(client, "/faq", 200)
def test_faq_with_slash(client):
assert_route(client, "/faq/", 301, "/faq")
def test_faq_legacy(client):
assert_route(
client,
"/downloads.php",
301,
"/faq#how-do-i-use-bigquery-to-write-custom-queries-over-the-data",
)
def test_legacy_page(client):
assert_route(client, "/index.php", 301, "/")
def test_robots_txt(client):
response = client.get("/robots.txt")
assert response.status_code == 200 and "text/plain" in response.headers.get(
"Content-Type"
)
def test_sitemap(client):
response = client.get("/sitemap.xml")
assert response.status_code == 200 and "text/xml" in response.headers.get(
"Content-Type"
)
def test_favicon(client):
response = client.get("/favicon.ico")
# Note flask sometimes returns image/x-icon and sometimes image/vnd.microsoft.icon
assert response.status_code == 200 and "image/" in response.headers.get(
"Content-Type"
)
def test_metric(client):
response = client.get("/metric.json")
assert response.status_code == 200 and "id parameter required" in response.get_data(
as_text=True
)
def test_metric_speedindex(client):
response = client.get("/metric.json?id=speedIndex")
assert (
response.status_code == 200
and '"description":"How quickly the contents of a page'
in response.get_data(as_text=True)
)
def test_external_report(client):
assert_route(
client,
"/reports/cwv-tech",
302,
"https://datastudio.google.com/u/0/reporting/55bc8fad-44c2-4280-aa0b-5f3f0cd3d2be/page/M6ZPC",
)
def test_render_efonts_cache_control(client):
response = client.get("/static/fonts/opensans-latin-700.woff2")
assert response.status_code == 200 and "max-age=3153600" in response.headers.get(
"Cache-Control"
)
def test_render_js_cache_control(client):
response = client.get("/static/js/main.js")
assert response.status_code == 200 and "max-age=10800" in response.headers.get(
"Cache-Control"
)
|
299,591 |
test export csv
|
"""Unit testing for BOM export functionality."""
import csv
from django.urls import reverse
from InvenTree.unit_test import InvenTreeTestCase
class BomExportTest(InvenTreeTestCase):
"""Class for performing unit testing of BOM export functionality"""
fixtures = [
'category',
'part',
'location',
'bom',
]
roles = 'all'
def setUp(self):
"""Perform test setup functions"""
super().setUp()
self.url = reverse('api-bom-download', kwargs={'pk': 100})
def test_bom_template(self):
"""Test that the BOM template can be downloaded from the server."""
url = reverse('api-bom-upload-template')
# Download an XLS template
response = self.client.get(url, data={'format': 'xls'})
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.headers['Content-Disposition'],
'attachment; filename="InvenTree_BOM_Template.xls"'
)
# Return a simple CSV template
response = self.client.get(url, data={'format': 'csv'})
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.headers['Content-Disposition'],
'attachment; filename="InvenTree_BOM_Template.csv"'
)
filename = '_tmp.csv'
with open(filename, 'wb') as f:
f.write(response.getvalue())
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=',')
for line in reader:
headers = line
break
expected = [
'Part ID',
'Part IPN',
'Quantity',
'Reference',
'Note',
'optional',
'overage',
'inherited',
'allow_variants',
]
# Ensure all the expected headers are in the provided file
for header in expected:
self.assertIn(header, headers)
def METHOD_NAME(self):
"""Test BOM download in CSV format."""
params = {
'format': 'csv',
'cascade': True,
'parameter_data': True,
'stock_data': True,
'supplier_data': True,
'manufacturer_data': True,
}
response = self.client.get(self.url, data=params)
self.assertEqual(response.status_code, 200)
content = response.headers['Content-Disposition']
self.assertEqual(content, 'attachment; filename="BOB | Bob | A2_BOM.csv"')
filename = '_tmp.csv'
with open(filename, 'wb') as f:
f.write(response.getvalue())
# Read the file
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=',')
for line in reader:
headers = line
break
expected = [
'BOM Level',
'BOM Item ID',
'Parent ID',
'Parent IPN',
'Parent Name',
'Part ID',
'Part IPN',
'Part Name',
'Description',
'Assembly',
'Quantity',
'optional',
'consumable',
'overage',
'Reference',
'Note',
'inherited',
'allow_variants',
'Default Location',
'Total Stock',
'Available Stock',
'On Order',
]
for header in expected:
self.assertIn(header, headers)
for header in headers:
self.assertIn(header, expected)
def test_export_xls(self):
"""Test BOM download in XLS format."""
params = {
'format': 'xls',
'cascade': True,
'parameter_data': True,
'stock_data': True,
'supplier_data': True,
'manufacturer_data': True,
}
response = self.client.get(self.url, data=params)
self.assertEqual(response.status_code, 200)
content = response.headers['Content-Disposition']
self.assertEqual(content, 'attachment; filename="BOB | Bob | A2_BOM.xls"')
def test_export_xlsx(self):
"""Test BOM download in XLSX format."""
params = {
'format': 'xlsx',
'cascade': True,
'parameter_data': True,
'stock_data': True,
'supplier_data': True,
'manufacturer_data': True,
}
response = self.client.get(self.url, data=params)
self.assertEqual(response.status_code, 200)
def test_export_json(self):
"""Test BOM download in JSON format."""
params = {
'format': 'json',
'cascade': True,
'parameter_data': True,
'stock_data': True,
'supplier_data': True,
'manufacturer_data': True,
}
response = self.client.get(self.url, data=params)
self.assertEqual(response.status_code, 200)
content = response.headers['Content-Disposition']
self.assertEqual(content, 'attachment; filename="BOB | Bob | A2_BOM.json"')
|
299,592 |
ago text
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for use by the uploader command line tool."""
import datetime
import errno
import os
import os.path
import time
class RateLimiter:
"""Helper class for rate-limiting using a fixed minimum interval."""
def __init__(self, interval_secs):
"""Constructs a RateLimiter that permits a tick() every
`interval_secs`."""
self._time = time # Use property for ease of testing.
self._interval_secs = interval_secs
self._last_called_secs = 0
def tick(self):
"""Blocks until it has been at least `interval_secs` since last
tick()."""
wait_secs = (
self._last_called_secs + self._interval_secs - self._time.time()
)
if wait_secs > 0:
self._time.sleep(wait_secs)
self._last_called_secs = self._time.time()
def get_user_config_directory():
"""Returns a platform-specific root directory for user config settings."""
# On Windows, prefer %LOCALAPPDATA%, then %APPDATA%, since we can expect the
# AppData directories to be ACLed to be visible only to the user and admin
# users (https://stackoverflow.com/a/7617601/1179226). If neither is set,
# return None instead of falling back to something that may be world-readable.
if os.name == "nt":
appdata = os.getenv("LOCALAPPDATA")
if appdata:
return appdata
appdata = os.getenv("APPDATA")
if appdata:
return appdata
return None
# On non-windows, use XDG_CONFIG_HOME if set, else default to ~/.config.
xdg_config_home = os.getenv("XDG_CONFIG_HOME")
if xdg_config_home:
return xdg_config_home
return os.path.join(os.path.expanduser("~"), ".config")
def make_file_with_directories(path, private=False):
"""Creates a file and its containing directories, if they don't already
exist.
If `private` is True, the file will be made private (readable only by the
current user) and so will the leaf directory. Pre-existing contents of the
file are not modified.
Passing `private=True` is not supported on Windows because it doesn't support
the relevant parts of `os.chmod()`.
Args:
path: str, The path of the file to create.
private: boolean, Whether to make the file and leaf directory readable only
by the current user.
Raises:
RuntimeError: If called on Windows with `private` set to True.
"""
if private and os.name == "nt":
raise RuntimeError("Creating private file not supported on Windows")
try:
path = os.path.realpath(path)
leaf_dir = os.path.dirname(path)
try:
os.makedirs(leaf_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if private:
os.chmod(leaf_dir, 0o700)
open(path, "a").close()
if private:
os.chmod(path, 0o600)
except EnvironmentError as e:
raise RuntimeError("Failed to create file %s: %s" % (path, e))
def set_timestamp(pb, seconds_since_epoch):
"""Sets a `Timestamp` proto message to a floating point UNIX time.
This is like `pb.FromNanoseconds(int(seconds_since_epoch * 1e9))` but
without introducing floating-point error.
Args:
pb: A `google.protobuf.Timestamp` message to mutate.
seconds_since_epoch: A `float`, as returned by `time.time`.
"""
pb.seconds = int(seconds_since_epoch)
pb.nanos = int(round((seconds_since_epoch % 1) * 10**9))
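# Worked note (illustrative, not part of the module): computing
# int(seconds_since_epoch * 1e9) directly adds an extra float rounding step on
# a ~19-digit product that a 53-bit double cannot represent exactly. Splitting
# first, as set_timestamp does above, keeps the integer seconds exact and
# confines rounding to the sub-second remainder:
#
#   t = 1234567890.123456789              # hypothetical time.time() value
#   pb.seconds = int(t)                   # exact integer part
#   pb.nanos = int(round((t % 1) * 1e9))  # only the fraction is rounded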
def format_time(timestamp_pb, now=None):
"""Converts a `timestamp_pb2.Timestamp` to human-readable string.
This always includes the absolute date and time, and for recent dates
may include a relative time like "(just now)" or "(2 hours ago)". It
should thus be used for ephemeral values. Use `format_time_absolute`
if the output will be persisted.
Args:
timestamp_pb: A `google.protobuf.timestamp_pb2.Timestamp` value to
convert to string. The input will not be modified.
now: A `datetime.datetime` object representing the current time,
used for determining relative times like "just now". Optional;
defaults to `datetime.datetime.now()`.
Returns:
A string suitable for human consumption.
"""
# Add and subtract a day for <https://bugs.python.org/issue29097>,
# which breaks early datetime conversions on Windows for small
# timestamps.
dt = datetime.datetime.fromtimestamp(timestamp_pb.seconds + 86400)
dt = dt - datetime.timedelta(seconds=86400)
if now is None:
now = datetime.datetime.now()
ago = now.replace(microsecond=0) - dt
def METHOD_NAME(n, singular, plural):
return "%d %s ago" % (n, singular if n == 1 else plural)
relative = None
if ago < datetime.timedelta(seconds=5):
relative = "just now"
elif ago < datetime.timedelta(minutes=1):
relative = METHOD_NAME(int(ago.total_seconds()), "second", "seconds")
elif ago < datetime.timedelta(hours=1):
relative = METHOD_NAME(int(ago.total_seconds()) // 60, "minute", "minutes")
elif ago < datetime.timedelta(days=1):
relative = METHOD_NAME(int(ago.total_seconds()) // 3600, "hour", "hours")
relative_part = " (%s)" % relative if relative is not None else ""
return str(dt) + relative_part
def format_time_absolute(timestamp_pb):
"""Converts a `timestamp_pb2.Timestamp` to UTC time string.
This will always be of the form "2001-02-03T04:05:06Z".
Args:
timestamp_pb: A `google.protobuf.timestamp_pb2.Timestamp` value to
convert to string. The input will not be modified.
Returns:
An RFC 3339 date-time string.
"""
dt = datetime.datetime.fromtimestamp(
timestamp_pb.seconds, tz=datetime.timezone.utc
)
return dt.strftime("%Y-%m-%dT%H:%M:%SZ")
def _ngettext(n, singular, plural):
return "%d %s ago" % (n, singular if n == 1 else plural)
|
299,593 |
run
|
"""
Grading plugins for Otter
"""
import importlib
from .abstract_plugin import AbstractOtterPlugin, PluginEventNotSupportedException
from ..utils import print_full_width
class PluginCollection:
"""
Class for loading, organizing, and running plugins during grading. This class is instantiated with
a list of plugin names, which should be importable strings that evaluate to objects that inherit
from ``otter.plugins.AbstractOtterPlugin``.
When this class is instantiated, each plugin is imported and passed its configurations specified
in the ``otter_config.json``. Plugins should be listed in ``otter_config.json`` in the ``plugins``
key:
.. code-block:: json
{
"plugins": [
"some_otter_plugin_package.SomeOtterPlugin",
{
"some_other_otter_plugin_package.SomeOtherOtterPlugin": {
"some_key": "some_value"
}
}
]
}
Args:
plugins (``list[Union[str,dict[str:Any]]]``): the importable names of plugin classes (e.g.
``some_package.SomePlugin``) and their configurations
submission_path (``str``): the absolute path to the submission being graded
submission_metadata (``dict[str:Any]``): submission metadata
"""
@staticmethod
def _parse_plugin_config(plugin_config):
if not isinstance(plugin_config, list):
raise ValueError(f"Invalid plugin config: {plugin_config}")
result = []
for plg in plugin_config:
if isinstance(plg, str):
result.append({
"plugin": plg,
"config": {},
})
elif isinstance(plg, dict):
keys = list(plg.keys())
if len(keys) != 1:
raise ValueError(f"Invalid plugin specification: {plg}")
result.append({
"plugin": keys[0],
"config": plg[keys[0]],
})
return result
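# Illustrative note (plugin names are hypothetical): _parse_plugin_config normalizes
# both accepted forms into a list of {"plugin": ..., "config": ...} dicts, e.g.
#   _parse_plugin_config(["pkg_a.PluginA", {"pkg_b.PluginB": {"key": "value"}}])
#   -> [{"plugin": "pkg_a.PluginA", "config": {}},
#       {"plugin": "pkg_b.PluginB", "config": {"key": "value"}}]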
def __init__(self, plugins, submission_path, submission_metadata):
self._plugin_config = self._parse_plugin_config(plugins)
self._plugins = None
self._subm_path = submission_path
self._subm_meta = submission_metadata
self._plugins = self._load_plugins(self._plugin_config, submission_path, submission_metadata)
@property
def _plugin_names(self):
"""
The importable names of all of the plugins tracked
"""
return [p["plugin"] for p in self._plugin_config]
@staticmethod
def _load_plugins(plugin_config, submission_path, submission_metadata):
"""
Loads each plugin in ``plugin_config`` by importing it with ``importlib`` and creating
an instance with the ``submission_metadata`` and the configurations from ``plugin_config``
for that plugin.
Args:
plugin_config (``list[dict[str:Any]]``): the parsed plugin configurations
submission_path (``str``): the absolute path to the submission being graded
submission_metadata (``dict``): submission metadata
Returns:
``list[AbstractOtterPlugin]``: the list of instantiated plugins
"""
plugins = []
for plg_cfg in plugin_config:
plg, cfg = plg_cfg["plugin"], plg_cfg["config"]
module, class_ = ".".join(plg.split(".")[:-1]), plg.split(".")[-1]
module = importlib.import_module(module)
class_ = getattr(module, class_)
# get the config key for the plugin
# plugin_cfg = plugin_config.get(class_.PLUGIN_CONFIG_KEY, {})
plugin = class_(submission_path, submission_metadata, cfg)
plugins.append(plugin)
return plugins
def add_new_plugins(self, raw_plugin_config):
"""
Add any new plugins specified in ``raw_plugin_config`` to this plugin collection. Any plugins
listed that have already been instantiated here are *not* added.
Args:
raw_plugin_config (``list[Union[str,dict[str:Any]]]``): the importable names of plugin classes (e.g.
``some_package.SomePlugin``) and their configurations
"""
plg_cfg = self._parse_plugin_config(raw_plugin_config)
for i, plg in list(enumerate(plg_cfg))[::-1]:
name = plg["plugin"]
if any(c["plugin"] == plg["plugin"] for c in self._plugin_config):
plg_cfg.pop(i)
self._plugin_config.extend(plg_cfg)
self._plugins.extend(self._load_plugins(plg_cfg, self._subm_path, self._subm_meta))
def METHOD_NAME(self, event, *args, **kwargs):
"""
Runs the method ``event`` of each plugin in this collection. Passes ``args`` and ``kwargs``
to this method. Ignores plugins that raise ``PluginEventNotSupportedException`` for this event.
Args:
event (``str``): name of the method of the plugin to run
*args, **kwargs (any): arguments for the method
Returns:
``list[Any]``: the values returned by each plugin for the called event
"""
# TODO: logging to stdout
rets = []
for plugin in self._plugins:
try:
if hasattr(plugin, event):
ret = getattr(plugin, event)(*args, **kwargs)
rets.append(ret)
else:
rets.append(None)
except PluginEventNotSupportedException:
rets.append(None)
return rets
def before_execution(self, submission):
"""
Runs the ``before_execution`` event for each plugin, composing the results of each (i.e. the
transformed notebook returned by one plugin is passed to the next plugin).
Args:
submission (``Union[str,nbformat.NotebookNode]``): the submission to be executed
"""
event = "before_execution"
for plugin in self._plugins:
try:
if hasattr(plugin, event):
submission = getattr(plugin, event)(submission)
except PluginEventNotSupportedException:
pass
return submission
def generate_report(self):
"""
Runs the ``generate_report`` event of each plugin, formatting and concatenating them into a
single string and returning it.
Returns:
``str``: the plugin report
"""
reports = self.METHOD_NAME("generate_report")
if not any(isinstance(r, str) for r in reports):
return ""
# header = "=" * 35 + " PLUGIN REPORT " + "=" * 35
header = print_full_width("=", mid_text="PLUGIN REPORT", ret_str=True)
footer = "=" * len(header)
report = header
for r, plg in zip(reports, self._plugin_names):
if not isinstance(r, str):
continue
# title = f" {plg} Report "
# dashes = len(header) - len(title)
# if dashes > 4:
# if dashes % 2 == 0:
# ld, rd = dashes // 2, dashes // 2
# else:
# ld, rd = dashes // 2, dashes // 2 + 1
# else:
# ld, rd = 2, 2
# title = "-" * ld + title + "-" * rd
title = print_full_width("-", mid_text=f"{plg} Report", ret_str=True)
body = "\n" + title + "\n" + r + "\n"
report += "\n" + body
report += "\n" + footer
return report
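# Hedged usage sketch (plugin name, path, metadata, and event name are hypothetical):
#   plugins = PluginCollection(
#       ["some_otter_plugin_package.SomeOtterPlugin"],
#       "/path/to/submission.ipynb",
#       {"submission_id": 1},
#   )
#   plugins.METHOD_NAME("some_event")   # dispatch an event to every plugin
#   print(plugins.generate_report())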
|
299,594 |
watch nodes
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import tempfile
from typing import List
import pytest
from .... import oscar as mo
from ....utils import Timer
from ....tests.core import flaky
from ..core import NodeRole, NodeStatus
from ..supervisor.locator import SupervisorPeerLocatorActor
from ..supervisor.node_info import NodeInfoCollectorActor
from ..tests import backend
from ..worker.locator import WorkerSupervisorLocatorActor
del backend
class MockNodeInfoCollectorActor(mo.Actor):
def __init__(self):
self._node_infos = dict()
self._version = 0
def set_all_node_infos(self, node_infos):
self._node_infos = node_infos
def get_nodes_info(self, *args, **kwargs):
return self._node_infos
async def METHOD_NAME(self, *args, version=None, **kwargs):
await asyncio.sleep(0.5)
self._version += 1
return self._version, self._node_infos
def put_starting_nodes(self, nodes: List[str], role: NodeRole):
for node in nodes:
self._node_infos[node] = NodeStatus.STARTING
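# Hedged note: the watch method above (METHOD_NAME) mimics a long-poll API - a caller
# passes the version it last saw and gets back (new_version, node_infos) once something
# has "changed" (simulated here by a 0.5 s sleep), e.g.
#   version, infos = await info_ref.METHOD_NAME()
#   version, infos = await info_ref.METHOD_NAME(version=version)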
@pytest.fixture
async def actor_pool():
pool = await mo.create_actor_pool("127.0.0.1", n_process=0)
async with pool:
await mo.create_actor(
MockNodeInfoCollectorActor,
uid=NodeInfoCollectorActor.default_uid(),
address=pool.external_address,
)
yield pool
@pytest.mark.asyncio
async def test_fixed_locator(actor_pool):
addresses = ["1.2.3.4:1234", "1.2.3.4:1235", "1.2.3.4:1236", "1.2.3.4:1237"]
locator_ref = await mo.create_actor(
SupervisorPeerLocatorActor,
"fixed",
",".join(addresses),
address=actor_pool.external_address,
)
assert await locator_ref.get_supervisor("mock_name") in addresses
dbl_addrs = await locator_ref.get_supervisor("mock_name", 2)
assert len(dbl_addrs) == 2
assert all(addr in addresses for addr in dbl_addrs)
with Timer() as timer:
await locator_ref.wait_all_supervisors_ready()
assert timer.duration < 0.1
await mo.destroy_actor(locator_ref)
@pytest.fixture
def temp_address_file():
with tempfile.TemporaryDirectory(prefix="mars-test") as dir_name:
yield os.path.join(dir_name, "addresses")
@flaky(max_runs=3)
@pytest.mark.asyncio
async def test_supervisor_peer_locator(actor_pool, temp_address_file):
addresses = ["1.2.3.4:1234", "1.2.3.4:1235", "1.2.3.4:1236", "1.2.3.4:1237"]
with open(temp_address_file, "w") as file_obj:
file_obj.write("\n".join(addresses))
locator_ref = await mo.create_actor(
SupervisorPeerLocatorActor,
"test",
temp_address_file,
uid=SupervisorPeerLocatorActor.default_uid(),
address=actor_pool.external_address,
)
# test starting nodes filled
info_ref = await mo.actor_ref(
uid=NodeInfoCollectorActor.default_uid(), address=actor_pool.external_address
)
assert set(await info_ref.get_nodes_info()) == set(addresses)
# test watch nodes changes
version, result = await asyncio.wait_for(
locator_ref.watch_supervisors_by_keys(["mock_name"]),
timeout=30,
)
assert result[0] in addresses
with open(temp_address_file, "w") as file_obj:
file_obj.write("\n".join(addresses[2:]))
version, result = await asyncio.wait_for(
locator_ref.watch_supervisors_by_keys(["mock_name"], version=version),
timeout=30,
)
assert result[0] in addresses[2:]
# test wait all supervisors ready
with open(temp_address_file, "w") as file_obj:
file_obj.write("\n".join(f"{a},{idx % 2}" for idx, a in enumerate(addresses)))
async def delay_read_fun():
await asyncio.sleep(0.2)
with open(temp_address_file, "w") as file_obj:
file_obj.write(
"\n".join(f"{a},{(idx + 1) % 2}" for idx, a in enumerate(addresses))
)
await asyncio.sleep(0.5)
with open(temp_address_file, "w") as file_obj:
file_obj.write("\n".join(addresses))
asyncio.create_task(delay_read_fun())
with Timer() as timer:
await asyncio.wait_for(locator_ref.wait_all_supervisors_ready(), timeout=30)
assert timer.duration > 0.4
await mo.destroy_actor(locator_ref)
@flaky(max_runs=3)
@pytest.mark.asyncio
async def test_worker_supervisor_locator(actor_pool, temp_address_file):
addresses = [actor_pool.external_address]
with open(temp_address_file, "w") as file_obj:
file_obj.write("\n".join(addresses))
locator_ref = await mo.create_actor(
WorkerSupervisorLocatorActor,
"test",
temp_address_file,
uid=WorkerSupervisorLocatorActor.default_uid(),
address=actor_pool.external_address,
)
info_ref = await mo.actor_ref(
uid=NodeInfoCollectorActor.default_uid(), address=actor_pool.external_address
)
await info_ref.set_all_node_infos({actor_pool.external_address: NodeStatus.READY})
# test watch nodes changes
supervisors = await locator_ref.get_supervisors(filter_ready=False)
assert supervisors == addresses
version, result = await asyncio.wait_for(
locator_ref.watch_supervisors_by_keys(["mock_name"]),
timeout=30,
)
assert result[0] in addresses
# test watch without NodeInfoCollectorActor
await info_ref.destroy()
addresses = ["localhost:1234", "localhost:1235"]
with open(temp_address_file, "w") as file_obj:
file_obj.write("\n".join(addresses))
version, result = await asyncio.wait_for(
locator_ref.watch_supervisors_by_keys(["mock_name"], version=version),
timeout=30,
)
assert result[0] in addresses
# test watch when NodeInfoCollectorActor is created again
info_ref = await mo.create_actor(
MockNodeInfoCollectorActor,
uid=NodeInfoCollectorActor.default_uid(),
address=actor_pool.external_address,
)
await info_ref.set_all_node_infos({actor_pool.external_address: NodeStatus.READY})
addresses = [actor_pool.external_address]
with open(temp_address_file, "w") as file_obj:
file_obj.write("\n".join(addresses))
version, result = await asyncio.wait_for(
locator_ref.watch_supervisors_by_keys(["mock_name"], version=version),
timeout=30,
)
assert result[0] in addresses
|
299,595 |
get icon
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009- Spyder Project Contributors
#
# Distributed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Main interpreter Plugin.
"""
# Standard library imports
import os.path as osp
# Third-party imports
from qtpy.QtCore import Slot
# Local imports
from spyder.api.plugins import Plugins, SpyderPluginV2
from spyder.api.plugin_registration.decorators import (
on_plugin_available, on_plugin_teardown)
from spyder.api.translations import _
from spyder.plugins.maininterpreter.confpage import MainInterpreterConfigPage
from spyder.plugins.maininterpreter.container import MainInterpreterContainer
from spyder.utils.misc import get_python_executable
class MainInterpreter(SpyderPluginV2):
"""
Main interpreter Plugin.
"""
NAME = "main_interpreter"
REQUIRES = [Plugins.Preferences]
OPTIONAL = [Plugins.StatusBar]
CONTAINER_CLASS = MainInterpreterContainer
CONF_WIDGET_CLASS = MainInterpreterConfigPage
CONF_SECTION = NAME
CONF_FILE = False
CAN_BE_DISABLED = False
# ---- SpyderPluginV2 API
@staticmethod
def get_name():
return _("Python interpreter")
@staticmethod
def get_description():
return _(
"Manage the default Python interpreter used to run, analyze and "
"profile your code in Spyder."
)
@classmethod
def METHOD_NAME(cls):
return cls.create_icon('python')
def on_initialize(self):
container = self.get_container()
# Connect signal to open preferences
container.sig_open_preferences_requested.connect(
self._open_interpreter_preferences
)
# Add custom interpreter to list of saved ones
container.sig_add_to_custom_interpreters_requested.connect(
self._add_to_custom_interpreters
)
# Validate that the custom interpreter from the previous session
# still exists
if self.get_conf('custom'):
interpreter = self.get_conf('custom_interpreter')
if not osp.isfile(interpreter):
self.set_conf('custom', False)
self.set_conf('default', True)
self.set_conf('executable', get_python_executable())
@on_plugin_available(plugin=Plugins.Preferences)
def on_preferences_available(self):
# Register conf page
preferences = self.get_plugin(Plugins.Preferences)
preferences.register_plugin_preferences(self)
@on_plugin_available(plugin=Plugins.StatusBar)
def on_statusbar_available(self):
# Add status widget
statusbar = self.get_plugin(Plugins.StatusBar)
statusbar.add_status_widget(self.interpreter_status)
@on_plugin_teardown(plugin=Plugins.Preferences)
def on_preferences_teardown(self):
# Deregister conf page
preferences = self.get_plugin(Plugins.Preferences)
preferences.deregister_plugin_preferences(self)
@on_plugin_teardown(plugin=Plugins.StatusBar)
def on_statusbar_teardown(self):
# Remove status widget
statusbar = self.get_plugin(Plugins.StatusBar)
statusbar.remove_status_widget(self.interpreter_status.ID)
@property
def interpreter_status(self):
return self.get_container().interpreter_status
def set_custom_interpreter(self, interpreter):
"""Set given interpreter as the current selected one."""
self._add_to_custom_interpreters(interpreter)
self.set_conf("default", False)
self.set_conf("custom", True)
self.set_conf("custom_interpreter", interpreter)
self.set_conf("executable", interpreter)
# ---- Private API
def _open_interpreter_preferences(self):
"""Open the Preferences dialog in the main interpreter section."""
self._main.show_preferences()
preferences = self.get_plugin(Plugins.Preferences)
if preferences:
container = preferences.get_container()
dlg = container.dialog
index = dlg.get_index_by_name("main_interpreter")
dlg.set_current_index(index)
@Slot(str)
def _add_to_custom_interpreters(self, interpreter):
"""Add a new interpreter to the list of saved ones."""
custom_list = self.get_conf('custom_interpreters_list')
if interpreter not in custom_list:
custom_list.append(interpreter)
self.set_conf('custom_interpreters_list', custom_list)
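# Hedged usage sketch (the interpreter path is hypothetical): selecting a custom
# interpreter through the plugin updates the relevant config options:
#   plugin.set_custom_interpreter("/usr/bin/python3")
#   # -> conf: default=False, custom=True,
#   #    custom_interpreter="/usr/bin/python3", executable="/usr/bin/python3"
#   # and the path is appended to 'custom_interpreters_list' if not already present.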
|
299,596 |
test create fulltimerange
|
import shutil
from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import MagicMock
import pytest
from freqtrade.configuration import TimeRange
from freqtrade.data.dataprovider import DataProvider
from freqtrade.exceptions import OperationalException
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from tests.conftest import get_patched_exchange
from tests.freqai.conftest import (get_patched_data_kitchen, get_patched_freqai_strategy,
make_unfiltered_dataframe)
from tests.freqai.test_freqai_interface import is_mac
@pytest.mark.parametrize(
"timerange, train_period_days, expected_result",
[
("20220101-20220201", 30, "20211202-20220201"),
("20220301-20220401", 15, "20220214-20220401"),
],
)
def METHOD_NAME(
timerange, train_period_days, expected_result, freqai_conf, mocker, caplog
):
dk = get_patched_data_kitchen(mocker, freqai_conf)
assert dk.create_fulltimerange(timerange, train_period_days) == expected_result
shutil.rmtree(Path(dk.full_path))
def test_create_fulltimerange_incorrect_backtest_period(mocker, freqai_conf):
dk = get_patched_data_kitchen(mocker, freqai_conf)
with pytest.raises(OperationalException, match=r"backtest_period_days must be an integer"):
dk.create_fulltimerange("20220101-20220201", 0.5)
with pytest.raises(OperationalException, match=r"backtest_period_days must be positive"):
dk.create_fulltimerange("20220101-20220201", -1)
shutil.rmtree(Path(dk.full_path))
@pytest.mark.parametrize(
"timerange, train_period_days, backtest_period_days, expected_result",
[
("20220101-20220201", 30, 7, 9),
("20220101-20220201", 30, 0.5, 120),
("20220101-20220201", 10, 1, 80),
],
)
def test_split_timerange(
mocker, freqai_conf, timerange, train_period_days, backtest_period_days, expected_result
):
freqai_conf.update({"timerange": "20220101-20220401"})
dk = get_patched_data_kitchen(mocker, freqai_conf)
tr_list, bt_list = dk.split_timerange(timerange, train_period_days, backtest_period_days)
assert len(tr_list) == len(bt_list) == expected_result
with pytest.raises(
OperationalException, match=r"train_period_days must be an integer greater than 0."
):
dk.split_timerange("20220101-20220201", -1, 0.5)
shutil.rmtree(Path(dk.full_path))
def test_check_if_model_expired(mocker, freqai_conf):
dk = get_patched_data_kitchen(mocker, freqai_conf)
now = datetime.now(tz=timezone.utc).timestamp()
assert dk.check_if_model_expired(now) is False
now = (datetime.now(tz=timezone.utc) - timedelta(hours=2)).timestamp()
assert dk.check_if_model_expired(now) is True
shutil.rmtree(Path(dk.full_path))
def test_filter_features(mocker, freqai_conf):
freqai, unfiltered_dataframe = make_unfiltered_dataframe(mocker, freqai_conf)
freqai.dk.find_features(unfiltered_dataframe)
filtered_df, labels = freqai.dk.filter_features(
unfiltered_dataframe,
freqai.dk.training_features_list,
freqai.dk.label_list,
training_filter=True,
)
assert len(filtered_df.columns) == 14
def test_make_train_test_datasets(mocker, freqai_conf):
freqai, unfiltered_dataframe = make_unfiltered_dataframe(mocker, freqai_conf)
freqai.dk.find_features(unfiltered_dataframe)
features_filtered, labels_filtered = freqai.dk.filter_features(
unfiltered_dataframe,
freqai.dk.training_features_list,
freqai.dk.label_list,
training_filter=True,
)
data_dictionary = freqai.dk.make_train_test_datasets(features_filtered, labels_filtered)
assert data_dictionary
assert len(data_dictionary) == 7
assert len(data_dictionary['train_features'].index) == 1916
@pytest.mark.parametrize('model', [
'LightGBMRegressor'
])
def test_get_full_model_path(mocker, freqai_conf, model):
freqai_conf.update({"freqaimodel": model})
freqai_conf.update({"timerange": "20180110-20180130"})
freqai_conf.update({"strategy": "freqai_test_strat"})
if is_mac():
pytest.skip("Mac is confused during this test for unknown reasons")
strategy = get_patched_freqai_strategy(mocker, freqai_conf)
exchange = get_patched_exchange(mocker, freqai_conf)
strategy.dp = DataProvider(freqai_conf, exchange)
strategy.freqai_info = freqai_conf.get("freqai", {})
freqai = strategy.freqai
freqai.live = True
freqai.dk = FreqaiDataKitchen(freqai_conf)
freqai.dk.live = True
timerange = TimeRange.parse_timerange("20180110-20180130")
freqai.dd.load_all_pair_histories(timerange, freqai.dk)
freqai.dd.pair_dict = MagicMock()
data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
new_timerange = TimeRange.parse_timerange("20180120-20180130")
freqai.dk.set_paths('ADA/BTC', None)
freqai.extract_data_and_train_model(
new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
model_path = freqai.dk.get_full_models_path(freqai_conf)
assert model_path.is_dir() is True
|
299,597 |
convert 2 d to 1 d numpy
|
# -*- coding: utf-8 -*-
"""Machine type converters for Table scitype.
Exports conversion and mtype dictionary for Table scitype:
convert_dict: dict indexed by triples of str
1st element = convert from - str
2nd element = convert to - str
3rd element = considered as this scitype - str
elements are conversion functions of machine type (1st) -> 2nd
Function signature of all elements
convert_dict[(from_type, to_type, as_scitype)]
Parameters
----------
obj : from_type - object to convert
store : dictionary - reference of storage for lossy conversions, default=None (no store)
Returns
-------
converted_obj : to_type - object obj converted to to_type
Raises
------
ValueError and TypeError, if requested conversion is not possible
(depending on conversion logic)
"""
__author__ = ["fkiraly"]
__all__ = ["convert_dict"]
import numpy as np
import pandas as pd
from aeon.datatypes._convert_utils._convert import _extend_conversions
from aeon.datatypes._table._registry import MTYPE_LIST_TABLE
##############################################################
# methods to convert one machine type to another machine type
##############################################################
convert_dict = dict()
def convert_identity(obj, store=None):
return obj
# assign identity function to type conversion to self
for tp in MTYPE_LIST_TABLE:
convert_dict[(tp, tp, "Table")] = convert_identity
def convert_1D_to_2D_numpy_as_Table(obj: np.ndarray, store=None) -> np.ndarray:
if not isinstance(obj, np.ndarray):
raise TypeError("input must be a np.ndarray")
if len(obj.shape) == 1:
res = np.reshape(obj, (-1, 1))
else:
raise TypeError("input must be 1D np.ndarray")
return res
convert_dict[("numpy1D", "numpy2D", "Table")] = convert_1D_to_2D_numpy_as_Table
def METHOD_NAME(obj: np.ndarray, store=None) -> np.ndarray:
if not isinstance(obj, np.ndarray):
raise TypeError("input must be a np.ndarray")
if len(obj.shape) == 2:
res = obj.flatten()
else:
raise TypeError("input must be 2D np.ndarray")
return res
convert_dict[("numpy2D", "numpy1D", "Table")] = METHOD_NAME
def convert_df_to_2Dnp_as_Table(obj: pd.DataFrame, store=None) -> np.ndarray:
if not isinstance(obj, pd.DataFrame):
raise TypeError("input must be a pd.DataFrame")
if isinstance(store, dict):
store["columns"] = obj.columns
return obj.to_numpy()
convert_dict[("pd_DataFrame_Table", "numpy2D", "Table")] = convert_df_to_2Dnp_as_Table
def convert_df_to_1Dnp_as_Table(obj: pd.DataFrame, store=None) -> np.ndarray:
return convert_df_to_2Dnp_as_Table(obj=obj, store=store).flatten()
convert_dict[("pd_DataFrame_Table", "numpy1D", "Table")] = convert_df_to_1Dnp_as_Table
def convert_2Dnp_to_df_as_Table(obj: np.ndarray, store=None) -> pd.DataFrame:
if not isinstance(obj, np.ndarray) or len(obj.shape) not in (1, 2):
raise TypeError("input must be a 1D or 2D np.ndarray")
if len(obj.shape) == 1:
obj = np.reshape(obj, (-1, 1))
if (
isinstance(store, dict)
and "columns" in store.keys()
and len(store["columns"]) == obj.shape[1]
):
res = pd.DataFrame(obj, columns=store["columns"])
else:
res = pd.DataFrame(obj)
return res
convert_dict[("numpy2D", "pd_DataFrame_Table", "Table")] = convert_2Dnp_to_df_as_Table
def convert_1Dnp_to_df_as_Table(obj: np.ndarray, store=None) -> pd.DataFrame:
if not isinstance(obj, np.ndarray) or len(obj.shape) != 1:
raise TypeError("input must be a 1D np.ndarray")
obj = np.reshape(obj, (-1, 1))
if (
isinstance(store, dict)
and "columns" in store.keys()
and len(store["columns"]) == obj.shape[1]
):
res = pd.DataFrame(obj, columns=store["columns"])
else:
res = pd.DataFrame(obj)
return res
convert_dict[("numpy1D", "pd_DataFrame_Table", "Table")] = convert_1Dnp_to_df_as_Table
def convert_s_to_df_as_table(obj: pd.Series, store=None) -> pd.DataFrame:
if not isinstance(obj, pd.Series):
raise TypeError("input must be a pd.Series")
if (
isinstance(store, dict)
and "columns" in store.keys()
and len(store["columns"]) == 1
):
res = pd.DataFrame(obj, columns=store["columns"])
else:
res = pd.DataFrame(obj)
return res
convert_dict[
("pd_Series_Table", "pd_DataFrame_Table", "Table")
] = convert_s_to_df_as_table
def convert_df_to_s_as_table(obj: pd.DataFrame, store=None) -> pd.Series:
if not isinstance(obj, pd.DataFrame):
raise TypeError("input is not a pd.DataFrame")
if len(obj.columns) != 1:
raise ValueError("input must be univariate pd.DataFrame, with one column")
if isinstance(store, dict):
store["columns"] = obj.columns[[0]]
y = obj[obj.columns[0]]
y.name = None
return y
convert_dict[
("pd_DataFrame_Table", "pd_Series_Table", "Table")
] = convert_df_to_s_as_table
def convert_list_of_dict_to_df_as_table(obj: list, store=None) -> pd.DataFrame:
if not isinstance(obj, list):
raise TypeError("input must be a list of dict")
if not np.all([isinstance(x, dict) for x in obj]):
raise TypeError("input must be a list of dict")
res = pd.DataFrame(obj)
if (
isinstance(store, dict)
and "index" in store.keys()
and len(store["index"]) == len(res)
):
res.index = store["index"]
return res
convert_dict[
("list_of_dict", "pd_DataFrame_Table", "Table")
] = convert_list_of_dict_to_df_as_table
def convert_df_to_list_of_dict_as_table(obj: pd.DataFrame, store=None) -> list:
if not isinstance(obj, pd.DataFrame):
raise TypeError("input is not a pd.DataFrame")
ret_dict = [obj.loc[i].to_dict() for i in obj.index]
if isinstance(store, dict):
store["index"] = obj.index
return ret_dict
convert_dict[
("pd_DataFrame_Table", "list_of_dict", "Table")
] = convert_df_to_list_of_dict_as_table
_extend_conversions(
"pd_Series_Table", "pd_DataFrame_Table", convert_dict, MTYPE_LIST_TABLE
)
_extend_conversions(
"list_of_dict", "pd_DataFrame_Table", convert_dict, MTYPE_LIST_TABLE
)
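# Hedged usage sketch (example values are arbitrary): converters are looked up by the
# (from_mtype, to_mtype, scitype) triple; `store` lets a lossy conversion round-trip
# the column names.
if __name__ == "__main__":
    _store = {}
    _df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    _arr = convert_dict[("pd_DataFrame_Table", "numpy2D", "Table")](_df, store=_store)
    _back = convert_dict[("numpy2D", "pd_DataFrame_Table", "Table")](_arr, store=_store)
    print(list(_back.columns))  # ['a', 'b']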
|
299,598 |
main
|
#!/usr/bin/env python3
"""
Add user to a project.
This script will add a user to a Combine project in the database
To add the user to the project, we need to:
1. Look up the user id from the provided username/email.
2. Look up the project id from the provided project name.
3. Check to see if the user is already in the project.
If so (and if they're not the project owner), update their role.
4. If the user is not in the project:
a. create a document in the UserRolesCollection,
b. set the role field in the user role to the requested role.
c. add the new role to the user's document in the UsersCollection
"""
import argparse
import sys
from combine_app import CombineApp, Role
def parse_args() -> argparse.Namespace:
"""Parse the command line arguments."""
parser = argparse.ArgumentParser(
description="Add a user to a project on TheCombine. "
"The user can be specified by username or e-mail address.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--project", required=True, help="Name of the project to which the user is being added"
)
parser.add_argument(
"--user", required=True, help="Username or e-mail of the user to be added to the project"
)
parser.add_argument(
"--role",
choices=[role.value for role in Role if role != Role.Owner],
default=Role.Harvester.value,
help="Project role of the user to be added",
)
parser.add_argument(
"--verbose", action="store_true", help="Print intermediate values to aid in debugging"
)
return parser.parse_args()
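# Example invocation (script name, project, user, and role values are hypothetical):
#   python add_user_to_project.py --project "My Project" --user [email protected] \
#       --role Editor --verbose
# where --role must be one of the non-Owner values of the Role enum.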
def METHOD_NAME() -> None:
"""Add a user to a project."""
args = parse_args()
combine = CombineApp()
# 1. Look up the user id.
user_id = combine.get_user_id(args.user)
if user_id is None:
print(f"Cannot find user {args.user}")
sys.exit(1)
if args.verbose:
print(f"User Id: {user_id}")
# 2. Look up the project id.
proj_id = combine.get_project_id(args.project)
if proj_id is None:
print(f"Cannot find project {args.project}")
sys.exit(1)
if args.verbose:
print(f"Project ID: {proj_id}")
# 3. Check to see if the user is already in the project.
# define the query selection and projection arguments separately to
# improve readability
select_crit = f'{{ _id: ObjectId("{user_id}"), "projectRoles.{proj_id}": {{ $exists: true}} }}'
projection = f'{{ "projectRoles.{proj_id}" : 1}}'
result = combine.db_query("UsersCollection", select_crit, projection)
if len(result) == 1:
# The user is in the project
user_role_id = result[0]["projectRoles"][proj_id]
select_role = f'{{ _id: ObjectId("{user_role_id}")}}'
# Don't update if they're the project owner
user_role = combine.db_query("UserRolesCollection", select_role)[0]["role"]
if user_role == Role.Owner.value:
print(f"Could not update role for {args.user}, the project owner", file=sys.stderr)
sys.exit(1)
# Update the role
update_role = f'{{ $set: {{"role" : {args.role}}} }}'
upd_result = combine.db_cmd(
f"db.UserRolesCollection.findOneAndUpdate({select_role}, {update_role})"
)
if upd_result is None:
print(f"Could not update role for {args.user}.", file=sys.stderr)
sys.exit(1)
if args.verbose:
print(f"Updated Role {user_role_id} with role {args.role}.")
elif len(result) == 0:
# 4. The user is not in the project
# a. create a document in the UserRolesCollection,
# b. set the role field in the user role to the requested role.
insert_doc = f'{{ "role" : {args.role}, "projectId" : "{proj_id}" }}'
insert_result = combine.db_cmd(f"db.UserRolesCollection.insertOne({insert_doc})")
if insert_result is not None:
# c. add the new role to the user's document in the UsersCollection
user_role_id = insert_result["insertedId"]
select_user = f'{{ _id: ObjectId("{user_id}")}}'
update_user = f'{{ $set : {{"projectRoles.{proj_id}" : "{user_role_id}" }}}}'
add_role_result = combine.db_cmd(
f"db.UsersCollection.updateOne({select_user}, {update_user})"
)
if add_role_result is None:
print(f"Could not add new role to {args.user}.", file=sys.stderr)
sys.exit(1)
if args.verbose:
print(f"{args.user} added to {args.project} with role {args.role}.")
else:
print(f"Could not create role for {args.user} in {args.project}.", file=sys.stderr)
sys.exit(1)
else:
print(
f"Too many documents in UserRolesCollection for User {args.user}"
f" in Project {args.project}"
)
if __name__ == "__main__":
METHOD_NAME()
|
299,599 |
grad handle
|
from contextlib import contextmanager
from enum import Enum
from functools import partial
from typing import List
import torch
from colossalai.tensor.param_op_hook import ColoParamOpHook
from colossalai.zero.gemini.memory_tracer import MemStats, SyncCudaMemoryMonitor
from colossalai.zero.legacy.gemini.tensor_utils import alloc_storage, free_storage
class TrainingPhase(Enum):
FORWARD = 0
BACKWARD = 1
class GradMemStats():
def __init__(self) -> None:
self.unreleased_grad_flag = {}
self.unreleased_grad_volume = 0
def clear(self):
self.unreleased_grad_flag.clear()
self.unreleased_grad_volume = 0
class GradMemTracerHook():
def __init__(self, grad_stats: GradMemStats):
self.grad_hook_list = []
self._grad_stats = grad_stats
def METHOD_NAME(self, p, grad):
assert self._grad_stats.unreleased_grad_flag[p]
free_storage(grad)
self._grad_stats.unreleased_grad_volume -= grad.numel() * grad.element_size()
self._grad_stats.unreleased_grad_flag[p] = False
def register_grad_hook(self, module: torch.nn.Module):
for p in module.parameters():
if p.requires_grad:
self.grad_hook_list.append(p.register_hook(partial(self.METHOD_NAME, p)))
self._grad_stats.unreleased_grad_flag[p] = False
def remove_grad_hook(self):
for hook in self.grad_hook_list:
hook.remove()
class ParamMemTracerHook(ColoParamOpHook):
def __init__(self, memstats: MemStats, gradstats: GradMemStats) -> None:
super().__init__()
self._training_phase = TrainingPhase.FORWARD
self._memstats = memstats
self._grad_stats = gradstats
self.mem_monitor = SyncCudaMemoryMonitor()
def _free_cuda_params(self, params):
for p in params:
if p.data.device.type == "cpu":
raise NotImplementedError("Only free cuda memory")
free_storage(p.data)
def _allocate_params_on_cuda(self, params: List[torch.nn.Parameter]):
"""
Move ``params`` to CUDA.
Args:
params (List[torch.nn.Parameter]): target params
Raises:
NotImplementedError: raised when a param still has a gradient on CPU
"""
for p in params:
cur_dev = p.data.device.type
if cur_dev == "cpu":
if p.grad is not None and p.grad.device.type == "cpu":
raise NotImplementedError("Only run in forward propagation")
p.data = torch.empty(p.data.shape,
device="cuda",
dtype=p.data.dtype,
requires_grad=p.data.requires_grad)
elif cur_dev == "cuda":
alloc_storage(p.data)
def record_model_data_volume(self, params):
"""
Compute and record the CUDA model data volume used by ``params``.
"""
data_volume = self._grad_stats.unreleased_grad_volume
for p in params:
cur_model_data_volume = p.data.numel() * p.data.element_size()
data_volume += cur_model_data_volume
if self._training_phase == TrainingPhase.BACKWARD and p.requires_grad:
# account for param.grad; param.grad is actually still None at this point
data_volume += cur_model_data_volume
if not self._grad_stats.unreleased_grad_flag[p]:
self._grad_stats.unreleased_grad_volume += cur_model_data_volume
self._grad_stats.unreleased_grad_flag[p] = True
# record max non model data used for this Op
self._memstats.record_max_cuda_model_data(data_volume)
def pre_op(self, params):
max_cuda_used_pre_op = self.mem_monitor.finish()
# record max cuda overall data for prev OP.
self._memstats.record_max_cuda_overall_data(max_cuda_used_pre_op)
# record max cuda non model data for prev OP.
self._memstats.calc_max_cuda_non_model_data()
self._allocate_params_on_cuda(params)
# record max cuda model data for current OP
self.record_model_data_volume(params)
self.mem_monitor.start()
self._memstats.increase_preop_step(params)
def post_op(self, params):
self._free_cuda_params(params)
def pre_forward(self, params: List[torch.Tensor]) -> None:
self.pre_op(params)
def post_forward(self, params: List[torch.Tensor]) -> None:
self.post_op(params)
def pre_backward(self, params: List[torch.Tensor]) -> None:
self.pre_op(params)
def post_backward(self, params: List[torch.Tensor]) -> None:
self.post_op(params)
@contextmanager
def switch_training_phase(self, training_phase: TrainingPhase = TrainingPhase.BACKWARD):
old_training_phase = self._training_phase
try:
self._training_phase = training_phase
yield
finally:
self._training_phase = old_training_phase
switch_to_backward = switch_training_phase
switch_to_forward = partial(switch_to_backward, training_phase=TrainingPhase.FORWARD)
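# Hedged usage sketch (the stats objects, hook registration, and training loop are
# hypothetical; only the context manager defined above is shown as-is):
#   memstats, gradstats = MemStats(), GradMemStats()
#   hook = ParamMemTracerHook(memstats, gradstats)
#   # ...register `hook` as a param op hook and GradMemTracerHook(gradstats) on the model...
#   with hook.switch_to_backward():
#       loss.backward()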
|