id (int64, 0-300k) | label (string, 1-74 chars, ⌀ = may be null) | text (string, 4k-8k chars)
---|---|---
2,900 | set share properties |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: file_samples_share.py
DESCRIPTION:
These samples demonstrate share operations like creating a share snapshot,
setting share quota and metadata, listing directories and files in the
file share, and getting directory and file clients from a share client.
USAGE:
python file_samples_share.py
Set the environment variables with your own values before running the sample:
1) AZURE_STORAGE_CONNECTION_STRING - the connection string to your storage account
"""
import os
from azure.storage.fileshare import ShareAccessTier
SOURCE_FILE = './SampleSource.txt'
DEST_FILE = './SampleDestination.txt'
class ShareSamples(object):
connection_string = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
def create_share_snapshot(self):
# Instantiate the ShareClient from a connection string
from azure.storage.fileshare import ShareClient
share = ShareClient.from_connection_string(self.connection_string, "sharesamples1")
# [START create_share]
# Create share with Access Tier set to Hot
share.create_share(access_tier=ShareAccessTier("Hot"))
# [END create_share]
try:
# [START create_share_snapshot]
share.create_snapshot()
# [END create_share_snapshot]
finally:
# [START delete_share]
share.delete_share(delete_snapshots=True)
# [END delete_share]
def set_share_quota_and_metadata(self):
# [START create_share_client_from_conn_string]
from azure.storage.fileshare import ShareClient
share = ShareClient.from_connection_string(self.connection_string, "sharesamples2")
# [END create_share_client_from_conn_string]
# Create the share
share.create_share()
try:
# [START set_share_quota]
# Set the quota for the share to 1GB
share.set_share_quota(quota=1)
# [END set_share_quota]
# [START set_share_metadata]
data = {'category': 'test'}
share.set_share_metadata(metadata=data)
# [END set_share_metadata]
# Get the metadata for the share
props = share.get_share_properties().metadata
finally:
# Delete the share
share.delete_share()
def METHOD_NAME(self):
from azure.storage.fileshare import ShareClient
share1 = ShareClient.from_connection_string(self.connection_string, "sharesamples3a")
share2 = ShareClient.from_connection_string(self.connection_string, "sharesamples3b")
# Create the share
share1.create_share()
share2.create_share()
try:
# [START set_share_properties]
# Set the tier for the first share to Hot
share1.METHOD_NAME(access_tier="Hot")
            # Set the quota for the first share to 3 GB
share1.METHOD_NAME(quota=3)
            # Set the tier for the second share to Cool and the quota to 2 GB
share2.METHOD_NAME(access_tier=ShareAccessTier("Cool"), quota=2)
# Get the shares' properties
print(share1.get_share_properties().access_tier)
print(share1.get_share_properties().quota)
print(share2.get_share_properties().access_tier)
print(share2.get_share_properties().quota)
# [END set_share_properties]
finally:
# Delete the shares
share1.delete_share()
share2.delete_share()
def list_directories_and_files(self):
# Instantiate the ShareClient from a connection string
from azure.storage.fileshare import ShareClient
share = ShareClient.from_connection_string(self.connection_string, "sharesamples4")
# Create the share
share.create_share()
try:
# [START share_list_files_in_dir]
# Create a directory in the share
dir_client = share.create_directory("mydir")
# Upload a file to the directory
with open(SOURCE_FILE, "rb") as source_file:
dir_client.upload_file(file_name="sample", data=source_file)
# List files in the directory
my_files = list(share.list_directories_and_files(directory_name="mydir"))
print(my_files)
# [END share_list_files_in_dir]
finally:
# Delete the share
share.delete_share()
def get_directory_or_file_client(self):
# Instantiate the ShareClient from a connection string
from azure.storage.fileshare import ShareClient
share = ShareClient.from_connection_string(self.connection_string, "sharesamples5")
# Get the directory client to interact with a specific directory
my_dir = share.get_directory_client("dir1")
# Get the file client to interact with a specific file
my_file = share.get_file_client("dir1/myfile")
def acquire_share_lease(self):
# Instantiate the ShareClient from a connection string
from azure.storage.fileshare import ShareClient
share = ShareClient.from_connection_string(self.connection_string, "sharesamples")
# Create the share
share.create_share()
# [START acquire_and_release_lease_on_share]
share.create_directory("mydir")
lease = share.acquire_lease()
share.get_share_properties(lease=lease)
share.delete_share(lease=lease)
# [END acquire_and_release_lease_on_share]
if __name__ == '__main__':
sample = ShareSamples()
sample.create_share_snapshot()
sample.set_share_quota_and_metadata()
sample.METHOD_NAME()
sample.list_directories_and_files()
sample.get_directory_or_file_client()
sample.acquire_share_lease()
|
2,901 | execute |
"""
Database interaction
"""
import asyncio
import logging
from contextlib import contextmanager
from sqlalchemy import create_engine, text
from sqlalchemy.exc import DBAPIError, OperationalError
from sqlalchemy.ext.asyncio import AsyncConnection as _AsyncConnection
from sqlalchemy.ext.asyncio import AsyncEngine as _AsyncEngine
from sqlalchemy.util import EMPTY_DICT
from server.metrics import db_exceptions
logger = logging.getLogger(__name__)
@contextmanager
def stat_db_errors():
"""
Collect metrics on errors thrown
"""
try:
yield
except DBAPIError as e:
db_exceptions.labels(e.__class__.__name__, e.code).inc()
raise e
class FAFDatabase:
def __init__(
self,
host: str = "localhost",
port: int = 3306,
user: str = "root",
password: str = "",
db: str = "faf_test",
**kwargs
):
kwargs["future"] = True
sync_engine = create_engine(
f"mysql+aiomysql://{user}:{password}@{host}:{port}/{db}",
**kwargs
)
self.engine = AsyncEngine(sync_engine)
def acquire(self):
return self.engine.begin()
async def close(self):
await self.engine.dispose()
class AsyncEngine(_AsyncEngine):
"""
For overriding the connection class used to execute statements.
    This could also be done by changing engine._connection_cls; however, that
    attribute is undocumented and probably more fragile, so we subclass instead.
"""
def connect(self):
return AsyncConnection(self)
class AsyncConnection(_AsyncConnection):
async def execute(
self,
statement,
parameters=None,
execution_options=EMPTY_DICT,
**kwargs
):
with stat_db_errors():
return await self.METHOD_NAME(
statement,
parameters=parameters,
execution_options=execution_options,
**kwargs
)
async def METHOD_NAME(
self,
statement,
parameters=None,
execution_options=EMPTY_DICT,
**kwargs
):
"""
        Wraps strings in the text type automatically and allows bindparams to be
        passed via kwargs.
"""
if isinstance(statement, str):
statement = text(statement)
if kwargs and parameters is None:
parameters = kwargs
return await super().execute(
statement,
parameters=parameters,
execution_options=execution_options
)
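    # Illustration only (not part of the original module): thanks to the wrapping
    # above, both calls below bind the parameter the same way. The "login" table
    # is a hypothetical example.
    #
    #     await conn.execute("SELECT * FROM login WHERE id = :id", {"id": 42})
    #     await conn.execute("SELECT * FROM login WHERE id = :id", id=42)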
async def stream(
self,
statement,
parameters=None,
execution_options=EMPTY_DICT,
**kwargs
):
with stat_db_errors():
return await self._stream(
statement,
parameters=parameters,
execution_options=execution_options,
**kwargs
)
async def _stream(
self,
statement,
parameters=None,
execution_options=EMPTY_DICT,
**kwargs
):
"""
        Wraps strings in the text type automatically and allows bindparams to be
        passed via kwargs.
"""
if isinstance(statement, str):
statement = text(statement)
if kwargs and parameters is None:
parameters = kwargs
return await super().stream(
statement,
parameters=parameters,
execution_options=execution_options
)
async def deadlock_retry_execute(
self,
statement,
parameters=None,
execution_options=EMPTY_DICT,
max_attempts=3,
**kwargs
):
with stat_db_errors():
return await self._deadlock_retry_execute(
statement,
parameters=parameters,
execution_options=execution_options,
max_attempts=max_attempts,
**kwargs
)
async def _deadlock_retry_execute(
self,
statement,
parameters=None,
execution_options=EMPTY_DICT,
max_attempts=3,
**kwargs
):
for attempt in range(max_attempts - 1):
try:
return await self.METHOD_NAME(
statement,
parameters=parameters,
execution_options=execution_options,
**kwargs
)
except OperationalError as e:
error_text = str(e)
if any(msg in error_text for msg in (
"Deadlock found",
"Lock wait timeout exceeded"
)):
logger.warning(
"Encountered deadlock during SQL execution. Attempts: %d",
attempt + 1
)
# Exponential backoff
await asyncio.sleep(0.3 * 2 ** attempt)
else:
raise
# On the final attempt we don't do any error handling
return await self.METHOD_NAME(
statement,
parameters=parameters,
execution_options=execution_options,
**kwargs
)
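# A minimal usage sketch (the table, column and credentials below are assumptions,
# not part of this module):
#
#     db = FAFDatabase(user="root", password="", db="faf")
#
#     async def bump_score(player_id: int):
#         async with db.acquire() as conn:
#             await conn.deadlock_retry_execute(
#                 "UPDATE login SET score = score + 1 WHERE id = :id",
#                 id=player_id,
#             )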
|
2,902 | id |
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetConnectionResult',
'AwaitableGetConnectionResult',
'get_connection',
'get_connection_output',
]
@pulumi.output_type
class GetConnectionResult:
"""
API connection
"""
def __init__(__self__, etag=None, METHOD_NAME=None, location=None, name=None, properties=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Resource ETag
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ApiConnectionDefinitionResponseProperties':
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetConnectionResult(GetConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConnectionResult(
etag=self.etag,
METHOD_NAME=self.METHOD_NAME,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_connection(connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
subscription_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionResult:
"""
Get a specific connection
Azure REST API version: 2016-06-01.
:param str connection_name: Connection name
:param str resource_group_name: The resource group
:param str subscription_id: Subscription Id
"""
__args__ = dict()
__args__['connectionName'] = connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['subscriptionId'] = subscription_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:web:getConnection', __args__, opts=opts, typ=GetConnectionResult).value
return AwaitableGetConnectionResult(
etag=pulumi.get(__ret__, 'etag'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
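# Usage sketch (the connection and resource group names are made up for
# illustration):
#
#     result = get_connection(connection_name="office365",
#                             resource_group_name="my-resource-group")
#     pulumi.export("connection_id", result.METHOD_NAME)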
@_utilities.lift_output_func(get_connection)
def get_connection_output(connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConnectionResult]:
"""
Get a specific connection
Azure REST API version: 2016-06-01.
:param str connection_name: Connection name
:param str resource_group_name: The resource group
:param str subscription_id: Subscription Id
"""
...
|
2,903 | test case 2a |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2023 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.qa_tests_data.classical_damage import (
case_1, case_2, case_1a, case_1b, case_1c, case_2a, case_2b, case_3a,
case_4a, case_4b, case_4c, case_5a, case_6a, case_6b, case_7a, case_7b,
case_7c, case_8a, case_master)
from openquake.calculators.export import export
from openquake.calculators.tests import (
CalculatorTestCase, strip_calc_id, NOT_DARWIN)
import numpy
aae = numpy.testing.assert_almost_equal
class ClassicalDamageCase1TestCase(CalculatorTestCase):
def test_continuous(self):
self.run_calc(case_1.__file__, 'job_continuous.ini')
[fname] = export(('damages-rlzs', 'csv'), self.calc.datastore)
self.assertEqualFiles('expected/damage_continuous.csv', fname)
def test_discrete(self):
self.run_calc(case_1.__file__, 'job_discrete.ini')
[fname] = export(('damages-rlzs', 'csv'), self.calc.datastore)
self.assertEqualFiles('expected/damage_discrete.csv', fname)
def test_interpolation(self):
self.run_calc(case_1.__file__, 'job_interpolation.ini')
[fname] = export(('damages-rlzs', 'csv'), self.calc.datastore)
self.assertEqualFiles('expected/damage_interpolation.csv', fname)
# tests with no damage limit
class ClassicalDamageCase2TestCase(CalculatorTestCase):
def test_continuous(self):
self.run_calc(case_2.__file__, 'job_continuous.ini')
[fname] = export(('damages-rlzs', 'csv'), self.calc.datastore)
self.assertEqualFiles('expected/damage_continuous.csv', fname)
def test_discrete(self):
# a test with a putative poe == 1 causing log(0)
self.run_calc(case_2.__file__, 'job_discrete.ini')
[fname] = export(('damages-rlzs', 'csv'), self.calc.datastore)
self.assertEqualFiles('expected/damage_discrete.csv', fname)
def test_interpolation(self):
self.run_calc(case_2.__file__, 'job_interpolation.ini')
[fname] = export(('damages-rlzs', 'csv'), self.calc.datastore)
self.assertEqualFiles('expected/damage_interpolation.csv', fname)
class ClassicalDamageCase8TestCase(CalculatorTestCase):
def test_case_8a(self):
self.run_calc(
case_8a.__file__, 'job_haz.ini,job_risk.ini')
f1, f2 = export(('damages-rlzs', 'csv'), self.calc.datastore)
self.assertEqualFiles(
'expected/damages-rlzs-AkkarBommer2010().csv', f2)
self.assertEqualFiles(
'expected/damages-rlzs-SadighEtAl1997().csv', f1)
[f] = export(('damages-stats', 'csv'), self.calc.datastore)
self.assertEqualFiles('expected/damages-stats.csv', f)
class ClassicalDamageTestCase(CalculatorTestCase):
# all the tests here are similar
def check(self, case):
self.run_calc(case.__file__, 'job_haz.ini')
self.run_calc(case.__file__, 'job_risk.ini',
hazard_calculation_id=str(self.calc.datastore.calc_id))
fnames = export(('damages-rlzs', 'csv'), self.calc.datastore)
if len(fnames) == 1:
self.assertEqualFiles(
'expected/damages.csv', fnames[0], delta=1E-5)
else:
for fname in fnames:
self.assertEqualFiles(
'expected/%s' % strip_calc_id(fname), fname, delta=1E-5)
def test_case_1a(self):
self.check(case_1a)
def test_case_1b(self):
self.check(case_1b)
def test_case_1c(self):
self.check(case_1c)
def METHOD_NAME(self):
self.check(case_2a)
def test_case_2b(self):
self.check(case_2b)
def test_case_3a(self):
self.check(case_3a)
def test_case_4a(self):
self.check(case_4a)
def test_case_4b(self):
self.check(case_4b)
def test_case_4c(self):
self.check(case_4c)
def test_case_5a(self):
self.check(case_5a)
def test_case_6a(self):
# this is a tricky test where the region_constraint discards an asset
# so the risk sites are different from the hazard sites
self.check(case_6a)
def test_case_6b(self):
self.check(case_6b)
def test_case_7a(self):
self.check(case_7a)
def test_case_7b(self):
self.check(case_7b)
def test_case_7c(self):
self.check(case_7c)
def test_case_master(self):
self.check(case_master)
fnames = export(('hcurves', 'xml'), self.calc.datastore)
for fname in fnames:
self.assertEqualFiles(
'expected/%s' % strip_calc_id(fname), fname,
delta=1E-4)
|
2,904 | get pitch counts |
# Copyright 2023 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data utility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from magenta.models.gansynth.lib import datasets
from magenta.models.gansynth.lib import train_util
from magenta.models.gansynth.lib.specgrams_helper import SpecgramsHelper
import tensorflow.compat.v1 as tf
class DataHelper(object):
"""A class for querying and converting data."""
def __init__(self, config):
self._config = config
self._dataset_name = config['dataset_name']
self.dataset = datasets.registry[self._dataset_name](config)
self.specgrams_helper = self.make_specgrams_helper()
def _map_fn(self):
"""Create a mapping function for the dataset."""
raise NotImplementedError
def make_specgrams_helper(self):
"""Create a specgrams helper for the dataset."""
raise NotImplementedError
def data_to_waves(self, data):
"""Converts data representation to waveforms."""
raise NotImplementedError
def waves_to_data(self, waves):
"""Converts data representation to waveforms."""
raise NotImplementedError
def METHOD_NAME(self):
"""Returns a dictionary {pitch value (int): count (int)}."""
return self.dataset.METHOD_NAME()
def provide_one_hot_labels(self, batch_size):
"""Returns a batch of one-hot labels."""
with tf.name_scope('inputs'):
with tf.device('/cpu:0'):
return self.dataset.provide_one_hot_labels(batch_size=batch_size)
def provide_data(self, batch_size):
"""Returns a batch of data and one-hot labels."""
with tf.name_scope('inputs'):
with tf.device('/cpu:0'):
dataset = self.dataset.provide_dataset()
dataset = dataset.shuffle(buffer_size=1000)
dataset = dataset.map(self._map_fn, num_parallel_calls=4)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(1)
iterator = tf.data.make_initializable_iterator(dataset)
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
iterator.initializer)
data, one_hot_labels = iterator.get_next()
data.set_shape([batch_size, None, None, None])
one_hot_labels.set_shape([batch_size, None])
return data, one_hot_labels
class DataSTFTHelper(DataHelper):
"""A data helper for Linear Spectrograms."""
def make_specgrams_helper(self):
final_resolutions = train_util.make_resolution_schedule(
**self._config).final_resolutions
return SpecgramsHelper(
audio_length=self._config['audio_length'],
spec_shape=final_resolutions,
overlap=0.75,
sample_rate=self._config['sample_rate'],
mel_downscale=1,
ifreq=True)
def _map_fn(self, wave, one_hot_label): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
waves = wave[tf.newaxis, :, :]
data = self.waves_to_data(waves)
return data[0], one_hot_label
def data_to_waves(self, data):
return self.specgrams_helper.specgrams_to_waves(data)
def waves_to_data(self, waves):
return self.specgrams_helper.waves_to_specgrams(waves)
class DataWaveHelper(DataSTFTHelper):
"""A data helper for raw waveforms.
  For compatibility with the spectral network architectures, we add a second
(redundant) channel and zero-pad along the time axis.
"""
def make_specgrams_helper(self):
return SpecgramsHelper(audio_length=64000,
spec_shape=(256, 512),
overlap=0.75,
sample_rate=self._config['sample_rate'],
mel_downscale=2)
def data_to_waves(self, data):
return data[:, 768:-768, 0, :1]
def waves_to_data(self, waves):
waves = waves[:, :, None, :]
pad = tf.zeros([tf.shape(waves)[0], 768, 1, 1])
waves = tf.concat([pad, waves, pad], axis=1)
return tf.concat([waves, waves], axis=3)
class DataSTFTNoIFreqHelper(DataHelper):
"""A data helper for Linear Spectrograms."""
def make_specgrams_helper(self):
final_resolutions = train_util.make_resolution_schedule(
**self._config).final_resolutions
return SpecgramsHelper(
audio_length=self._config['audio_length'],
spec_shape=final_resolutions,
overlap=0.75,
sample_rate=self._config['sample_rate'],
mel_downscale=1,
ifreq=False)
def _map_fn(self, wave, one_hot_label): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
waves = wave[tf.newaxis, :, :]
data = self.waves_to_data(waves)
return data[0], one_hot_label
def data_to_waves(self, data):
return self.specgrams_helper.specgrams_to_waves(data)
def waves_to_data(self, waves):
return self.specgrams_helper.waves_to_specgrams(waves)
class DataMelHelper(DataSTFTHelper):
"""A data helper for Mel Spectrograms."""
def data_to_waves(self, data):
return self.specgrams_helper.melspecgrams_to_waves(data)
def waves_to_data(self, waves):
return self.specgrams_helper.waves_to_melspecgrams(waves)
registry = {
'linear': DataSTFTHelper,
'phase': DataSTFTNoIFreqHelper,
'mel': DataMelHelper,
'wave': DataWaveHelper,
}
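# Selection sketch: the registry above maps a data representation name to its
# helper class. The config keys shown are the ones this file reads; the values
# (and the 'nsynth_tfrecord' dataset name) are assumptions for illustration:
#
#     config = {'dataset_name': 'nsynth_tfrecord', 'audio_length': 64000,
#               'sample_rate': 16000}             # abbreviated; real configs carry more keys
#     data_helper = registry['mel'](config)
#     waves = data_helper.data_to_waves(specgrams)  # mel spectrograms back to audio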
|
2,905 | deprecated methods |
# -*- coding: utf-8 -*-
#
# LinOTP - the open source solution for two factor authentication
# Copyright (C) 2010-2019 KeyIdentity GmbH
# Copyright (C) 2019- netgo software GmbH
#
# This file is part of LinOTP server.
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License, version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# E-mail: [email protected]
# Contact: www.linotp.org
# Support: www.linotp.de
#
"""
This module contains the LinOTP processing logic
"""
import warnings
from functools import wraps
def render_calling_path(func):
"""
    return the API path including the HTTP methods
    - utility for Sphinx rendering of the API docs
"""
module = func.__module__
module_name = module.rpartition(".")[-1]
func_name = func.__name__
try:
methods = ", ".join(func.methods)
    except AttributeError:
methods = "GET, POST"
return f"**{methods}** */{module_name}/{func_name}*\n "
def deprecated(func):
"""This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn(
"Call to deprecated function %s." % func.__name__,
category=DeprecationWarning,
)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
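# Usage sketch (the decorated function is hypothetical, not part of this module):
#
#     @deprecated
#     def old_api():
#         return 42
#
#     old_api()  # issues a DeprecationWarning naming "old_api" (subject to warning filters)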
def METHOD_NAME(deprecated_methods_list):
"""
deprecated_methods - decorator function
mark linotp endpoints as deprecated when accessed with a http method
in the provided list, eg.
@deprecated_methods(['GET'])
def check()
    1. A deprecation warning will be prepended to the docstring.
    2. A warning should be logged in case the 'check' endpoint is accessed
       using an HTTP GET request. The warning log itself should be implemented
       in the controllers before calling the method (in progress, TODO).
    Developer note: the implementation is not complete; the major shortcoming is
    that it is not possible to access the request method the function is called
    with.
    :param deprecated_methods_list: a list of HTTP methods that are deprecated for
        the endpoint, e.g. ["GET"], ["POST"] or ["GET", "POST"]
"""
def is_get_deprecated():
return "GET" in deprecated_methods_list
def is_post_deprecated():
return "POST" in deprecated_methods_list
def doc_pretext():
"""Helper function
        This is the text that will be prepended to the top of the docstring.
"""
if is_get_deprecated():
doc_pretext = """
.. deprecated:: 3.2
Requests using HTTP **GET** method (because it is modifying data).
This endpoint will only be available via HTTP **POST** method in
the future.
"""
if is_post_deprecated():
doc_pretext = """
.. deprecated:: 3.2
Requests using HTTP **POST** method (because it is only reading data).
This endpoint will only be available via HTTP **GET** method in
the future.
"""
return doc_pretext
def doc_posttext():
"""Helper function: This is the text that is gonna be appended to the end of the docstring"""
if is_get_deprecated():
doc_posttext = """ """
if is_post_deprecated():
            doc_posttext = """ """
return doc_posttext
def get_conditional_deprecation_warnings(func_name):
"""Helper function: This is the message which is gonna be printed if the function is called
with the wrong call method. e.g. a POST method(deprecated GET) being called
by Get"""
conditional_deprecation_warnings = []
if is_get_deprecated():
warning_message = (
f"method: [{func_name}] should be called only by POST method"
)
conditional_deprecation_warnings.append(
{"condition_method": "GET", "warning_message": warning_message}
)
if is_post_deprecated():
warning_message = (
f"method: [{func_name}] should be called only by GET method"
)
conditional_deprecation_warnings.append(
{
"condition_method": "POST",
"warning_message": warning_message,
}
)
return conditional_deprecation_warnings
    # the actual decorator is here
def inner_func(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
# update the docstring of the function
wrapper.__doc__ = (
render_calling_path(func)
+ doc_pretext()
+ wrapper.__doc__
+ doc_posttext()
)
# Further implementation: set a flag to log a warning in case of being called by the wrong method
# wrapper.conditional_deprecation_warnings = (
# get_conditional_deprecation_warnings(func_name=wrapper.__name__)
# )
return wrapper
return inner_func
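# Usage sketch (the controller method below is hypothetical, not part of this
# module):
#
#     @METHOD_NAME(["GET"])
#     def check(self):
#         """Validate an OTP value."""
#         ...
#
# The rendered docstring of ``check`` then starts with the calling path from
# render_calling_path() followed by the ".. deprecated:: 3.2" note.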
|
2,906 | get success url |
import logging
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import DeleteView, TemplateView
from zentral.contrib.inventory.forms import EnrollmentSecretForm
from zentral.contrib.osquery.forms import EnrollmentForm
from zentral.contrib.osquery.models import Configuration, Enrollment
logger = logging.getLogger('zentral.contrib.osquery.views.enrollments')
class CreateEnrollmentView(PermissionRequiredMixin, TemplateView):
permission_required = "osquery.add_enrollment"
template_name = "osquery/enrollment_form.html"
def dispatch(self, request, *args, **kwargs):
self.configuration = get_object_or_404(Configuration, pk=kwargs["pk"])
return super().dispatch(request, *args, **kwargs)
def get_forms(self):
secret_form_kwargs = {"prefix": "secret"}
enrollment_form_kwargs = {"configuration": self.configuration,
"initial": {"configuration": self.configuration}}
if self.request.method == "POST":
secret_form_kwargs["data"] = self.request.POST
enrollment_form_kwargs["data"] = self.request.POST
return (EnrollmentSecretForm(**secret_form_kwargs),
EnrollmentForm(**enrollment_form_kwargs))
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["configuration"] = self.configuration
if "secret_form" not in kwargs or "enrollment_form" not in kwargs:
ctx["secret_form"], ctx["enrollment_form"] = self.get_forms()
return ctx
def forms_invalid(self, secret_form, enrollment_form):
return self.render_to_response(self.get_context_data(secret_form=secret_form,
enrollment_form=enrollment_form))
def forms_valid(self, secret_form, enrollment_form):
secret = secret_form.save()
secret_form.save_m2m()
enrollment = enrollment_form.save(commit=False)
enrollment.secret = secret
if self.configuration:
enrollment.configuration = self.configuration
enrollment.save()
return redirect(enrollment)
def post(self, request, *args, **kwargs):
secret_form, enrollment_form = self.get_forms()
if secret_form.is_valid() and enrollment_form.is_valid():
return self.forms_valid(secret_form, enrollment_form)
else:
return self.forms_invalid(secret_form, enrollment_form)
class DeleteEnrollmentView(PermissionRequiredMixin, DeleteView):
permission_required = "osquery.delete_enrollment"
def get_queryset(self):
return (Enrollment.objects.select_related("configuration")
.filter(configuration__pk=self.kwargs["configuration_pk"],
distributor_content_type__isnull=True,
distributor_pk__isnull=True))
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["configuration"] = self.object.configuration
ctx["enrolled_machine_count"] = self.object.enrolledmachine_set.count()
return ctx
def METHOD_NAME(self):
return self.object.configuration.get_absolute_url()
class EnrollmentBumpVersionView(PermissionRequiredMixin, TemplateView):
permission_required = "osquery.change_enrollment"
template_name = "osquery/enrollment_confirm_version_bump.html"
def dispatch(self, request, *args, **kwargs):
self.enrollment = get_object_or_404(
Enrollment,
pk=kwargs["pk"],
configuration__pk=kwargs["configuration_pk"],
distributor_content_type__isnull=True,
distributor_pk__isnull=True,
)
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["enrollment"] = self.enrollment
return ctx
def post(self, request, *args, **kwargs):
self.enrollment.save() # will bump the version
return redirect(self.enrollment)
|
2,907 | get emma model from strudat |
from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME phenix.emma
# LIBTBX_SET_DISPATCHER_NAME iotbx.emma
from iotbx import crystal_symmetry_from_any
import iotbx.pdb
from iotbx.cns import sdb_reader
from iotbx.kriber import strudat
from iotbx.option_parser import option_parser
from cctbx import euclidean_model_matching as emma
import sys, os
import cctbx.xray
from six.moves import zip
class MultipleEntriesError(RuntimeError): pass
def get_emma_model_from_pdb(file_name=None,
pdb_records=None,
crystal_symmetry=None):
assert [file_name, pdb_records].count(None) == 1
if (pdb_records is None):
pdb_inp = iotbx.pdb.input(file_name=file_name)
else:
pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_records)
crystal_symmetry = pdb_inp.crystal_symmetry(
crystal_symmetry=crystal_symmetry,
weak_symmetry=True)
if (not crystal_symmetry or crystal_symmetry.unit_cell() is None):
raise RuntimeError("Unit cell parameters unknown for model %s." %(
file_name))
if (crystal_symmetry.space_group_info() is None):
raise RuntimeError("Space group unknown.")
positions = []
for atom in pdb_inp.atoms_with_labels():
positions.append(emma.position(
":".join([str(len(positions)+1),
atom.name, atom.resname, atom.chain_id]),
crystal_symmetry.unit_cell().fractionalize(atom.xyz)))
assert len(positions) > 0
result = emma.model(
crystal_symmetry.special_position_settings(),
positions)
if (file_name is not None):
result.label = file_name
return result
def get_emma_model_from_sdb(file_name, crystal_symmetry):
sdb_files = sdb_reader.multi_sdb_parser(open(file_name))
if (len(sdb_files) > 1):
raise MultipleEntriesError(
"SDB file %s may contain only one structure." % file_name)
assert len(sdb_files) == 1
sdb_file = sdb_files[0]
crystal_symmetry = crystal_symmetry.join_symmetry(
other_symmetry=sdb_file.crystal_symmetry(),
force=True)
positions = []
for i,site in enumerate(sdb_file.sites):
if (crystal_symmetry.unit_cell() is None):
raise RuntimeError("Unit cell parameters unknown.")
positions.append(emma.position(
":".join((str(i+1), site.segid, site.type)),
crystal_symmetry.unit_cell().fractionalize((site.x, site.y, site.z))))
assert len(positions) > 0
result = emma.model(
crystal_symmetry.special_position_settings(),
positions)
result.label = sdb_file.file_name
return result
def get_emma_model_from_solve(file_name, crystal_symmetry):
positions = []
for line in open(file_name):
flds = line.split()
if (len(flds) < 4 or flds[0].lower() != "xyz"): continue
site = [float(x) for x in flds[1:4]]
positions.append(emma.position("site"+str(len(positions)+1), site))
assert len(positions) > 0
result = emma.model(
crystal_symmetry.special_position_settings(),
positions)
result.label = file_name
return result
def get_emma_model_from_ins(file_name):
return cctbx.xray.structure.from_shelx(file=open(file_name)).as_emma_model()
def METHOD_NAME(file_name):
strudat_entries = strudat.read_all_entries(open(file_name))
if (len(strudat_entries.entries) > 1):
raise MultipleEntriesError(
"strudat file %s may contain only one structure." % file_name)
assert len(strudat_entries.entries) == 1
return strudat_entries.entries[0].as_xray_structure().as_emma_model()
def get_emma_model(file_name, crystal_symmetry):
if (not os.path.isfile(file_name)):
raise RuntimeError("File not found: %s" % file_name)
try:
return get_emma_model_from_pdb(
file_name=file_name,
crystal_symmetry=crystal_symmetry)
except KeyboardInterrupt: raise
except Exception:
if (iotbx.pdb.is_pdb_file(file_name)): raise
try:
return get_emma_model_from_sdb(
file_name=file_name,
crystal_symmetry=crystal_symmetry)
except MultipleEntriesError:
raise
except KeyboardInterrupt: raise
except Exception:
pass
try:
return get_emma_model_from_solve(
file_name=file_name,
crystal_symmetry=crystal_symmetry)
except KeyboardInterrupt: raise
except Exception:
pass
try:
return get_emma_model_from_ins(file_name=file_name)
except KeyboardInterrupt: raise
except Exception:
pass
try:
return METHOD_NAME(file_name=file_name)
except MultipleEntriesError:
raise
except KeyboardInterrupt: raise
except Exception:
pass
raise RuntimeError("Coordinate file %s: unknown format." % file_name)
def run(args, command_name="phenix.emma"):
command_line = (option_parser(
usage=command_name + " [options]"
+" reference_coordinates other_coordinates",
description="Example: %s model1.pdb model2.sdb" % command_name)
.enable_symmetry_comprehensive()
.option(None, "--output_pdb",
action="store",
type="str",
default="",
help="Output pdb: second model transformed to best match first model",
metavar="STR")
.option(None, "--tolerance",
action="store",
type="float",
default=3.,
help="match tolerance",
metavar="FLOAT")
.option(None, "--diffraction_index_equivalent",
action="store_true",
help="Use only if models are diffraction-index equivalent.")
).process(args=args, nargs=2)
crystal_symmetry = command_line.symmetry
if ( crystal_symmetry.unit_cell() is None
or crystal_symmetry.space_group_info() is None):
for file_name in command_line.args:
crystal_symmetry = crystal_symmetry.join_symmetry(
other_symmetry=crystal_symmetry_from_any.extract_from(
file_name=file_name),
force=False)
output_pdb = command_line.options.output_pdb
if output_pdb:
print("Output pdb:",output_pdb)
tolerance = command_line.options.tolerance
print("Tolerance:", tolerance)
if (tolerance <= 0.):
raise ValueError("Tolerance must be greater than zero.")
print()
diffraction_index_equivalent = \
command_line.options.diffraction_index_equivalent
if (diffraction_index_equivalent):
print("Models are diffraction index equivalent.")
print()
second_model_as_pdb_inp=None
emma_models = []
for file_name in command_line.args:
emma_models.append(get_emma_model(
file_name=file_name,
crystal_symmetry=crystal_symmetry))
if len(emma_models)==2 and os.path.isfile(file_name):
try:
second_model_as_pdb_inp=iotbx.pdb.input(
file_name=file_name)
except Exception as e:
pass
emma_models[0].show("Reference model")
emma_models[1].show("Other model")
for model,label in zip(emma_models, ["reference", "other"]):
if (model.unit_cell() is None):
raise RuntimeError("Unit cell parameters unknown (%s model)." % label)
if (model.space_group_info() is None):
raise RuntimeError("Space group unknown (%s model)." % label)
model_matches = emma.model_matches(
model1=emma_models[0],
model2=emma_models[1],
tolerance=tolerance,
models_are_diffraction_index_equivalent=diffraction_index_equivalent)
if (model_matches.n_matches() == 0):
print("No matches.")
print()
else:
max_n_pairs = None
first=True
for match in model_matches.refined_matches:
if (max_n_pairs is None or len(match.pairs) > max_n_pairs*0.2):
print("." * 79)
print()
match.show()
if first and output_pdb: # 2013-01-25 tt
if second_model_as_pdb_inp:
match.get_transformed_model2(output_pdb=output_pdb,
template_pdb_inp=second_model_as_pdb_inp,
f=sys.stdout)
else:
print("No output model as input model was not PDB")
first=False
if (max_n_pairs is None):
max_n_pairs = len(match.pairs)
if (__name__ == "__main__"):
run(args=sys.argv[1:])
|
2,908 | close |
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import multiprocessing
class FakeAsyncResult(object):
def __init__(self, X):
self.X = X
def wait(self, *a):
pass
def get(self, *a):
return self.X
def ready(self):
return True
def successful(self):
return True
class funcwrapper(object):
def __init__(self, func):
self.func = func
def __call__(self, *X):
#print 'Trying to call', self.func
#print 'with args', X
try:
return self.func(*X)
except:
import traceback
print('Exception while calling your function:')
print(' params:', X)
print(' exception:')
traceback.print_exc()
raise
class memberfuncwrapper(object):
def __init__(self, obj, funcname):
self.obj = obj
self.funcname = funcname
def __call__(self, *X):
        func = getattr(self.obj, self.funcname)
        #print 'Trying to call', func
        #print 'with args', X
        try:
            return func(*X)
except:
import traceback
print('Exception while calling your function:')
print(' object:', self.obj)
print(' member function:', self.funcname)
print(' ', func)
print(' params:', X)
print(' exception:')
traceback.print_exc()
raise
class multiproc(object):
def __init__(self, nthreads=1, init=None, initargs=[],
map_chunksize=1, pool=None, wrap_all=False):
self.wrap_all = wrap_all
if pool is not None:
self.pool = pool
self.applyfunc = self.pool.apply_async
else:
if nthreads == 1:
self.pool = None
# self.map = map
self.applyfunc = lambda f,a,k: f(*a, **k)
if init is not None:
init(*initargs)
else:
self.pool = multiprocessing.Pool(nthreads, init, initargs)
# self.map = self.pool.map
self.applyfunc = self.pool.apply_async
self.async_results = []
self.map_chunksize = map_chunksize
def map(self, f, args, chunksize=None, wrap=False):
cs = chunksize
if cs is None:
cs = self.map_chunksize
if self.pool:
if wrap or self.wrap_all:
f = funcwrapper(f)
#print 'pool.map: f', f
#print 'args', args
#print 'cs', cs
return self.pool.map(f, args, cs)
return list(map(f, args))
def map_async(self, func, iterable, wrap=False):
if self.pool is None:
return FakeAsyncResult(map(func, iterable))
if wrap or self.wrap_all:
return self.pool.map_async(funcwrapper(func), iterable)
return self.pool.map_async(func, iterable)
def imap(self, func, iterable, chunksize=None, wrap=False):
cs = chunksize
if cs is None:
cs = self.map_chunksize
if self.pool is None:
import itertools
if 'imap' in dir(itertools):
# py2
return itertools.imap(func, iterable)
else:
# py3
return map(func, iterable)
if wrap or self.wrap_all:
func = funcwrapper(func)
return self.pool.imap(func, iterable, chunksize=cs)
def imap_unordered(self, func, iterable, chunksize=None, wrap=False):
cs = chunksize
if cs is None:
cs = self.map_chunksize
if self.pool is None:
import itertools
if 'imap' in dir(itertools):
# py2
return itertools.imap(func, iterable)
else:
# py3
return map(func, iterable)
if wrap or self.wrap_all:
func = funcwrapper(func)
return self.pool.imap_unordered(func, iterable, chunksize=cs)
def apply(self, f, args, wrap=False, kwargs={}):
if self.pool is None:
return FakeAsyncResult(f(*args, **kwargs))
if wrap:
f = funcwrapper(f)
res = self.applyfunc(f, args, kwargs)
self.async_results.append(res)
return res
def waitforall(self):
print('Waiting for async results to finish...')
for r in self.async_results:
print(' waiting for', r)
r.wait()
print('all done')
self.async_results = []
def METHOD_NAME(self):
if self.pool is not None:
self.pool.METHOD_NAME()
self.pool = None
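# Usage sketch (the worker function is an assumption, not part of this module;
# on platforms that use the "spawn" start method, run it under an
# ``if __name__ == "__main__":`` guard):
#
#     def square(x):
#         return x * x
#
#     mp = multiproc(nthreads=4)
#     print(mp.map(square, range(10)))   # [0, 1, 4, 9, ...]
#     res = mp.apply(square, (7,))
#     mp.waitforall()
#     print(res.get())                   # 49
#     mp.METHOD_NAME()                   # shut down the worker pool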
|
2,909 | test reuse |
#!/usr/bin/env python
import threading
import time
from absl.testing import absltest
from grr_response_core.lib import rdfvalue
from grr.test_lib import test_lib
class FakeTimelineTest(absltest.TestCase):
def testRunSingleSleep(self):
log = []
def foo():
while True:
log.append("foo")
time.sleep(10)
thread = threading.Thread(name="foo-thread", target=foo)
with test_lib.FakeTimeline(thread) as foo_timeline:
log.append("start")
foo_timeline.Run(duration=rdfvalue.Duration.From(5, rdfvalue.SECONDS))
log.append("5 seconds have passed")
foo_timeline.Run(duration=rdfvalue.Duration.From(3, rdfvalue.SECONDS))
log.append("3 seconds have passed")
foo_timeline.Run(duration=rdfvalue.Duration.From(4, rdfvalue.SECONDS))
log.append("4 seconds have passed")
foo_timeline.Run(duration=rdfvalue.Duration.From(22, rdfvalue.SECONDS))
log.append("22 seconds have passed")
self.assertEqual(log, [
"start",
"foo",
"5 seconds have passed",
"3 seconds have passed",
"foo",
"4 seconds have passed",
"foo",
"foo",
"22 seconds have passed",
])
def testRunMultiSleep(self):
log = []
def barz():
while True:
time.sleep(10)
log.append("bar")
time.sleep(5)
log.append("baz")
thread = threading.Thread(name="barz=thread", target=barz)
with test_lib.FakeTimeline(thread) as barz_timeline:
log.append("start")
barz_timeline.Run(duration=rdfvalue.Duration.From(5, rdfvalue.SECONDS))
log.append("5 seconds have passed")
barz_timeline.Run(duration=rdfvalue.Duration.From(7, rdfvalue.SECONDS))
log.append("7 seconds have passed")
barz_timeline.Run(duration=rdfvalue.Duration.From(1, rdfvalue.SECONDS))
log.append("1 second has passed")
barz_timeline.Run(duration=rdfvalue.Duration.From(3, rdfvalue.SECONDS))
log.append("3 seconds have passed")
barz_timeline.Run(duration=rdfvalue.Duration.From(20, rdfvalue.SECONDS))
log.append("20 seconds have passed")
self.assertEqual(log, [
"start",
"5 seconds have passed",
"bar",
"7 seconds have passed",
"1 second has passed",
"baz",
"3 seconds have passed",
"bar",
"baz",
"20 seconds have passed",
])
def testRunSleepZero(self):
log = []
def norf():
time.sleep(0)
log.append("norf")
time.sleep(0)
log.append("norf")
time.sleep(0)
log.append("norf")
thread = threading.Thread(name="norf-thread", target=norf)
with test_lib.FakeTimeline(thread) as norf_timeline:
log.append("start")
norf_timeline.Run(duration=rdfvalue.Duration.From(0, rdfvalue.SECONDS))
log.append("rest")
norf_timeline.Run(duration=rdfvalue.Duration.From(0, rdfvalue.SECONDS))
log.append("stop")
self.assertEqual(log, [
"start",
"norf",
"norf",
"norf",
"rest",
"stop",
])
def testRunException(self):
log = []
def quux():
time.sleep(10)
log.append("foo")
time.sleep(10)
raise Exception("bar")
thread = threading.Thread(name="quux-thread", target=quux)
with test_lib.FakeTimeline(thread) as quux_timeline:
log.append("start")
quux_timeline.Run(duration=rdfvalue.Duration.From(6, rdfvalue.SECONDS))
log.append("6 seconds have passed")
quux_timeline.Run(duration=rdfvalue.Duration.From(5, rdfvalue.SECONDS))
log.append("5 seconds have passed")
quux_timeline.Run(duration=rdfvalue.Duration.From(7, rdfvalue.SECONDS))
log.append("7 seconds have passed")
self.assertEqual(log, [
"start",
"6 seconds have passed",
"foo",
"5 seconds have passed",
"7 seconds have passed",
])
with self.assertRaisesRegex(Exception, "bar"):
quux_timeline.Run(duration=rdfvalue.Duration.From(10, rdfvalue.SECONDS))
def testNoRuns(self):
log = []
def thud():
log.append("thud")
with test_lib.FakeTimeline(
threading.Thread(name="thud-thread", target=thud)):
pass
self.assertEqual(log, [])
def testRunAfterFinish(self):
log = []
def moof():
log.append("moof")
with test_lib.FakeTimeline(
threading.Thread(name="moof-thread", target=moof)) as moof_timeline:
moof_timeline.Run(duration=rdfvalue.Duration.From(10, rdfvalue.SECONDS))
moof_timeline.Run(duration=rdfvalue.Duration.From(20, rdfvalue.SECONDS))
moof_timeline.Run(duration=rdfvalue.Duration.From(30, rdfvalue.SECONDS))
self.assertEqual(log, ["moof"])
def testRunWithoutContext(self):
weez_timeline = test_lib.FakeTimeline(
threading.Thread(name="weez-thread", target=lambda: None))
with self.assertRaisesRegex(AssertionError, "called without context"):
weez_timeline.Run(duration=rdfvalue.Duration.From(10, rdfvalue.SECONDS))
def METHOD_NAME(self):
log = []
def blargh():
log.append("blargh")
blargh_timeline = test_lib.FakeTimeline(
threading.Thread(name="blargh-thread", target=blargh))
with blargh_timeline:
blargh_timeline.Run(duration=rdfvalue.Duration.From(5, rdfvalue.SECONDS))
self.assertEqual(log, ["blargh"])
with self.assertRaisesRegex(AssertionError, "cannot be reused"):
with blargh_timeline:
blargh_timeline.Run(
duration=rdfvalue.Duration.From(10, rdfvalue.SECONDS))
def testTimePassage(self):
log = []
def fhesh():
log.append(rdfvalue.RDFDatetime.Now().Format("%Y-%m-%d"))
time.sleep(
rdfvalue.Duration.From(2,
rdfvalue.DAYS).ToFractional(rdfvalue.SECONDS))
log.append(rdfvalue.RDFDatetime.Now().Format("%Y-%m-%d"))
time.sleep(
rdfvalue.Duration.From(15, rdfvalue.SECONDS).ToFractional(
rdfvalue.SECONDS))
log.append(rdfvalue.RDFDatetime.Now().Format("%Y-%m-%d %H:%M:%S"))
time.sleep(
rdfvalue.Duration.From(20, rdfvalue.MINUTES).ToFractional(
rdfvalue.SECONDS))
log.append(rdfvalue.RDFDatetime.Now().Format("%Y-%m-%d %H:%M:%S"))
fhesh_timeline = test_lib.FakeTimeline(
thread=threading.Thread(name="fhesh-thread", target=fhesh),
now=rdfvalue.RDFDatetime.FromHumanReadable("2077-01-01"))
with fhesh_timeline:
log.append("beep (0)")
fhesh_timeline.Run(duration=rdfvalue.Duration.From(10, rdfvalue.SECONDS))
log.append("beep (1)")
fhesh_timeline.Run(duration=rdfvalue.Duration.From(10, rdfvalue.SECONDS))
log.append("beep (2)")
fhesh_timeline.Run(duration=rdfvalue.Duration.From(2, rdfvalue.DAYS))
log.append("beep (3)")
fhesh_timeline.Run(duration=rdfvalue.Duration.From(10, rdfvalue.SECONDS))
log.append("beep (4)")
fhesh_timeline.Run(duration=rdfvalue.Duration.From(30, rdfvalue.MINUTES))
log.append("beep (5)")
self.assertEqual(log, [
"beep (0)",
"2077-01-01",
"beep (1)",
"beep (2)",
"2077-01-03",
"2077-01-03 00:00:15",
"beep (3)",
"beep (4)",
"2077-01-03 00:20:15",
"beep (5)",
])
if __name__ == "__main__":
absltest.main()
|
2,910 |
mongo data
|
import datetime
from syscore.exceptions import missingData
from syscore.constants import arg_not_supplied, success
from sysexecution.orders.named_order_objects import missing_order
from sysdata.mongodb.mongo_generic import mongoDataWithSingleKey
from sysexecution.orders.base_orders import Order
from sysexecution.orders.instrument_orders import instrumentOrder
from sysexecution.orders.contract_orders import contractOrder
from sysexecution.orders.broker_orders import brokerOrder
from syslogging.logger import *
from sysdata.production.historic_orders import (
genericOrdersData,
strategyHistoricOrdersData,
contractHistoricOrdersData,
brokerHistoricOrdersData,
)
from sysobjects.production.tradeable_object import (
instrumentStrategy,
futuresContractStrategy,
)
ORDER_ID_STORE_KEY = "_ORDER_ID_STORE_KEY"
class mongoGenericHistoricOrdersData(genericOrdersData):
"""
    Read and write data class for historic orders
"""
def _collection_name(self):
raise NotImplementedError("Need to inherit for a specific data type")
def _order_class(self):
raise NotImplementedError("Need to inherit for a specific data type")
def _name(self):
return "Historic orders"
def __init__(self, mongo_db=None, log=get_logger("mongoGenericHistoricOrdersData")):
# Not needed as we don't store anything in _state attribute used in parent class
        # If we did have _state, we would risk breakage if we forgot to override methods
# super().__init__()
collection_name = self._collection_name()
self._mongo_data = mongoDataWithSingleKey(
collection_name, "order_id", mongo_db=mongo_db
)
super().__init__(log=log)
@property
def METHOD_NAME(self):
return self._mongo_data
def __repr__(self):
return "%s (%s)" % (self._name, str(self.METHOD_NAME))
def add_order_to_data(self, order: Order, ignore_duplication: bool = False):
        # Duplicates are only overwritten when ignore_duplication is set, otherwise we raise
order_id = order.order_id
no_existing_order = self.get_order_with_orderid(order_id) is missing_order
if no_existing_order:
return self._add_order_to_data_no_checking(order)
else:
if ignore_duplication:
return self.update_order_with_orderid(order_id, order)
else:
raise Exception(
"Can't add order %s as order id %d already exists!"
% (str(order), order_id)
)
def _add_order_to_data_no_checking(self, order: Order):
        # Duplicates will be overwritten, so be careful
mongo_record = order.as_dict()
self.METHOD_NAME.add_data(order.order_id, mongo_record, allow_overwrite=True)
def get_order_with_orderid(self, order_id: int):
try:
result_dict = self.METHOD_NAME.get_result_dict_for_key(order_id)
except missingData:
return missing_order
order_class = self._order_class()
order = order_class.from_dict(result_dict)
return order
def _delete_order_with_orderid_without_checking(self, order_id):
self.METHOD_NAME.delete_data_without_any_warning(order_id)
def update_order_with_orderid(self, order_id, order):
mongo_record = order.as_dict()
self.METHOD_NAME.add_data(order_id, mongo_record)
def get_list_of_order_ids(self) -> list:
order_ids = self.METHOD_NAME.get_list_of_keys()
return order_ids
def get_list_of_order_ids_in_date_range(
self,
period_start: datetime.datetime,
period_end: datetime.datetime = arg_not_supplied,
) -> list:
if period_end is arg_not_supplied:
period_end = datetime.datetime.now()
find_dict = dict(fill_datetime={"$gte": period_start, "$lt": period_end})
list_of_order_dicts = self.METHOD_NAME.get_list_of_result_dict_for_custom_dict(
find_dict
)
order_ids = [order_dict["order_id"] for order_dict in list_of_order_dicts]
return order_ids
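    # For example (instance name and dates are illustrative), this issues a Mongo
    # range query against fill_datetime:
    #
    #     start = datetime.datetime(2023, 1, 1)
    #     order_ids = order_data.get_list_of_order_ids_in_date_range(start)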
class mongoStrategyHistoricOrdersData(
mongoGenericHistoricOrdersData, strategyHistoricOrdersData
):
def _collection_name(self):
return "_STRATEGY_HISTORIC_ORDERS"
def _order_class(self):
return instrumentOrder
def _name(self):
return "Historic instrument/strategy orders"
def get_list_of_order_ids_for_instrument_strategy(
self, instrument_strategy: instrumentStrategy
) -> list:
old_list_of_order_id = (
self._get_list_of_order_ids_for_instrument_strategy_specify_key(
instrument_strategy, "old_key"
)
)
new_list_of_order_id = (
self._get_list_of_order_ids_for_instrument_strategy_specify_key(
instrument_strategy, "key"
)
)
return old_list_of_order_id + new_list_of_order_id
def _get_list_of_order_ids_for_instrument_strategy_specify_key(
self, instrument_strategy: instrumentStrategy, keyfield: str
) -> list:
object_key = getattr(instrument_strategy, keyfield)
custom_dict = dict(key=object_key)
list_of_result_dicts = self.METHOD_NAME.get_list_of_result_dict_for_custom_dict(
custom_dict
)
list_of_order_id = [result["order_id"] for result in list_of_result_dicts]
return list_of_order_id
class mongoContractHistoricOrdersData(
mongoGenericHistoricOrdersData, contractHistoricOrdersData
):
def _collection_name(self):
return "_CONTRACT_HISTORIC_ORDERS"
def _order_class(self):
return contractOrder
def _name(self):
return "Historic contract orders"
class mongoBrokerHistoricOrdersData(
mongoGenericHistoricOrdersData, brokerHistoricOrdersData
):
def _collection_name(self):
return "_BROKER_HISTORIC_ORDERS"
def _order_class(self):
return brokerOrder
def _name(self):
return "Historic broker orders"
def get_list_of_order_ids_for_instrument_and_contract_str(
self, instrument_code: str, contract_str: str
) -> list:
order_id_list = self.get_list_of_order_ids()
key_list = [
self.METHOD_NAME.get_result_dict_for_key(order_id)["key"]
for order_id in order_id_list
]
contract_strategies = [
futuresContractStrategy.from_key(key) for key in key_list
]
def _contains_both(
futures_contract_strategy: futuresContractStrategy,
instrument_code: str,
contract_str: str,
):
list_of_date_str = futures_contract_strategy.contract_date.list_of_date_str
if (
futures_contract_strategy.instrument_code == instrument_code
and contract_str in list_of_date_str
):
return True
else:
return False
order_ids = [
orderid
for orderid, futures_contract_strategy in zip(
order_id_list, contract_strategies
)
if _contains_both(
futures_contract_strategy,
instrument_code=instrument_code,
contract_str=contract_str,
)
]
return order_ids
|
2,911 | atomic symlink |
#!/usr/bin/env python3
# This script lets you update a hierarchy of files in an atomic way by
# first creating a new hierarchy using rsync's --link-dest option, and
# then swapping the hierarchy into place. **See the usage message for
# more details and some important caveats!**
import os, sys, re, subprocess, shutil
ALT_DEST_ARG_RE = re.compile('^--[a-z][^ =]+-dest(=|$)')
RSYNC_PROG = '/usr/bin/rsync'
def main():
cmd_args = sys.argv[1:]
if '--help' in cmd_args:
usage_and_exit()
if len(cmd_args) < 2:
usage_and_exit(True)
dest_dir = cmd_args[-1].rstrip('/')
if dest_dir == '' or dest_dir.startswith('-'):
usage_and_exit(True)
if not os.path.isdir(dest_dir):
die(dest_dir, "is not a directory or a symlink to a dir.\nUse --help for help.")
bad_args = [ arg for arg in cmd_args if ALT_DEST_ARG_RE.match(arg) ]
if bad_args:
die("You cannot use the", ' or '.join(bad_args), "option with atomic-rsync.\nUse --help for help.")
# We ignore exit-code 24 (file vanished) by default.
allowed_exit_codes = '0 ' + os.environ.get('ATOMIC_RSYNC_OK_CODES', '24')
try:
allowed_exit_codes = set(int(num) for num in re.split(r'[, ]+', allowed_exit_codes) if num != '')
except ValueError:
die('Invalid integer in ATOMIC_RSYNC_OK_CODES:', allowed_exit_codes[2:])
symlink_content = os.readlink(dest_dir) if os.path.islink(dest_dir) else None
dest_arg = dest_dir
dest_dir = os.path.realpath(dest_dir) # The real destination dir with all symlinks dereferenced
if dest_dir == '/':
die('You must not use "/" as the destination directory.\nUse --help for help.')
old_dir = new_dir = None
if symlink_content is not None and dest_dir.endswith(('-1','-2')):
if not symlink_content.endswith(dest_dir[-2:]):
die("Symlink suffix out of sync with dest_dir name:", symlink_content, 'vs', dest_dir)
        num = 3 - int(dest_dir[-1])
old_dir = None
new_dir = dest_dir[:-1] + str(num)
symlink_content = symlink_content[:-1] + str(num)
else:
old_dir = dest_dir + '~old~'
new_dir = dest_dir + '~new~'
cmd_args[-1] = new_dir + '/'
if old_dir is not None and os.path.isdir(old_dir):
shutil.rmtree(old_dir)
if os.path.isdir(new_dir):
shutil.rmtree(new_dir)
child = subprocess.run([RSYNC_PROG, '--link-dest=' + dest_dir, *cmd_args])
if child.returncode not in allowed_exit_codes:
die('The rsync copy failed with code', child.returncode, exitcode=child.returncode)
if not os.path.isdir(new_dir):
die('The rsync copy failed to create:', new_dir)
if old_dir is None:
METHOD_NAME(symlink_content, dest_arg)
else:
os.rename(dest_dir, old_dir)
os.rename(new_dir, dest_dir)
def METHOD_NAME(target, link):
newlink = link + "~new~"
try:
        os.unlink(newlink)  # Just in case
except OSError:
pass
os.symlink(target, newlink)
os.rename(newlink, link)
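# For example, if /local/files currently points at files-1, switching it to
# files-2 boils down to the two calls above (paths are illustrative):
#
#     os.symlink("files-2", "/local/files~new~")
#     os.rename("/local/files~new~", "/local/files")  # rename() replaces the symlink atomically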
def usage_and_exit(use_stderr=False):
usage_msg = """\
Usage: atomic-rsync [RSYNC-OPTIONS] [HOST:]/SOURCE/DIR/ /DEST/DIR/
atomic-rsync [RSYNC-OPTIONS] HOST::MOD/DIR/ /DEST/DIR/
This script lets you update a hierarchy of files in an atomic way by first
creating a new hierarchy (using hard-links to leverage the existing files),
and then swapping the new hierarchy into place. You must be pulling files
to a local directory, and that directory must already exist. For example:
mkdir /local/files-1
ln -s files-1 /local/files
atomic-rsync -aiv host:/remote/files/ /local/files/
If /local/files is a symlink to a directory that ends in -1 or -2, the copy
will go to the alternate suffix and the symlink will be changed to point to
the new dir. This is a fully atomic update. If the destination is not a
symlink (or not a symlink to a *-1 or a *-2 directory), this will instead
create a directory with "~new~" suffixed, move the current directory to a
name with "~old~" suffixed, and then move the ~new~ directory to the original
destination name (this double rename is not fully atomic, but is rapid). In
both cases, the prior destination directory will be preserved until the next
update, at which point it will be deleted.
By default, rsync exit-code 24 (file vanished) is allowed without halting the
atomic update. If you want to change that, specify the environment variable
ATOMIC_RSYNC_OK_CODES with numeric values separated by spaces and/or commas.
Specify an empty string to only allow a successful copy. An override example:
ATOMIC_RSYNC_OK_CODES='23 24' atomic-rsync -aiv host:src/ dest/
See the errcode.h file for a list of all the exit codes.
See the "rsync" command for its list of options. You may not use the
--link-dest, --compare-dest, or --copy-dest options (since this script
uses --link-dest to make the transfer efficient).
"""
print(usage_msg, file=sys.stderr if use_stderr else sys.stdout)
sys.exit(1 if use_stderr else 0)
def die(*args, exitcode=1):
print(*args, file=sys.stderr)
sys.exit(exitcode)
if __name__ == '__main__':
main()
# vim: sw=4 et
|
2,912 |
test set multiple via indirect
|
# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import SkipTest
import pytest
from pyNN.space import Sphere, RandomStructure
import pyNN.spiNNaker as sim
from spinnaker_testbase import BaseTestCase
class TestPopulation(BaseTestCase):
# NO unittest_setup() as sim.setup is called
def test_properties(self):
n_neurons = 5
label = "pop_1"
sim.setup(timestep=1.0)
pop_1 = sim.Population(n_neurons, sim.IF_curr_exp(), label=label)
self.assertEqual(n_neurons, pop_1.size)
self.assertEqual(label, pop_1.label)
self.assertEqual(sim.IF_curr_exp, type(pop_1.celltype))
v_init = -60
pop_1.initialize(v=v_init)
initial_values = pop_1.initial_values
vs = initial_values["v"]
assert [-60, -60, -60, -60, -60] == vs
v_init = [-60 + index for index in range(n_neurons)]
pop_1.initialize(v=v_init)
initial_values = pop_1.initial_values
vs = initial_values["v"]
assert [-60, -59, -58, -57, -56] == vs
_ = pop_1.all_cells
_ = pop_1.local_cells
self.assertEqual(n_neurons, pop_1.local_size)
_ = pop_1.structure
sim.end()
def test_position_generator(self):
n_neurons = 5
label = "pop_1"
sim.setup(timestep=1.0)
pop_1 = sim.Population(n_neurons, sim.IF_curr_exp(), label=label,
structure=RandomStructure(Sphere(5.0)))
try:
gen = pop_1.position_generator
print(gen(0))
except NotImplementedError as e:
msg = "Depends on https://github.com/SpiNNakerManchester" \
"/sPyNNaker8/pull/73"
raise SkipTest(msg) from e
sim.end()
def test_set(self):
n_neurons = 5
label = "pop_1"
sim.setup(timestep=1.0)
pop_1 = sim.Population(n_neurons, sim.IF_curr_exp(), label=label)
pop_1.set(i_offset=2)
sim.end()
def test_set_multiple(self):
n_neurons = 5
label = "pop_1"
sim.setup(timestep=1.0)
pop_1 = sim.Population(n_neurons, sim.IF_curr_exp(), label=label)
pop_1.set(i_offset=[2, 3, 4, 5, 6])
sim.end()
def METHOD_NAME(self):
n_neurons = 5
label = "pop_1"
sim.setup(timestep=1.0)
pop_1 = sim.Population(
n_neurons, sim.IF_curr_exp(i_offset=0), label=label)
view = pop_1[0:3]
view.set(i_offset=[2, 3, 4])
sim.end()
def test_selector(self):
n_neurons = 5
label = "pop_1"
sim.setup(timestep=1.0)
pop_1 = sim.Population(n_neurons, sim.IF_curr_exp(), label=label)
pop_1.set(tau_m=2)
values = pop_1.get("tau_m")
self.assertEqual([2, 2, 2, 2, 2], values)
values = pop_1[1:3].get("tau_m")
self.assertEqual([2, 2], values)
pop_1[1:3].set(tau_m=3)
values = pop_1.get("tau_m")
self.assertEqual([2, 3, 3, 2, 2], values)
values = pop_1.get(["cm", "v_thresh"])
self.assertEqual([1.0, 1.0, 1.0, 1.0, 1.0], values['cm'])
self.assertEqual(
[-50.0, -50.0, -50.0, -50.0, -50.0], values["v_thresh"])
values = pop_1[1, 3, 4].get(["cm", "v_thresh"])
self.assertEqual([1.0, 1.0, 1.0], values['cm'])
self.assertEqual(
[-50.0, -50.0, -50.0], values["v_thresh"])
sim.end()
def test_init_by_in(self):
sim.setup(timestep=1.0)
pop = sim.Population(4, sim.IF_curr_exp())
assert [-65.0, -65.0, -65.0, -65.0] == pop.initial_values["v"]
pop[1:2].initialize(v=-60)
assert [-65, -60, -65, -65] == pop.initial_values["v"]
pop[2:3].initialize(v=12)
assert -60 == pop[1].get_initial_value("v")
sim.end()
def test_init_bad(self):
sim.setup(timestep=1.0)
pop = sim.Population(4, sim.IF_curr_exp())
with pytest.raises(Exception):
pop.set_initial_value(variable="NOT_THERE", value="Anything")
with pytest.raises(Exception):
pop.get_initial_value(variable="NOT_THERE")
sim.end()
def test_no_init(self):
sim.setup(timestep=1.0)
pop = sim.Population(4, sim.SpikeSourceArray())
with pytest.raises(KeyError):
pop.initialize(v="Anything")
with pytest.raises(KeyError):
_ = pop.initial_values
sim.end()
def test_initial_values(self):
sim.setup(timestep=1.0)
pop = sim.Population.create(
cellclass=sim.IF_curr_exp, cellparams=None, n=4)
initial_values = pop.initial_values
assert "v" in initial_values
initial_values = pop[3:4].initial_values
assert {"v": [-65], "isyn_exc": [0], "isyn_inh": [0]} == initial_values
sim.end()
def test_iter(self):
sim.setup(timestep=1.0)
pop = sim.Population(4, sim.IF_curr_exp(), label="a label")
iterator = iter(pop)
self.assertEqual(0, next(iterator).id)
self.assertEqual(1, next(iterator).id)
self.assertEqual(2, next(iterator).id)
self.assertEqual(3, next(iterator).id)
with pytest.raises(StopIteration):
next(iterator)
iterator = pop.all()
self.assertEqual(0, next(iterator).id)
self.assertEqual(1, next(iterator).id)
self.assertEqual(2, next(iterator).id)
self.assertEqual(3, next(iterator).id)
with pytest.raises(StopIteration):
next(iterator)
sim.end()
def test_base(self):
n_neurons = 5
label = "pop_1"
sim.setup(timestep=1.0)
pop_1 = sim.Population(n_neurons, sim.IF_curr_exp(), label=label)
assert n_neurons == pop_1.local_size
|
2,913 |
test custom evenv nested router
|
import pytest
from aiogram.dispatcher.event.bases import UNHANDLED, SkipHandler, skip
from aiogram.dispatcher.event.telegram import TelegramEventObserver
from aiogram.dispatcher.router import Router
class TestRouter:
def test_including_routers(self):
router1 = Router()
router2 = Router()
router3 = Router()
assert router1.parent_router is None
assert router2.parent_router is None
assert router3.parent_router is None
with pytest.raises(RuntimeError, match="Self-referencing routers is not allowed"):
router1.include_router(router1)
router1.include_router(router2)
with pytest.raises(RuntimeError, match="Router is already attached"):
router1.include_router(router2)
router2.include_router(router3)
with pytest.raises(RuntimeError, match="Circular referencing of Router is not allowed"):
router3.include_router(router1)
assert router1.parent_router is None
assert router1.sub_routers == [router2]
assert router2.parent_router is router1
assert router2.sub_routers == [router3]
assert router3.parent_router is router2
assert router3.sub_routers == []
def test_including_many_routers(self):
router = Router()
router1 = Router()
router2 = Router()
router.include_routers(router1, router2)
assert router.sub_routers == [router1, router2]
def test_including_many_routers_bad_type(self):
router = Router()
with pytest.raises(ValueError, match="At least one router must be provided"):
router.include_routers()
def test_include_router_by_string_bad_type(self):
router = Router()
with pytest.raises(ValueError, match=r"router should be instance of Router"):
router.include_router(self)
def test_set_parent_router_bad_type(self):
router = Router()
with pytest.raises(ValueError, match=r"router should be instance of Router"):
router.parent_router = object()
def test_observers_config(self):
router = Router()
assert router.observers["message"] == router.message
assert router.observers["edited_message"] == router.edited_message
assert router.observers["channel_post"] == router.channel_post
assert router.observers["edited_channel_post"] == router.edited_channel_post
assert router.observers["inline_query"] == router.inline_query
assert router.observers["chosen_inline_result"] == router.chosen_inline_result
assert router.observers["callback_query"] == router.callback_query
assert router.observers["shipping_query"] == router.shipping_query
assert router.observers["pre_checkout_query"] == router.pre_checkout_query
assert router.observers["poll"] == router.poll
async def test_emit_startup(self):
router1 = Router()
router2 = Router()
router1.include_router(router2)
results = []
@router1.startup()
async def startup1():
results.append(1)
@router2.startup()
async def startup2():
results.append(2)
await router2.emit_startup()
assert results == [2]
await router1.emit_startup()
assert results == [2, 1, 2]
async def test_emit_shutdown(self):
router1 = Router()
router2 = Router()
router1.include_router(router2)
results = []
@router1.shutdown()
async def shutdown1():
results.append(1)
@router2.shutdown()
async def shutdown2():
results.append(2)
await router2.emit_shutdown()
assert results == [2]
await router1.emit_shutdown()
assert results == [2, 1, 2]
def test_skip(self):
with pytest.raises(SkipHandler):
skip()
with pytest.raises(SkipHandler, match="KABOOM"):
skip("KABOOM")
async def test_global_filter_in_nested_router(self):
r1 = Router()
r2 = Router()
async def handler(evt):
return evt
r1.include_router(r2)
r1.message.filter(lambda evt: False)
r2.message.register(handler)
assert await r1.propagate_event(update_type="message", event=None) is UNHANDLED
async def test_router_chain_tail(self):
r1 = Router(name="Router 1")
r2_1 = Router(name="Router 2-1")
r2_2 = Router(name="Router 2-2")
r3 = Router(name="Router 3")
r1.include_router(r2_1)
r1.include_router(r2_2)
r2_1.include_router(r3)
assert tuple(r1.chain_tail) == (r1, r2_1, r3, r2_2)
assert tuple(r2_1.chain_tail) == (r2_1, r3)
assert tuple(r2_2.chain_tail) == (r2_2,)
assert tuple(r3.chain_tail) == (r3,)
async def test_router_chain_head(self):
r1 = Router(name="Router 1")
r2_1 = Router(name="Router 2-1")
r2_2 = Router(name="Router 2-2")
r3 = Router(name="Router 3")
r1.include_router(r2_1)
r1.include_router(r2_2)
r2_1.include_router(r3)
assert tuple(r1.chain_head) == (r1,)
assert tuple(r2_1.chain_head) == (r2_1, r1)
assert tuple(r2_2.chain_head) == (r2_2, r1)
assert tuple(r3.chain_head) == (r3, r2_1, r1)
async def METHOD_NAME(self):
r1 = Router()
r2 = Router()
r3 = Router()
r3.observers["custom-event"] = TelegramEventObserver(r3, event_name="custom-event")
async def handler(evt):
return evt
r1.include_router(r2)
r1.include_router(r3)
r3.observers["custom-event"].register(handler)
assert await r1.propagate_event(update_type="custom-event", event=None) is None
assert await r2.propagate_event(update_type="custom-event", event=None) is UNHANDLED
assert await r3.propagate_event(update_type="custom-event", event=None) is None
|
2,914 |
test jobs view expected job
|
#
# Copyright 2019-2023 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku service jobs view tests."""
import copy
import time
import uuid
from datetime import datetime
import jwt
import pytest
from marshmallow.utils import isoformat
from werkzeug.utils import secure_filename
from renku.ui.service.cache.models.project import Project
from renku.ui.service.errors import UserAnonymousError
from renku.ui.service.serializers.headers import JWT_TOKEN_SECRET, encode_b64
@pytest.mark.service
def test_jobs_view_identity_protected(svc_client):
"""Check serving of user requested jobs."""
headers = {
"Content-Type": "application/json",
"accept": "application/json",
}
response = svc_client.get("/jobs", headers=headers)
assert {"error"} == set(response.json.keys())
assert UserAnonymousError.code == response.json["error"]["code"]
@pytest.mark.service
def test_jobs_view_empty_result(svc_client, identity_headers):
"""Check empty result for user requested jobs."""
response = svc_client.get("/jobs", headers=identity_headers)
assert {"result"} == set(response.json.keys())
assert [] == response.json["result"]["jobs"]
@pytest.mark.service
def METHOD_NAME(svc_client_cache):
"""Check non-empty result for user requested job."""
svc_client, headers, cache = svc_client_cache
user_id = encode_b64(secure_filename("9ab2fc80-3a5c-426d-ae78-56de01d214df"))
user = cache.ensure_user({"user_id": user_id})
job_data = {
"job_id": uuid.uuid4().hex,
"state": "CREATED",
"renku_op": "dataset_import",
"created_at": isoformat(datetime.now()),
"updated_at": isoformat(datetime.now()),
"extras": {"progress": 42},
}
project = Project(
project_id="123", user_id=user.user_id, owner="renkumeister", name="testproject", slug="testproject"
)
project.abs_path.mkdir(parents=True, exist_ok=True)
project.save()
job = cache.make_job(user, project=project, job_data=job_data)
response = svc_client.get("/jobs", headers=headers)
assert 1 == len(response.json["result"]["jobs"])
assert {"job_id", "state", "created_at", "updated_at", "extras", "client_extras", "renku_op", "project"} == set(
response.json["result"]["jobs"][0].keys()
)
cache.invalidate_job(user, job.job_id)
response = svc_client.get("/jobs", headers=headers)
assert 0 == len(response.json["result"]["jobs"])
@pytest.mark.service
def test_jobs_view_check_exclusion(svc_client_cache):
"""Check non-empty result for user requested jobs."""
svc_client, headers, cache = svc_client_cache
user_id = encode_b64(secure_filename("9ab2fc80-3a5c-426d-ae78-56de01d214df"))
user = cache.ensure_user({"user_id": user_id})
excluded_user = cache.ensure_user({"user_id": "excluded_user"})
for _ in range(10):
job_data = {
"job_id": uuid.uuid4().hex,
"state": "CREATED",
"created_at": isoformat(datetime.utcnow()),
"updated_at": isoformat(datetime.utcnow()),
"extras": {"progress": 42},
"renku_op": "dataset_import",
}
project = Project(
project_id="123", user_id=user.user_id, owner="renkumeister", name="testproject", slug="testproject"
)
project.abs_path.mkdir(parents=True, exist_ok=True)
project.save()
job1 = cache.make_job(user, project=project, job_data=job_data)
assert job1
new_job = copy.deepcopy(job_data)
new_job["job_id"] = uuid.uuid4().hex
job2 = cache.make_job(excluded_user, project=project, job_data=new_job)
assert job2
assert job1.job_id != job2.job_id
response = svc_client.get("/jobs", headers=headers)
assert {"result"} == set(response.json.keys())
assert 10 == len(response.json["result"]["jobs"])
for job in response.json["result"]["jobs"]:
assert {"job_id", "state", "created_at", "updated_at", "extras", "client_extras", "renku_op", "project"} == set(
job.keys()
)
@pytest.mark.service
def test_job_details_auth(svc_client):
"""Check authorization for listing a specific job."""
headers = {
"Content-Type": "application/json",
"accept": "application/json",
}
response = svc_client.get("/jobs/myjob", headers=headers)
assert {"error"} == set(response.json.keys())
assert UserAnonymousError.code == response.json["error"]["code"]
@pytest.mark.service
def test_job_details_empty(svc_client, identity_headers):
"""Check job details for a user."""
response = svc_client.get("/jobs/myjob", headers=identity_headers)
assert {"result"} == set(response.json.keys())
assert response.json["result"] is None
@pytest.mark.service
def test_job_details_by_user(svc_client_with_user):
"""Check job details for a user."""
svc_client, headers, cache, user = svc_client_with_user
jobs = [
{
"job_id": uuid.uuid4().hex,
"state": "CREATED",
"created_at": isoformat(datetime.now()),
"updated_at": isoformat(datetime.now()),
"extras": {"progress": 42},
"renku_op": "dataset_import",
}
for _ in range(10)
]
project = Project(
project_id="123", user_id=user.user_id, owner="renkumeister", name="testproject", slug="testproject"
)
project.abs_path.mkdir(parents=True, exist_ok=True)
project.save()
for job_data in jobs:
cache.make_job(user, job_data=job_data, project=project)
jwt_data = {
"jti": "12345",
"exp": int(time.time()) + 1e6,
"nbf": 0,
"iat": 1595317694,
"iss": "https://stable.dev.renku.ch/auth/realms/Renku",
"aud": ["renku"],
"sub": "12345",
"typ": "ID",
"azp": "renku",
"nonce": "12345",
"auth_time": 1595317694,
"session_state": "12345",
"acr": "1",
"email_verified": False,
"preferred_username": "[email protected]",
"given_name": "user",
"family_name": "user one",
"name": "User One",
"email": "[email protected]",
}
excluded_user_headers = {
"Content-Type": "application/json",
"Renku-User": jwt.encode(jwt_data, JWT_TOKEN_SECRET, algorithm="HS256"),
"Authorization": headers["Authorization"],
}
for job in jobs:
response = svc_client.get("/jobs/{}".format(job["job_id"]), headers=headers)
assert response
assert job["job_id"] == response.json["result"]["job_id"]
response = svc_client.get("/jobs/{}".format(job["job_id"]), headers=excluded_user_headers)
assert response.json["result"] is None
|
2,915 |
count
|
# -*- coding: utf-8 -*-
"""
The /api/v1/attachments API implementation.
"""
from django.conf import settings
from django.core.files.storage import default_storage
from django.http import Http404
from django.utils.translation import gettext as _
from rest_framework import renderers, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from onadata.apps.api.permissions import AttachmentObjectPermissions
from onadata.apps.logger.models.attachment import Attachment
from onadata.apps.logger.models.xform import XForm
from onadata.libs import filters
from onadata.libs.mixins.authenticate_header_mixin import AuthenticateHeaderMixin
from onadata.libs.mixins.cache_control_mixin import CacheControlMixin
from onadata.libs.mixins.etags_mixin import ETagsMixin
from onadata.libs.pagination import StandardPageNumberPagination
from onadata.libs.renderers.renderers import (
MediaFileContentNegotiation,
MediaFileRenderer,
)
from onadata.libs.serializers.attachment_serializer import AttachmentSerializer
from onadata.libs.utils.image_tools import image_url
from onadata.libs.utils.viewer_tools import get_path
def get_attachment_data(attachment, suffix):
"""Returns attachment file contents."""
if suffix in list(settings.THUMB_CONF):
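        # The return value is discarded; image_url() is presumably called here
        # for its side effect of generating the resized thumbnail before it is
        # read back from storage below.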
image_url(attachment, suffix)
suffix = settings.THUMB_CONF.get(suffix).get("suffix")
f = default_storage.open(get_path(attachment.media_file.name, suffix))
return f.read()
return attachment.media_file.read()
# pylint: disable=too-many-ancestors
class AttachmentViewSet(
AuthenticateHeaderMixin,
CacheControlMixin,
ETagsMixin,
viewsets.ReadOnlyModelViewSet,
):
"""
GET, List attachments implementation.
"""
content_negotiation_class = MediaFileContentNegotiation
filter_backends = (filters.AttachmentFilter, filters.AttachmentTypeFilter)
lookup_field = "pk"
queryset = Attachment.objects.filter(
instance__deleted_at__isnull=True, deleted_at__isnull=True
)
permission_classes = (AttachmentObjectPermissions,)
serializer_class = AttachmentSerializer
pagination_class = StandardPageNumberPagination
renderer_classes = (
renderers.JSONRenderer,
renderers.BrowsableAPIRenderer,
MediaFileRenderer,
)
def retrieve(self, request, *args, **kwargs):
# pylint: disable=attribute-defined-outside-init
self.object = self.get_object()
if (
isinstance(request.accepted_renderer, MediaFileRenderer)
and self.object.media_file is not None
):
suffix = request.query_params.get("suffix")
try:
data = get_attachment_data(self.object, suffix)
except IOError as e:
if str(e).startswith("File does not exist"):
raise Http404() from e
raise ParseError(e) from e
else:
return Response(data, content_type=self.object.mimetype)
filename = request.query_params.get("filename")
serializer = self.get_serializer(self.object)
if filename:
if filename == self.object.media_file.name:
return Response(serializer.get_download_url(self.object))
raise Http404(_(f"Filename '{filename}' not found."))
return Response(serializer.data)
@action(methods=["GET"], detail=False)
def METHOD_NAME(self, request, *args, **kwargs):
"""Returns the number of attachments the user has access to."""
data = {"count": self.filter_queryset(self.get_queryset()).METHOD_NAME()}
return Response(data=data)
def list(self, request, *args, **kwargs):
if request.user.is_anonymous:
xform = request.query_params.get("xform")
if xform:
xform = XForm.objects.get(id=xform)
if not xform.shared_data:
raise Http404(_("Not Found"))
# pylint: disable=attribute-defined-outside-init
self.object_list = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(self.object_list)
if page is not None:
serializer = self.get_serializer(page, many=True)
return Response(serializer.data)
return super().list(request, *args, **kwargs)
|
2,916 |
ant iter
|
#! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2011 (ita)
"""
Common mistakes highlighting.
There is a performance impact, so this tool is only loaded when running ``waf -v``
"""
typos = {
'feature':'features',
'sources':'source',
'targets':'target',
'include':'includes',
'export_include':'export_includes',
'define':'defines',
'importpath':'includes',
'installpath':'install_path',
'iscopy':'is_copy',
'uses':'use',
}
meths_typos = ['__call__', 'program', 'shlib', 'stlib', 'objects']
import sys
from waflib import Logs, Build, Node, Task, TaskGen, ConfigSet, Errors, Utils
from waflib.Tools import ccroot
def check_same_targets(self):
mp = Utils.defaultdict(list)
uids = {}
def check_task(tsk):
if not isinstance(tsk, Task.Task):
return
if hasattr(tsk, 'no_errcheck_out'):
return
for node in tsk.outputs:
mp[node].append(tsk)
try:
uids[tsk.uid()].append(tsk)
except KeyError:
uids[tsk.uid()] = [tsk]
for g in self.groups:
for tg in g:
try:
for tsk in tg.tasks:
check_task(tsk)
except AttributeError:
# raised if not a task generator, which should be uncommon
check_task(tg)
dupe = False
for (k, v) in mp.items():
if len(v) > 1:
dupe = True
msg = '* Node %r is created more than once%s. The task generators are:' % (k, Logs.verbose == 1 and " (full message on 'waf -v -v')" or "")
Logs.error(msg)
for x in v:
if Logs.verbose > 1:
Logs.error(' %d. %r', 1 + v.index(x), x.generator)
else:
Logs.error(' %d. %r in %r', 1 + v.index(x), x.generator.name, getattr(x.generator, 'path', None))
Logs.error('If you think that this is an error, set no_errcheck_out on the task instance')
if not dupe:
for (k, v) in uids.items():
if len(v) > 1:
Logs.error('* Several tasks use the same identifier. Please check the information on\n https://waf.io/apidocs/Task.html?highlight=uid#waflib.Task.Task.uid')
tg_details = tsk.generator.name
if Logs.verbose > 2:
tg_details = tsk.generator
for tsk in v:
Logs.error(' - object %r (%r) defined in %r', tsk.__class__.__name__, tsk, tg_details)
def check_invalid_constraints(self):
feat = set()
for x in list(TaskGen.feats.values()):
feat.union(set(x))
for (x, y) in TaskGen.task_gen.prec.items():
feat.add(x)
feat.union(set(y))
ext = set()
for x in TaskGen.task_gen.mappings.values():
ext.add(x.__name__)
invalid = ext & feat
if invalid:
Logs.error('The methods %r have invalid annotations: @extension <-> @feature/@before_method/@after_method', list(invalid))
# the build scripts have been read, so we can check for invalid after/before attributes on task classes
for cls in list(Task.classes.values()):
if sys.hexversion > 0x3000000 and issubclass(cls, Task.Task) and isinstance(cls.hcode, str):
raise Errors.WafError('Class %r has hcode value %r of type <str>, expecting <bytes> (use Utils.h_cmd() ?)' % (cls, cls.hcode))
for x in ('before', 'after'):
for y in Utils.to_list(getattr(cls, x, [])):
if not Task.classes.get(y):
Logs.error('Erroneous order constraint %r=%r on task class %r', x, y, cls.__name__)
if getattr(cls, 'rule', None):
Logs.error('Erroneous attribute "rule" on task class %r (rename to "run_str")', cls.__name__)
def replace(m):
"""
Replaces existing BuildContext methods to verify parameter names,
for example ``bld(source=)`` has no ending *s*
"""
oldcall = getattr(Build.BuildContext, m)
def call(self, *k, **kw):
ret = oldcall(self, *k, **kw)
for x in typos:
if x in kw:
if x == 'iscopy' and 'subst' in getattr(self, 'features', ''):
continue
Logs.error('Fix the typo %r -> %r on %r', x, typos[x], ret)
return ret
setattr(Build.BuildContext, m, call)
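# Illustrative sketch (not part of the original tool): with this module loaded,
# a hypothetical wscript such as
#
#   def build(bld):
#       bld.program(sources='main.c', target='app')
#
# would log "Fix the typo 'sources' -> 'source'", because 'program' is one of
# the wrapped methods in meths_typos and 'sources' appears in the typos dict.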
def enhance_lib():
"""
Modifies existing classes and methods to enable error verification
"""
for m in meths_typos:
replace(m)
# catch '..' in ant_glob patterns
def ant_glob(self, *k, **kw):
if k:
lst = Utils.to_list(k[0])
for pat in lst:
sp = pat.split('/')
if '..' in sp:
Logs.error("In ant_glob pattern %r: '..' means 'two dots', not 'parent directory'", k[0])
if '.' in sp:
Logs.error("In ant_glob pattern %r: '.' means 'one dot', not 'current directory'", k[0])
return self.old_ant_glob(*k, **kw)
Node.Node.old_ant_glob = Node.Node.ant_glob
Node.Node.ant_glob = ant_glob
# catch ant_glob on build folders
def METHOD_NAME(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False):
if remove:
try:
if self.is_child_of(self.ctx.bldnode) and not quiet:
quiet = True
Logs.error('Calling ant_glob on build folders (%r) is dangerous: add quiet=True / remove=False', self)
except AttributeError:
pass
return self.old_ant_iter(accept, maxdepth, pats, dir, src, remove, quiet)
Node.Node.old_ant_iter = Node.Node.METHOD_NAME
Node.Node.METHOD_NAME = METHOD_NAME
# catch conflicting ext_in/ext_out/before/after declarations
old = Task.is_before
def is_before(t1, t2):
ret = old(t1, t2)
if ret and old(t2, t1):
Logs.error('Contradictory order constraints in classes %r %r', t1, t2)
return ret
Task.is_before = is_before
# check for bld(feature='cshlib') where no 'c' is given - this can be either a mistake or on purpose
# so we only issue a warning
def check_err_features(self):
lst = self.to_list(self.features)
if 'shlib' in lst:
Logs.error('feature shlib -> cshlib, dshlib or cxxshlib')
for x in ('c', 'cxx', 'd', 'fc'):
if not x in lst and lst and lst[0] in [x+y for y in ('program', 'shlib', 'stlib')]:
Logs.error('%r features is probably missing %r', self, x)
TaskGen.feature('*')(check_err_features)
# check for erroneous order constraints
def check_err_order(self):
if not hasattr(self, 'rule') and not 'subst' in Utils.to_list(self.features):
for x in ('before', 'after', 'ext_in', 'ext_out'):
if hasattr(self, x):
Logs.warn('Erroneous order constraint %r on non-rule based task generator %r', x, self)
else:
for x in ('before', 'after'):
for y in self.to_list(getattr(self, x, [])):
if not Task.classes.get(y):
Logs.error('Erroneous order constraint %s=%r on %r (no such class)', x, y, self)
TaskGen.feature('*')(check_err_order)
# check for @extension used with @feature/@before_method/@after_method
def check_compile(self):
check_invalid_constraints(self)
try:
ret = self.orig_compile()
finally:
check_same_targets(self)
return ret
Build.BuildContext.orig_compile = Build.BuildContext.compile
Build.BuildContext.compile = check_compile
# check for invalid build groups #914
def use_rec(self, name, **kw):
try:
y = self.bld.get_tgen_by_name(name)
except Errors.WafError:
pass
else:
idx = self.bld.get_group_idx(self)
odx = self.bld.get_group_idx(y)
if odx > idx:
msg = "Invalid 'use' across build groups:"
if Logs.verbose > 1:
msg += '\n target %r\n uses:\n %r' % (self, y)
else:
msg += " %r uses %r (try 'waf -v -v' for the full error)" % (self.name, name)
raise Errors.WafError(msg)
self.orig_use_rec(name, **kw)
TaskGen.task_gen.orig_use_rec = TaskGen.task_gen.use_rec
TaskGen.task_gen.use_rec = use_rec
# check for env.append
def _getattr(self, name, default=None):
if name == 'append' or name == 'add':
raise Errors.WafError('env.append and env.add do not exist: use env.append_value/env.append_unique')
elif name == 'prepend':
raise Errors.WafError('env.prepend does not exist: use env.prepend_value')
if name in self.__slots__:
return super(ConfigSet.ConfigSet, self).__getattr__(name, default)
else:
return self[name]
ConfigSet.ConfigSet.__getattr__ = _getattr
def options(opt):
"""
Error verification can be enabled by default (not just on ``waf -v``) by adding to the user script options
"""
enhance_lib()
|
2,917 |
test nonascii
|
from io import BytesIO
from translate.convert import po2tmx, test_convert
from translate.misc.xml_helpers import XML_NS
from translate.storage import tmx
class TestPO2TMX:
@staticmethod
def po2tmx(posource, sourcelanguage="en", targetlanguage="af", comment=None):
"""helper that converts po source to tmx source without requiring files"""
inputfile = BytesIO(posource.encode("utf-8"))
outputfile = BytesIO()
outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage)
po2tmx.convertpo(
inputfile,
outputfile,
templatefile=None,
sourcelanguage=sourcelanguage,
targetlanguage=targetlanguage,
comment=comment,
)
return outputfile.tmxfile
def test_basic(self):
minipo = r"""# Afrikaans translation of program ABC
#
msgid ""
msgstr ""
"Project-Id-Version: program 2.1-branch\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2006-01-09 07:15+0100\n"
"PO-Revision-Date: 2004-03-30 17:02+0200\n"
"Last-Translator: Zuza Software Foundation <[email protected]>\n"
"Language-Team: Afrikaans <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
# Please remember to do something
#: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4
msgid "Applications"
msgstr "Toepassings"
"""
tmx = self.po2tmx(minipo)
print("The generated xml:")
print(bytes(tmx))
assert tmx.translate("Applications") == "Toepassings"
assert tmx.translate("bla") is None
xmltext = bytes(tmx).decode("utf-8")
assert xmltext.index('creationtool="Translate Toolkit"')
assert xmltext.index("adminlang")
assert xmltext.index("creationtoolversion")
assert xmltext.index("datatype")
assert xmltext.index("o-tmf")
assert xmltext.index("segtype")
assert xmltext.index("srclang")
def test_sourcelanguage(self):
minipo = 'msgid "String"\nmsgstr "String"\n'
tmx = self.po2tmx(minipo, sourcelanguage="xh")
print("The generated xml:")
print(bytes(tmx))
header = tmx.document.find("header")
assert header.get("srclang") == "xh"
def test_targetlanguage(self):
minipo = 'msgid "String"\nmsgstr "String"\n'
tmx = self.po2tmx(minipo, targetlanguage="xh")
print("The generated xml:")
print(bytes(tmx))
tuv = tmx.document.findall(".//%s" % tmx.namespaced("tuv"))[1]
# tag[0] will be the source, we want the target tuv
assert tuv.get("{%s}lang" % XML_NS) == "xh"
def test_multiline(self):
"""Test multiline po entry"""
minipo = r'''msgid "First part "
"and extra"
msgstr "Eerste deel "
"en ekstra"'''
tmx = self.po2tmx(minipo)
print("The generated xml:")
print(bytes(tmx))
assert tmx.translate("First part and extra") == "Eerste deel en ekstra"
def test_escapednewlines(self):
"""Test the escaping of newlines"""
minipo = r"""msgid "First line\nSecond line"
msgstr "Eerste lyn\nTweede lyn"
"""
tmx = self.po2tmx(minipo)
print("The generated xml:")
print(bytes(tmx))
assert tmx.translate("First line\nSecond line") == "Eerste lyn\nTweede lyn"
def test_escapedtabs(self):
"""Test the escaping of tabs"""
minipo = r"""msgid "First column\tSecond column"
msgstr "Eerste kolom\tTweede kolom"
"""
tmx = self.po2tmx(minipo)
print("The generated xml:")
print(bytes(tmx))
assert (
tmx.translate("First column\tSecond column") == "Eerste kolom\tTweede kolom"
)
def test_escapedquotes(self):
"""Test the escaping of quotes (and slash)"""
minipo = r"""msgid "Hello \"Everyone\""
msgstr "Good day \"All\""
msgid "Use \\\"."
msgstr "Gebruik \\\"."
"""
tmx = self.po2tmx(minipo)
print("The generated xml:")
print(bytes(tmx))
assert tmx.translate('Hello "Everyone"') == 'Good day "All"'
assert tmx.translate(r"Use \".") == r"Gebruik \"."
def test_exclusions(self):
"""Test that empty and fuzzy messages are excluded"""
minipo = r"""#, fuzzy
msgid "One"
msgstr "Een"
msgid "Two"
msgstr ""
msgid ""
msgstr "Drie"
"""
tmx = self.po2tmx(minipo)
print("The generated xml:")
print(bytes(tmx))
assert b"<tu" not in bytes(tmx)
assert len(tmx.units) == 0
def METHOD_NAME(self):
"""Tests that non-ascii conversion works."""
minipo = """msgid "Bézier curve"
msgstr "Bézier-kurwe"
"""
tmx = self.po2tmx(minipo)
print(bytes(tmx))
assert tmx.translate("Bézier curve") == "Bézier-kurwe"
def test_nonecomments(self):
"""Tests that none comments are imported."""
minipo = """#My comment rules
msgid "Bézier curve"
msgstr "Bézier-kurwe"
"""
tmx = self.po2tmx(minipo)
print(bytes(tmx))
unit = tmx.findunits("Bézier curve")
assert len(unit[0].getnotes()) == 0
def test_otherscomments(self):
"""Tests that others comments are imported."""
minipo = """#My comment rules
msgid "Bézier curve"
msgstr "Bézier-kurwe"
"""
tmx = self.po2tmx(minipo, comment="others")
print(bytes(tmx))
unit = tmx.findunits("Bézier curve")
assert unit[0].getnotes() == "My comment rules"
def test_sourcecomments(self):
"""Tests that source comments are imported."""
minipo = """#: ../PuzzleFourSided.h:45
msgid "Bézier curve"
msgstr "Bézier-kurwe"
"""
tmx = self.po2tmx(minipo, comment="source")
print(bytes(tmx))
unit = tmx.findunits("Bézier curve")
assert unit[0].getnotes() == "../PuzzleFourSided.h:45"
def test_typecomments(self):
"""Tests that others comments are imported."""
minipo = """#, csharp-format
msgid "Bézier curve"
msgstr "Bézier-kurwe"
"""
tmx = self.po2tmx(minipo, comment="type")
print(bytes(tmx))
unit = tmx.findunits("Bézier curve")
assert unit[0].getnotes() == "csharp-format"
class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX):
"""Tests running actual po2tmx commands on files"""
convertmodule = po2tmx
expected_options = [
"-l LANG, --language=LANG",
"--source-language=LANG",
"--comments",
]
|
2,918 |
xform inputs
|
#!/usr/bin/env python
#
# weathering.py
#
# - module to compute the weathering of an oil that contains one or more
# "pseudo components".
#
# Built-in Oil Types are in the OilTypes dict.
#
# NOTE:
# Right now we will support the three most common exponential decay methods.
# These are:
# - half life
# - the amount of time required for a quantity to fall to half its value
# - Basically our calculation is M_0 * (half ** (time / t_half))
# - mean lifetime (tau)
# - Average length of time that an element remains in the set.
# - This is probably not as popular as half life, but we should cover it
# just in case.
# - Basically our calculation is M_0 * np.exp(-time / tau)
# half-life = tau * np.log(2)
# tau = half-life / np.log(2)
# - decay constant (lambda)
# - Exponential positive constant value which solves the differential
# rate of change for our decaying quantity.
# - This is probably not as popular as half life, but we should cover it
# just in case.
# - Basically our calculation is M_0 * np.exp(-time * lambda)
# half-life = np.log(2) / lambda
# lambda * half-life = np.log(2)
# lambda = np.log(2) / half-life
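#
# Worked example (added for illustration): for a half-life of 10 time units,
#   tau = 10 / np.log(2) ~ 14.43
#   lambda = np.log(2) / 10 ~ 0.0693
# so M_0 * (0.5 ** (t / 10)), M_0 * np.exp(-t / 14.43) and
# M_0 * np.exp(-t * 0.0693) all describe the same decay.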
from collections import namedtuple
import numpy
np = numpy
WeatheringComponent = namedtuple('WeatheringComponent',
''' fraction,
factor,
''')
class weather_curve:
'''
This is an object designed to compute the weathering of an oil
that contains one or more "pseudo components".
- Each pseudo component is assumed to be a known substance that
has a known rate of decay that can be expressed using an
exponential decay function.
- Each pseudo component has a quantitative value that represents a
fraction of a total mass that adds up to 1.0. Thus, we require that
the sum of the component mass fractions adhere to this constraint.
- It is assumed that all components have exponential decay factors
that are solvable using a common functional method.
- Right now we support the three most common exponential decay methods.
These are:
- half life. This is the amount of time required for a quantity to
fall to half its value
- mean lifetime. This is the average length of time that an element
remains in the set.
- decay constant. Positive constant value which solves the
differential rate of change for our decaying quantity.
'''
def __init__(self, components, method="halflife"):
'''
:param components: The properties of each component.
:type components: Sequence of WeatheringComponents
(WC1, WC2, WC3, ....WCi).
The sum of the component fractional values must
add up to 1.0
:param method: the method in which the decay_factor is to be used.
:type method: set({'halflife', 'mean-lifetime', 'decay-constant'})
'''
fractions, factors = zip(*components)
self.fractions = np.asarray(fractions, dtype=np.float32).reshape(-1,)
self.factors = np.asarray(factors, dtype=np.float32).reshape(-1,)
        # only six digits, because float32
if round(self.fractions.sum(), 6) != 1.0:
raise ValueError('The sum of our components {0} must add up '
'to one'.format(self.fractions.sum()))
methods = {'halflife': self._halflife,
'mean-lifetime': self._mean_lifetime,
'decay-constant': self._decay_constant,
}
self.method = methods[method]
def METHOD_NAME(self, M_0, time):
'''
make sure our mass and time arguments are a good fit
for our calculations
'''
M_0 = np.asarray(M_0, dtype=np.float32).reshape(-1, 1)
time = np.asarray(time, dtype=np.float32).reshape(-1, 1)
return M_0, time
def _halflife(self, M_0, time):
'Assumes our factors are half-life values'
half = np.float32(0.5)
self.total_mass = (self.fractions * M_0) * (half ** (time / self.factors))
return self.total_mass.sum(1)
def _mean_lifetime(self, M_0, time):
'Assumes our factors are mean lifetime values (tau)'
self.total_mass = (self.fractions * M_0) * np.exp(-time / self.factors)
return self.total_mass.sum(1)
def _decay_constant(self, M_0, time):
'Assumes our factors are decay constant values'
self.total_mass = (self.fractions * M_0) * np.exp(-time * self.factors)
return self.total_mass.sum(1)
def weather(self, M_0, time):
'Compute the decayed mass at time specified'
M_0, time = self.METHOD_NAME(M_0, time)
return self.method(M_0, time)
## Parameters for combined weathering and bio-degradation for "medium crude"
## used for FL Staits TAP analysis
mass_fractions = [0.25, 0.1, 0.107, 0.2, 0.186, 0.109, 0.048]
combined_half_lives = [21.0, 422.0, 2.0, 1358.0, 1982.0, 7198.0, 14391.0]
OilTypes = {None: None,
# Medium Crude parameters from OSSM
'MediumCrude': weather_curve(((.22, 14.4),
(.26, 48.6),
(.52, 1e9)),
),
"FL_Straits_MediumCrude": weather_curve(zip(mass_fractions,
combined_half_lives)),
}
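# Minimal usage sketch added for illustration (the mass and time values are
# arbitrary): weather 100 mass units with the built-in 'MediumCrude' curve at
# a few time points, expressed in the same units as the half-lives above.
if __name__ == '__main__':
    example_curve = OilTypes['MediumCrude']
    # Remaining total mass at t = 0, 24 and 48 time units.
    print(example_curve.weather(100.0, [0.0, 24.0, 48.0]))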
|
2,919 |
test parser provider hook add list
|
import shutil
import pytest
import os
import subprocess
import sys
from tackle.main import tackle
from tackle.settings import settings
@pytest.fixture()
def temporary_uninstall():
"""Fixture to uninstall a package and install it after the test."""
def f(package):
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"uninstall",
"--quiet",
"--disable-pip-version-check",
"-y",
package,
]
)
return f
def test_parser_provider_import_installs_requirements(
change_curdir_fixtures, temporary_uninstall
):
"""Validate that if a package is missing, that it will be installed and usable."""
temporary_uninstall('requests')
try:
import requests
        # Fails in CI - requests appears to be available from the system python
        # there, whereas locally this assert works.
# assert False
except ImportError:
assert True
tackle('test-install-dep.yaml')
import requests # noqa
assert requests
def test_parser_provider_import_requirements_installs_requirements(
chdir, temporary_uninstall
):
"""Validate that if a package is missing, that it will be installed and usable."""
chdir(os.path.join('fixtures', 'test-provider-reqs'))
temporary_uninstall('art')
try:
import art
assert False
except ImportError:
assert True
o = tackle()
temporary_uninstall('art')
assert o['art']
def test_parser_hooks_raises_error_on_unknown_hook_type(change_curdir_fixtures):
"""Verify raising error.
Verify that the hook parser raises the right error when the hook type is
not in the providers.
"""
from tackle.exceptions import UnknownHookTypeException
with pytest.raises(UnknownHookTypeException):
tackle('unknown-hook-type.yaml')
def test_parser_provider_hook_add(change_curdir_fixtures):
"""Validate adding providers.
Validate that you can give a `__provider` key to point to
additional providers and make them available as a type.
"""
os.chdir('test-provider')
o = tackle('context_provider.yaml')
assert o['things'] == 'bar'
def METHOD_NAME(change_curdir_fixtures):
"""Validate that hooks are imported when called from the same directory."""
o = tackle('context-provider-list.yaml')
assert o['things'] == 'bar'
assert o['stuff'] == 'bar'
@pytest.mark.parametrize("input_file", [None, "file.yaml"])
def test_providers_local_hooks_dir(chdir_fixture, input_file):
"""
Check that when you call from a child dir that you import the hooks from the dir
with the file being called. Tests both the case that the parent file is a
tackle.yaml file and that when you import the file via a tackle hook, that the
hooks are imported and usable.
"""
chdir_fixture(os.path.join('child', 'dir'))
o = tackle(input_file)
assert o['do'] == 'foo'
@pytest.fixture()
def remove_provider():
"""Fixture to remove a provider."""
def f(provider):
provider = provider.split('/')
provider_path = os.path.join(settings.provider_dir, *provider)
if os.path.exists(provider_path):
shutil.rmtree(provider_path)
return f
def test_providers_released(chdir_fixture, remove_provider):
"""
Check that when we call a released provider, that we only use commits from the
latest release and not anything that was added after the release. Check the external
fixture for details.
"""
remove_provider("robcxyz/tackle-fixture-released")
o = tackle("robcxyz/tackle-fixture-released")
assert 'released_added_later' not in o
assert o['released_hook'] == 'foo'
def test_providers_released_latest(chdir_fixture, remove_provider):
"""
    Check that when we call a released provider with the latest flag, we use
    the latest commit.
"""
remove_provider("robcxyz/tackle-fixture-released")
o = tackle(
"robcxyz/tackle-fixture-released",
latest=True,
)
assert 'released_added_later' in o
assert o['released_hook'] == 'foo'
# Then test that when we run the provider again that it uses the latest release.
o = tackle("robcxyz/tackle-fixture-released")
assert 'released_added_later' not in o
assert o['released_hook'] == 'foo'
def test_providers_unreleased_import(chdir_fixture, remove_provider):
"""Check that we can use an unreleased provider."""
remove_provider("robcxyz/tackle-fixture-unreleased")
o = tackle("robcxyz/tackle-fixture-unreleased", no_input=True)
assert o['this'] == 'that'
def test_providers_hook_dirs(change_dir):
"""Check that we can import hooks by supplying a directory."""
o = tackle("hook-dirs.yaml", hook_dirs=[str(os.path.join('fixtures', '.hooks'))])
assert o['t'] == 'things'
|
2,920 |
test raw vs encoding none
|
"""
tests.unit.payload_test
~~~~~~~~~~~~~~~~~~~~~~~
"""
import copy
import datetime
import logging
import salt.exceptions
import salt.payload
from salt.defaults import _Constant
from salt.utils import immutabletypes
from salt.utils.odict import OrderedDict
log = logging.getLogger(__name__)
def assert_no_ordered_dict(data):
if isinstance(data, OrderedDict):
raise AssertionError("Found an ordered dictionary")
if isinstance(data, dict):
for value in data.values():
assert_no_ordered_dict(value)
elif isinstance(data, (list, tuple)):
for chunk in data:
assert_no_ordered_dict(chunk)
def test_list_nested_odicts():
idata = {"pillar": [OrderedDict(environment="dev")]}
odata = salt.payload.loads(salt.payload.dumps(idata.copy()))
assert_no_ordered_dict(odata)
assert idata == odata
def test_datetime_dump_load():
"""
Check the custom datetime handler can understand itself
"""
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
idata = {dtvalue: dtvalue}
sdata = salt.payload.dumps(idata.copy())
odata = salt.payload.loads(sdata)
assert (
sdata
== b"\x81\xc7\x18N20010203T04:05:06.000007\xc7\x18N20010203T04:05:06.000007"
)
assert idata == odata
def test_verylong_dump_load():
"""
Test verylong encoder/decoder
"""
idata = {"jid": 20180227140750302662}
sdata = salt.payload.dumps(idata.copy())
odata = salt.payload.loads(sdata)
idata["jid"] = "{}".format(idata["jid"])
assert idata == odata
def test_immutable_dict_dump_load():
"""
Test immutable dict encoder/decoder
"""
idata = {"dict": {"key": "value"}}
sdata = salt.payload.dumps({"dict": immutabletypes.ImmutableDict(idata["dict"])})
odata = salt.payload.loads(sdata)
assert idata == odata
def test_immutable_list_dump_load():
"""
Test immutable list encoder/decoder
"""
idata = {"list": [1, 2, 3]}
sdata = salt.payload.dumps({"list": immutabletypes.ImmutableList(idata["list"])})
odata = salt.payload.loads(sdata)
assert idata == odata
def test_immutable_set_dump_load():
"""
Test immutable set encoder/decoder
"""
idata = {"set": ["red", "green", "blue"]}
sdata = salt.payload.dumps({"set": immutabletypes.ImmutableSet(idata["set"])})
odata = salt.payload.loads(sdata)
assert idata == odata
def test_odict_dump_load():
"""
    Test odict just works. It wasn't supported until msgpack 0.2.0
"""
data = OrderedDict()
data["a"] = "b"
data["y"] = "z"
data["j"] = "k"
data["w"] = "x"
sdata = salt.payload.dumps({"set": data})
odata = salt.payload.loads(sdata)
assert {"set": dict(data)}, odata
def test_mixed_dump_load():
"""
Test we can handle all exceptions at once
"""
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
od = OrderedDict()
od["a"] = "b"
od["y"] = "z"
od["j"] = "k"
od["w"] = "x"
idata = {
dtvalue: dtvalue, # datetime
"jid": 20180227140750302662, # long int
"dict": immutabletypes.ImmutableDict({"key": "value"}), # immutable dict
"list": immutabletypes.ImmutableList([1, 2, 3]), # immutable list
"set": immutabletypes.ImmutableSet(("red", "green", "blue")), # immutable set
"odict": od, # odict
}
edata = {
dtvalue: dtvalue, # datetime, == input
"jid": "20180227140750302662", # string repr of long int
"dict": {"key": "value"}, # builtin dict
"list": [1, 2, 3], # builtin list
"set": ["red", "green", "blue"], # builtin set
"odict": dict(od), # builtin dict
}
sdata = salt.payload.dumps(idata)
odata = salt.payload.loads(sdata)
assert edata == odata
def test_recursive_dump_load():
"""
Test recursive payloads are (mostly) serialized
"""
data = {"name": "roscivs"}
data["data"] = data # Data all the things!
sdata = salt.payload.dumps(data)
odata = salt.payload.loads(sdata)
assert "recursion" in odata["data"].lower()
def test_recursive_dump_load_with_identical_non_recursive_types():
"""
If identical objects are nested anywhere, they should not be
marked recursive unless they're one of the types we iterate
over.
"""
repeating = "repeating element"
data = {
"a": "a", # Test CPython implementation detail. Short
"b": "a", # strings are interned.
"c": 13, # So are small numbers.
"d": 13,
"fnord": repeating,
# Let's go for broke and make a crazy nested structure
"repeating": [
[[[[{"one": repeating, "two": repeating}], repeating, 13, "a"]]],
repeating,
repeating,
repeating,
],
}
# We need a nested dictionary to trigger the exception
data["repeating"][0][0][0].append(data)
# If we don't deepcopy the data it gets mutated
sdata = salt.payload.dumps(copy.deepcopy(data))
odata = salt.payload.loads(sdata)
# Delete the recursive piece - it's served its purpose, and our
# other test tests that it's actually marked as recursive.
del odata["repeating"][0][0][0][-1], data["repeating"][0][0][0][-1]
assert odata == data
def METHOD_NAME():
"""
Test that we handle the new raw parameter in 5.0.2 correctly based on
encoding. When encoding is None loads should return bytes
"""
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
idata = {dtvalue: "strval"}
sdata = salt.payload.dumps(idata.copy())
odata = salt.payload.loads(sdata, encoding=None)
assert isinstance(odata[dtvalue], str)
def test_raw_vs_encoding_utf8():
"""
Test that we handle the new raw parameter in 5.0.2 correctly based on
encoding. When encoding is utf-8 loads should return unicode
"""
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
idata = {dtvalue: "strval"}
sdata = salt.payload.dumps(idata.copy())
odata = salt.payload.loads(sdata, encoding="utf-8")
assert isinstance(odata[dtvalue], str)
def test_constants():
"""
    Test that we handle encoding and decoding of constants.
"""
constant = _Constant("Foo", "bar")
sdata = salt.payload.dumps(constant)
odata = salt.payload.loads(sdata)
assert odata == constant
|
2,921 |
dispatch command
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Readline-Based Command-Line Interface of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import readline
import six
from tensorflow.python.debug.cli import base_ui
from tensorflow.python.debug.cli import debugger_cli_common
class ReadlineUI(base_ui.BaseUI):
"""Readline-based Command-line UI."""
def __init__(self, on_ui_exit=None, config=None):
base_ui.BaseUI.__init__(self, on_ui_exit=on_ui_exit, config=config)
self._init_input()
def _init_input(self):
readline.parse_and_bind("set editing-mode emacs")
# Disable default readline delimiter in order to receive the full text
# (not just the last word) in the completer.
readline.set_completer_delims("\n")
readline.set_completer(self._readline_complete)
readline.parse_and_bind("tab: complete")
self._input = six.moves.input
def _readline_complete(self, text, state):
context, prefix, except_last_word = self._analyze_tab_complete_input(text)
candidates, _ = self._tab_completion_registry.get_completions(context,
prefix)
candidates = [(except_last_word + candidate) for candidate in candidates]
return candidates[state]
def run_ui(self,
init_command=None,
title=None,
title_color=None,
enable_mouse_on_start=True):
"""Run the CLI: See the doc of base_ui.BaseUI.run_ui for more details."""
print(title)
if init_command is not None:
self.METHOD_NAME(init_command)
exit_token = self._ui_loop()
if self._on_ui_exit:
self._on_ui_exit()
return exit_token
def _ui_loop(self):
while True:
command = self._get_user_command()
exit_token = self.METHOD_NAME(command)
if exit_token is not None:
return exit_token
def _get_user_command(self):
print("")
return self._input(self.CLI_PROMPT).strip()
def METHOD_NAME(self, command):
"""Dispatch user command.
Args:
command: (str) Command to dispatch.
Returns:
An exit token object. None value means that the UI loop should not exit.
A non-None value means the UI loop should exit.
"""
if command in self.CLI_EXIT_COMMANDS:
# Explicit user command-triggered exit: EXPLICIT_USER_EXIT as the exit
# token.
return debugger_cli_common.EXPLICIT_USER_EXIT
try:
prefix, args, output_file_path = self._parse_command(command)
except SyntaxError as e:
print(str(e))
return
if self._command_handler_registry.is_registered(prefix):
try:
screen_output = self._command_handler_registry.dispatch_command(
prefix, args, screen_info=None)
except debugger_cli_common.CommandLineExit as e:
return e.exit_token
else:
screen_output = debugger_cli_common.RichTextLines([
self.ERROR_MESSAGE_PREFIX + "Invalid command prefix \"%s\"" % prefix
])
self._display_output(screen_output)
if output_file_path:
try:
screen_output.write_to_file(output_file_path)
print("Wrote output to %s" % output_file_path)
except Exception: # pylint: disable=broad-except
print("Failed to write output to %s" % output_file_path)
def _display_output(self, screen_output):
for line in screen_output.lines:
print(line)
|
2,922 |
delete objects
|
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for the object_storage_service benchmark worker process."""
import time
import unittest
import object_storage_api_tests # noqa: importing for flags
import object_storage_interface
import validate_service
class MockObjectStorageService(object_storage_interface.ObjectStorageServiceBase): # noqa
def __init__(self):
self.bucket = None
self.objects = {}
def _CheckBucket(self, bucket):
"""Make sure that we are only passed one bucket name.
Args:
bucket: the name of a bucket.
Raises: ValueError, if this object has been passed a different
bucket name previously.
"""
if self.bucket is None:
self.bucket = bucket
elif self.bucket != bucket:
raise ValueError(
'MockObjectStorageService passed two bucket names: %s and %s' %
(self.bucket, bucket))
def ListObjects(self, bucket, prefix):
self._CheckBucket(bucket)
return [value
for name, value in self.objects.iteritems()
if name.startswith(prefix)]
def METHOD_NAME(self, bucket, objects_to_delete, objects_deleted=None):
self._CheckBucket(bucket)
for name in objects_to_delete:
if name in self.objects:
del self.objects[name]
if objects_deleted is not None:
objects_deleted.append(name)
def WriteObjectFromBuffer(self, bucket, object, stream, size):
self._CheckBucket(bucket)
stream.seek(0)
self.objects[object] = stream.read(size)
return time.time(), 0.001
def ReadObject(self, bucket, object):
self._CheckBucket(bucket)
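        # Index into the stored objects purely so that a missing key raises
        # KeyError, mimicking a failed read; the data itself is not used.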
self.objects[object]
return time.time(), 0.001
class TestScenarios(unittest.TestCase):
"""Test that the benchmark scenarios complete.
Specifically, given a correctly operating service
(MockObjectStorageService), verify that the benchmarking scenarios
run to completion without raising an exception.
"""
def setUp(self):
self.FLAGS = object_storage_api_tests.FLAGS
self.FLAGS([])
self.objects_written_file = self.FLAGS.objects_written_file
self.FLAGS.objects_written_file = '/tmp/objects-written'
def tearDown(self):
self.FLAGS.objects_written_file = self.objects_written_file
def testOneByteRW(self):
object_storage_api_tests.OneByteRWBenchmark(MockObjectStorageService())
def testListConsistency(self):
object_storage_api_tests.ListConsistencyBenchmark(
MockObjectStorageService())
def testSingleStreamThroughput(self):
object_storage_api_tests.SingleStreamThroughputBenchmark(
MockObjectStorageService())
def testCleanupBucket(self):
object_storage_api_tests.CleanupBucket(MockObjectStorageService())
def testMultiStreamWriteAndRead(self):
service = MockObjectStorageService()
# Have to sequence MultiStreamWrites and MultiStreamReads because
# MultiStreamReads will read from the objects_written_file that
# MultiStreamWrites generates.
object_storage_api_tests.MultiStreamWrites(service)
object_storage_api_tests.MultiStreamReads(service)
class TestValidateService(unittest.TestCase):
"""Validate the ValidateService script."""
def setUp(self):
self.FLAGS = object_storage_api_tests.FLAGS
self.FLAGS([])
self.objects_written_file = self.FLAGS.objects_written_file
self.FLAGS.objects_written_file = '/tmp/objects-written'
def testValidateService(self):
validate_service.ValidateService(MockObjectStorageService())
if __name__ == '__main__':
unittest.main()
|
2,923 |
setup 30 days of stats for chart
|
import functools
import logging
import faker
import pytest
from dateutil.relativedelta import relativedelta
from django import urls
from django.utils import timezone
from waffle.testutils import override_flag
from ....accounts import models as ac_models
from ... import choices as gc_choices
from ... import factories as gc_factories
from ... import models as gc_models
from .. import range_of_dates, view_in_browser
logger = logging.getLogger(__name__)
console = logging.StreamHandler()
# logger.setLevel(logging.DEBUG)
# logger.addHandler(console)
fake = faker.Faker()
class TestDailyStatsView:
"""
Check that we render the right templates
"""
def _setup_top_green_domains(self):
"""
Set up a set of daily stats listing the top domains
for a given date
"""
yesterday = timezone.now() - relativedelta(days=1)
thirty_days_back = timezone.now() - relativedelta(days=30)
last_30_days = range_of_dates(thirty_days_back, yesterday)
for x in range(10):
realist_random_no = int(fake.numerify()) * int(fake.numerify())
domain = fake.domain_name()
for given_datetime in last_30_days:
daily_total = gc_choices.DailyStatChoices.DAILY_TOTAL
domain_key = f"{daily_total}:domain:{domain}"
# set up domain to check against, with
# corresponding hosting provider
gc_factories.GreenDomainFactory(url=domain)
gc_models.DailyStat.objects.create(
stat_key=domain_key,
count=realist_random_no,
green=gc_choices.GreenStatChoice.YES,
stat_date=given_datetime.date(),
)
def _get_top_green_hosters(self):
"""
Set up a set of stats for the top providers for a
given date
"""
yesterday = timezone.now() - relativedelta(days=1)
thirty_days_back = timezone.now() - relativedelta(days=30)
last_30_days = range_of_dates(thirty_days_back, yesterday)
for x in range(10):
realist_random_no = int(fake.numerify()) * int(fake.numerify())
provider_name = fake.company()
for given_datetime in last_30_days:
daily_total = gc_choices.DailyStatChoices.DAILY_TOTAL
provider = gc_factories.HostingProviderFactory(name=provider_name)
domain_key = f"{daily_total}:provider:{provider.id}"
gc_models.DailyStat.objects.create(
stat_key=domain_key,
count=realist_random_no,
green=gc_choices.GreenStatChoice.YES,
stat_date=given_datetime.date(),
)
def METHOD_NAME(
self, hosting_provider_with_sample_user, green_ip
):
# for each date, add one green and two grey checks for every day
now = timezone.now()
thirty_days_back = now - relativedelta(days=30)
yesterday = now - relativedelta(days=1)
last_30_days = range_of_dates(thirty_days_back, yesterday)
for given_datetime in last_30_days:
gc_factories.GreencheckFactory.create(
date=given_datetime + relativedelta(hours=2),
hostingprovider=hosting_provider_with_sample_user.id,
greencheck_ip=green_ip.id,
ip=green_ip.ip_end,
green=gc_choices.BoolChoice.YES,
)
gc_factories.GreencheckFactory.create_batch(
size=2, date=given_datetime + relativedelta(hours=2),
)
gc_models.DailyStat.create_counts_for_date_range(last_30_days, "total_count")
@override_flag("greencheck-stats", active=True)
def test_stat_view_templates(self, db, client):
"""
        Test that we render successfully and use the right template
"""
stat_path = urls.reverse("greencheck-stats-index")
res = client.get(stat_path)
assert res.status_code == 200
assert "greencheck/stats_index.html" in [tmpl.name for tmpl in res.templates]
@override_flag("greencheck-stats", active=True)
def test_stat_view_headlines(
self,
db,
client,
hosting_provider_with_sample_user: ac_models.Hostingprovider,
green_ip: gc_models.GreencheckIp,
):
"""
Check that we have the aggregated headline figures available to us
"""
self.METHOD_NAME(
hosting_provider_with_sample_user, green_ip
)
stat_path = urls.reverse("greencheck-stats-index")
res = client.get(stat_path)
assert res.status_code == 200
# do we have plausible numbers now?
headlines = res.context_data["stats"]["headlines"]
stats = [
stat.count
for stat in gc_models.DailyStat.objects.filter(
stat_key="total_daily_checks", green="yes"
)
]
total_green = functools.reduce(lambda x, y: x + y, stats)
assert headlines["green"] == total_green
@override_flag("greencheck-stats", active=True)
def test_stat_view_chart(
self,
db,
hosting_provider_with_sample_user: ac_models.Hostingprovider,
green_ip: gc_models.GreencheckIp,
client,
):
"""
        Test that we fetch the last 30 days of stats, and make it available for graphing
"""
self.METHOD_NAME(
hosting_provider_with_sample_user, green_ip
)
stat_path = urls.reverse("greencheck-stats-index")
res = client.get(stat_path)
assert res.status_code == 200
stats = res.context_data["stats"]
assert "headlines" in stats.keys()
assert "chart_data" in stats.keys()
chart_data = stats["chart_data"]
# we want to see the green, grey, for our charts
assert len(chart_data["green"]) == 30
assert len(chart_data["grey"]) == 30
@pytest.mark.flaky
@override_flag("greencheck-stats", active=True)
def test_stat_view_top_domains(
self, db, client,
):
"""
        Test that we can display a list of the top domains
"""
self._setup_top_green_domains()
stat_path = urls.reverse("greencheck-stats-index")
res = client.get(stat_path)
stats = res.context_data["stats"]
assert "top_green_domains" in stats.keys()
top_green_domains = stats["top_green_domains"]
assert len(top_green_domains) == 10
@override_flag("greencheck-stats", active=True)
def test_stat_view_top_providers(self, db, client):
"""
        Test that we can display a list of the top providers
"""
self._get_top_green_hosters()
stat_path = urls.reverse("greencheck-stats-index")
res = client.get(stat_path)
stats = res.context_data["stats"]
assert "top_green_hosters" in stats.keys()
top_green_hosters = stats["top_green_hosters"]
assert len(top_green_hosters) == 10
|
2,924 |
test charsxp encoding
|
import copy
import gc
import pytest
import rpy2.rinterface as rinterface
rinterface.initr()
def test_invalid_init():
with pytest.raises(ValueError):
rinterface.Sexp('a')
def test_init_from_existing():
sexp = rinterface.baseenv.find('letters')
sexp_new = rinterface.Sexp(sexp)
assert sexp_new._sexpobject is sexp._sexpobject
def test_typeof():
assert isinstance(rinterface.baseenv.typeof, int)
def test_get():
sexp = rinterface.baseenv.find('letters')
assert sexp.typeof == rinterface.RTYPES.STRSXP
sexp = rinterface.baseenv.find('pi')
assert sexp.typeof == rinterface.RTYPES.REALSXP
sexp = rinterface.baseenv.find('options')
assert sexp.typeof == rinterface.RTYPES.CLOSXP
@pytest.mark.parametrize('cls',
(rinterface.IntSexpVector, rinterface.ListSexpVector))
def test_list_attrs(cls):
x = cls((1, 2, 3))
assert len(x.list_attrs()) == 0
x.do_slot_assign('a', rinterface.IntSexpVector((33, )))
assert len(x.list_attrs()) == 1
assert 'a' in x.list_attrs()
def test_do_slot():
sexp = rinterface.baseenv.find('.Platform')
names = sexp.do_slot('names')
assert len(names) > 1
assert 'OS.type' in names
def test_names():
sexp = rinterface.baseenv.find('.Platform')
names = sexp.names
assert len(names) > 1
assert 'OS.type' in names
def test_names_set():
sexp = rinterface.IntSexpVector([1, 2, 3])
assert sexp.names.rid == rinterface.NULL.rid
sexp.names = rinterface.StrSexpVector(['a', 'b', 'c'])
assert len(sexp.names) > 1
assert tuple(sexp.names) == ('a', 'b', 'c')
def test_names_set_invalid():
sexp = rinterface.IntSexpVector([1, 2, 3])
assert sexp.names.rid == rinterface.NULL.rid
with pytest.raises(ValueError):
sexp.names = ('a', 'b', 'c')
def test_do_slot_missing():
sexp = rinterface.baseenv.find('pi')
with pytest.raises(LookupError):
sexp.do_slot('foo')
def test_do_slot_not_string():
sexp = rinterface.baseenv.find('pi')
with pytest.raises(ValueError):
sexp.do_slot(None)
def test_do_slot_empty_string():
sexp = rinterface.baseenv.find('pi')
with pytest.raises(ValueError):
sexp.do_slot('')
def test_do_slot_assign_create():
sexp = rinterface.IntSexpVector([])
slot_value = rinterface.IntSexpVector([3, ])
sexp.do_slot_assign('foo', slot_value)
slot_value_back = sexp.do_slot('foo')
assert len(slot_value_back) == len(slot_value)
assert all(x == y for x, y in zip(slot_value, slot_value_back))
def test_do_slot_reassign():
sexp = rinterface.IntSexpVector([])
slot_value_a = rinterface.IntSexpVector([3, ])
sexp.do_slot_assign('foo', slot_value_a)
slot_value_b = rinterface.IntSexpVector([5, 6])
sexp.do_slot_assign('foo', slot_value_b)
slot_value_back = sexp.do_slot('foo')
assert len(slot_value_b) == len(slot_value_back)
assert all(x == y for x, y in zip(slot_value_b, slot_value_back))
def test_do_slot_assign_empty_string():
sexp = rinterface.IntSexpVector([])
slot_value = rinterface.IntSexpVector([3, ])
with pytest.raises(ValueError):
sexp.do_slot_assign('', slot_value)
def test_sexp_rsame_true():
sexp_a = rinterface.baseenv.find("letters")
sexp_b = rinterface.baseenv.find("letters")
assert sexp_a.rsame(sexp_b)
def test_sexp_rsame_false():
sexp_a = rinterface.baseenv.find("letters")
sexp_b = rinterface.baseenv.find("pi")
assert not sexp_a.rsame(sexp_b)
def test_sexp_rsame_invalid():
sexp_a = rinterface.baseenv.find("letters")
with pytest.raises(ValueError):
sexp_a.rsame('foo')
def test___sexp__():
sexp = rinterface.IntSexpVector([1, 2, 3])
sexp_count = sexp.__sexp_refcount__
sexp_cobj = sexp.__sexp__
d = dict(rinterface._rinterface.protected_rids())
assert sexp_count == d[sexp.rid]
assert sexp_count == sexp.__sexp_refcount__
sexp2 = rinterface.IntSexpVector([4, 5, 6, 7])
sexp2_rid = sexp2.rid
sexp2.__sexp__ = sexp_cobj
del(sexp)
gc.collect()
d = dict(rinterface._rinterface.protected_rids())
assert d.get(sexp2_rid) is None
def test_rclass_get():
sexp = rinterface.baseenv.find('character')(1)
assert len(sexp.rclass) == 1
assert sexp.rclass[0] == 'character'
sexp = rinterface.baseenv.find('matrix')(0)
if rinterface.evalr('R.version$major')[0] >= '4':
assert tuple(sexp.rclass) == ('matrix', 'array')
else:
assert tuple(sexp.rclass) == ('matrix', )
sexp = rinterface.baseenv.find('array')(0)
assert len(sexp.rclass) == 1
assert sexp.rclass[0] == 'array'
sexp = rinterface.baseenv.find('new.env')()
assert len(sexp.rclass) == 1
assert sexp.rclass[0] == 'environment'
def test_rclass_get_sym():
# issue #749
fit = rinterface.evalr("""
stats::lm(y ~ x, data=base::data.frame(y=1:10, x=2:11))
""")
assert tuple(fit[9].rclass) == ('call', )
def test_rclass_set():
sexp = rinterface.IntSexpVector([1, 2, 3])
sexp.rclass = rinterface.StrSexpVector(['foo'])
assert len(sexp.rclass) == 1
assert sexp.rclass[0] == 'foo'
sexp.rclass = 'bar'
assert len(sexp.rclass) == 1
assert sexp.rclass[0] == 'bar'
def test_rclass_set_invalid():
sexp = rinterface.IntSexpVector([1, 2, 3])
with pytest.raises(TypeError):
sexp.rclass = rinterface.StrSexpVector(123)
def test__sexp__wrongtypeof():
sexp = rinterface.IntSexpVector([1, 2, 3])
cobj = sexp.__sexp__
sexp = rinterface.StrSexpVector(['a', 'b'])
assert len(sexp) == 2
with pytest.raises(ValueError):
sexp.__sexp__ = cobj
def test__sexp__set():
x = rinterface.IntSexpVector([1, 2, 3])
x_s = x.__sexp__
x_rid = x.rid
# The Python reference count of the capsule is incremented,
# not the rpy2 reference count
assert x.__sexp_refcount__ == 1
y = rinterface.IntSexpVector([4, 5, 6])
y_count = y.__sexp_refcount__
y_rid = y.rid
assert y_count == 1
assert x_rid in [elt[0] for elt in rinterface._rinterface.protected_rids()]
x.__sexp__ = y.__sexp__
# x_s is still holding a refcount to the capsule
assert x_rid in [elt[0] for elt in rinterface._rinterface.protected_rids()]
# when gone, the capsule will be collected and the id no longer preserved
del(x_s)
assert x_rid not in [elt[0] for elt in
rinterface._rinterface.protected_rids()]
assert x.rid == y.rid
assert y_rid == y.rid
@pytest.mark.xfail(reason='WIP')
def test_deepcopy():
sexp = rinterface.IntSexpVector([1, 2, 3])
assert sexp.named == 0
rinterface.baseenv.find("identity")(sexp)
assert sexp.named >= 2
sexp2 = sexp.__deepcopy__()
assert sexp.typeof == sexp2.typeof
assert list(sexp) == list(sexp2)
assert not sexp.rsame(sexp2)
assert sexp2.named == 0
# should be the same as above, but just in case:
sexp3 = copy.deepcopy(sexp)
assert sexp.typeof == sexp3.typeof
assert list(sexp) == list(sexp3)
assert not sexp.rsame(sexp3)
assert sexp3.named == 0
def test_rid():
globalenv_id = rinterface.baseenv.find('.GlobalEnv').rid
assert globalenv_id == rinterface.globalenv.rid
def test_NULL_nonzero():
assert not rinterface.NULL
def METHOD_NAME():
encoding = rinterface.NA_Character.encoding
assert encoding == rinterface.sexp.CETYPE.CE_NATIVE
def test_charsxp_nchar():
v = rinterface.StrSexpVector(['abc', 'de', ''])
cs = v.get_charsxp(0)
assert cs.nchar() == 3
cs = v.get_charsxp(1)
assert cs.nchar() == 2
cs = v.get_charsxp(2)
assert cs.nchar() == 0
def test_missingtype():
assert not rinterface.MissingArg
|
2,925 |
assert token fail
|
import pytest
from xdsl.utils.exceptions import ParseError
from xdsl.utils.lexer import Input, Lexer, Token
def get_token(input: str) -> Token:
file = Input(input, "<unknown>")
lexer = Lexer(file)
token = lexer.lex()
return token
def assert_single_token(
input: str, expected_kind: Token.Kind, expected_text: str | None = None
):
if expected_text is None:
expected_text = input
token = get_token(input)
assert token.kind == expected_kind
assert token.text == expected_text
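    # Illustrative call: assert_single_token("->", Token.Kind.ARROW) checks that
    # lexing "->" produces an ARROW token whose text is "->".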
def METHOD_NAME(input: str):
file = Input(input, "<unknown>")
lexer = Lexer(file)
with pytest.raises(ParseError):
lexer.lex()
@pytest.mark.parametrize(
"text,kind",
[
("->", Token.Kind.ARROW),
(":", Token.Kind.COLON),
(",", Token.Kind.COMMA),
("...", Token.Kind.ELLIPSIS),
("=", Token.Kind.EQUAL),
(">", Token.Kind.GREATER),
("{", Token.Kind.L_BRACE),
("(", Token.Kind.L_PAREN),
("[", Token.Kind.L_SQUARE),
("<", Token.Kind.LESS),
("-", Token.Kind.MINUS),
("+", Token.Kind.PLUS),
("?", Token.Kind.QUESTION),
("}", Token.Kind.R_BRACE),
(")", Token.Kind.R_PAREN),
("]", Token.Kind.R_SQUARE),
("*", Token.Kind.STAR),
("|", Token.Kind.VERTICAL_BAR),
("{-#", Token.Kind.FILE_METADATA_BEGIN),
("#-}", Token.Kind.FILE_METADATA_END),
],
)
def test_punctuation(text: str, kind: Token.Kind):
assert_single_token(text, kind)
@pytest.mark.parametrize("text", [".", "&", "/"])
def test_punctuation_fail(text: str):
METHOD_NAME(text)
@pytest.mark.parametrize(
"text", ['""', '"@"', '"foo"', '"\\""', '"\\n"', '"\\\\"', '"\\t"']
)
def test_str_literal(text: str):
assert_single_token(text, Token.Kind.STRING_LIT)
@pytest.mark.parametrize("text", ['"', '"\\"', '"\\a"', '"\n"', '"\v"', '"\f"'])
def test_str_literal_fail(text: str):
METHOD_NAME(text)
@pytest.mark.parametrize(
"text", ["a", "A", "_", "a_", "a1", "a1_", "a1_2", "a1_2_3", "a$_.", "a$_.1"]
)
def test_bare_ident(text: str):
"""bare-id ::= (letter|[_]) (letter|digit|[_$.])*"""
assert_single_token(text, Token.Kind.BARE_IDENT)
@pytest.mark.parametrize(
"text",
[
"@a",
"@A",
"@_",
"@a_",
"@a1",
"@a1_",
"@a1_2",
"@a1_2_3",
"@a$_.",
"@a$_.1",
'@""',
'@"@"',
'@"foo"',
'@"\\""',
'@"\\n"',
'@"\\\\"',
'@"\\t"',
],
)
def test_at_ident(text: str):
"""at-ident ::= `@` (bare-id | string-literal)"""
assert_single_token(text, Token.Kind.AT_IDENT)
@pytest.mark.parametrize(
"text",
["@", '@"', '@"\\"', '@"\\a"', '@"\n"', '@"\v"', '@"\f"', '@ "a"', "@ f", "@$"],
)
def test_at_ident_fail(text: str):
"""at-ident ::= `@` (bare-id | string-literal)"""
METHOD_NAME(text)
@pytest.mark.parametrize(
"text", ["0", "1234", "a", "S", "$", "_", ".", "-", "e_.$-324", "e5$-e_", "foo"]
)
def test_prefixed_ident(text: str):
"""hash-ident ::= `#` (digit+ | (letter|[$._-]) (letter|[$._-]|digit)*)"""
"""percent-ident ::= `%` (digit+ | (letter|[$._-]) (letter|[$._-]|digit)*)"""
"""caret-ident ::= `^` (digit+ | (letter|[$._-]) (letter|[$._-]|digit)*)"""
"""exclamation-ident ::= `!` (digit+ | (letter|[$._-]) (letter|[$._-]|digit)*)"""
assert_single_token("#" + text, Token.Kind.HASH_IDENT)
assert_single_token("%" + text, Token.Kind.PERCENT_IDENT)
assert_single_token("^" + text, Token.Kind.CARET_IDENT)
assert_single_token("!" + text, Token.Kind.EXCLAMATION_IDENT)
@pytest.mark.parametrize("text", ["+", '""', "#", "%", "^", "!", "\n", ""])
def test_prefixed_ident_fail(text: str):
"""
hash-ident ::= `#` (digit+ | (letter|[$._-]) (letter|[$._-]|digit)*)
percent-ident ::= `%` (digit+ | (letter|[$._-]) (letter|[$._-]|digit)*)
caret-ident ::= `^` (digit+ | (letter|[$._-]) (letter|[$._-]|digit)*)
exclamation-ident ::= `!` (digit+ | (letter|[$._-]) (letter|[$._-]|digit)*)
"""
METHOD_NAME("#" + text)
METHOD_NAME("%" + text)
METHOD_NAME("^" + text)
METHOD_NAME("!" + text)
@pytest.mark.parametrize(
"text,expected",
[("0x0", "0"), ("0e", "0"), ("0$", "0"), ("0_", "0"), ("0-", "0"), ("0.", "0")],
)
def test_prefixed_ident_split(text: str, expected: str):
"""Check that the prefixed identifier is split at the right character."""
assert_single_token("#" + text, Token.Kind.HASH_IDENT, "#" + expected)
assert_single_token("%" + text, Token.Kind.PERCENT_IDENT, "%" + expected)
assert_single_token("^" + text, Token.Kind.CARET_IDENT, "^" + expected)
assert_single_token("!" + text, Token.Kind.EXCLAMATION_IDENT, "!" + expected)
@pytest.mark.parametrize("text", ["0", "01", "123456789", "99", "0x1234", "0xabcdef"])
def test_integer_literal(text: str):
assert_single_token(text, Token.Kind.INTEGER_LIT)
@pytest.mark.parametrize(
"text,expected", [("0a", "0"), ("0xg", "0"), ("0xfg", "0xf"), ("0xf.", "0xf")]
)
def test_integer_literal_split(text: str, expected: str):
assert_single_token(text, Token.Kind.INTEGER_LIT, expected)
@pytest.mark.parametrize(
"text", ["0.", "1.", "0.2", "38.1243", "92.54e43", "92.5E43", "43.3e-54", "32.E+25"]
)
def test_float_literal(text: str):
assert_single_token(text, Token.Kind.FLOAT_LIT)
@pytest.mark.parametrize(
"text,expected", [("3.9e", "3.9"), ("4.5e+", "4.5"), ("5.8e-", "5.8")]
)
def test_float_literal_split(text: str, expected: str):
assert_single_token(text, Token.Kind.FLOAT_LIT, expected)
@pytest.mark.parametrize("text", ["0", " 0", " 0", "\n0", "\t0", "// Comment\n0"])
def test_whitespace_skip(text: str):
assert_single_token(text, Token.Kind.INTEGER_LIT, "0")
@pytest.mark.parametrize("text", ["", " ", "\n\n", "// Comment\n"])
def test_eof(text: str):
assert_single_token(text, Token.Kind.EOF, "")
@pytest.mark.parametrize(
"text, expected",
[
("0", 0),
("010", 10),
("123456789", 123456789),
("0x1234", 4660),
("0xabcdef23", 2882400035),
],
)
def test_token_get_int_value(text: str, expected: int):
token = get_token(text)
assert token.kind == Token.Kind.INTEGER_LIT
assert token.get_int_value() == expected
@pytest.mark.parametrize(
"text, expected",
[
("0.", 0.0),
("1.", 1.0),
("0.2", 0.2),
("38.1243", 38.1243),
("92.54e43", 92.54e43),
("92.5E43", 92.5e43),
("43.3e-54", 43.3e-54),
("32.E+25", 32.0e25),
],
)
def test_token_get_float_value(text: str, expected: float):
token = get_token(text)
assert token.kind == Token.Kind.FLOAT_LIT
assert token.get_float_value() == expected
@pytest.mark.parametrize(
"text,expected",
[
('""', ""),
('"@"', "@"),
('"foo"', "foo"),
('"\\""', '"'),
('"\\n"', "\n"),
('"\\\\"', "\\"),
('"\\t"', "\t"),
],
)
def test_token_get_string_literal_value(text: str, expected: float):
token = get_token(text)
assert token.kind == Token.Kind.STRING_LIT
assert token.get_string_literal_value() == expected
|
2,926 |
upgrade registrar
|
"""convert allowlists to IMA policies
Revision ID: 8c0f8ded1f90
Revises: 039322ea079b
Create Date: 2022-10-27 18:18:31.674283
"""
import copy
import datetime
import json
import sqlalchemy as sa
from alembic import op
from keylime import keylime_logging
from keylime.ima import ima
logger = keylime_logging.init_logging("db_migrations")
# revision identifiers, used by Alembic.
revision = "8c0f8ded1f90"
down_revision = "039322ea079b"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()[f"upgrade_{engine_name}"]()
def downgrade(engine_name):
globals()[f"downgrade_{engine_name}"]()
def METHOD_NAME():
pass
def downgrade_registrar():
pass
def upgrade_cloud_verifier():
# get existing table metadata
conn = op.get_bind()
meta = sa.MetaData()
meta.reflect(bind=conn, only=("allowlists",))
allowlists = meta.tables["allowlists"]
results = conn.execute(sa.text("SELECT id, ima_policy FROM allowlists")).fetchall()
# Update allowlist entries with converted IMA policies
for old_ima_policy_id, old_ima_policy in results:
try:
old_ima_policy = json.loads(old_ima_policy)
except Exception as e:
message = "Error loading JSON-formatted Keylime policy: %s", repr(e)
logger.error(message)
raise e
alist_json = old_ima_policy["allowlist"]
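        # At this point old_ima_policy is expected to look roughly like
        # {"allowlist": {"hashes": {...}, ...}, "exclude": [...]}; the loop
        # below copies "hashes" into the new "digests" field and "exclude"
        # into "excludes".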
new_ima_policy = copy.deepcopy(ima.EMPTY_RUNTIME_POLICY)
new_ima_policy["meta"]["timestamp"] = str(datetime.datetime.now())
new_ima_policy["meta"]["generator"] = ima.RUNTIME_POLICY_GENERATOR.LegacyAllowList
for key in new_ima_policy.keys():
if key == "digests":
digests = alist_json.get("hashes")
if not digests:
message = "Allowlist %s does not have a valid hash list!", old_ima_policy_id
logger.error(message)
raise Exception(message)
new_ima_policy[key] = alist_json["hashes"]
elif key == "excludes":
new_ima_policy["excludes"] = old_ima_policy["exclude"]
elif key == "meta":
# Skip old metadata
continue
else:
to_migrate = alist_json.get(key, None)
if to_migrate is None:
logger.info("Runtime policy field '%s' not found in existing allowlist; using default value", key)
else:
new_ima_policy[key] = alist_json[key]
new_ima_policy = json.dumps(new_ima_policy)
conn.execute(allowlists.update().where(allowlists.c.id == old_ima_policy_id).values(ima_policy=new_ima_policy))
def downgrade_cloud_verifier():
# get existing table metadata
conn = op.get_bind()
meta = sa.MetaData()
meta.reflect(bind=conn, only=("allowlists",))
allowlists = meta.tables["allowlists"]
results = conn.execute(sa.text("SELECT id, ima_policy FROM allowlists")).fetchall()
# Update allowlist entries with converted IMA policies
for ima_policy_id, ima_policy in results:
try:
ima_policy = json.loads(ima_policy)
except Exception as e:
message = "Error loading JSON-formatted Keylime policy: %s", repr(e)
logger.error(message)
raise e
allowlist = {
"meta": {
"version": 5,
"generator": ima.RUNTIME_POLICY_GENERATOR.CompatibleAllowList,
"checksum": "",
},
"release": 0,
"hashes": {},
"keyrings": {},
"ima": {"ignored_keyrings": [], "log_hash_alg": "sha1"},
}
allowlist["meta"]["timestamp"] = str(datetime.datetime.now())
for key in allowlist.keys(): # pylint: disable=consider-iterating-dictionary
if key == "hashes":
digests = ima_policy.get("digests")
if not digests:
message = "Runtime policy %s does not have a valid hash list!", ima_policy_id
logger.error(message)
raise Exception(message)
allowlist[key] = ima_policy["digests"]
elif key == "meta":
# Skip old metadata
continue
else:
to_migrate = ima_policy.get(key, None)
if to_migrate is None:
logger.info("Allowlist field '%s' not found in existing IMA policy; using default value", key)
else:
allowlist[key] = ima_policy[key]
downgraded_policy = {}
downgraded_policy["allowlist"] = allowlist
downgraded_policy["exclude"] = ima_policy["excludes"]
downgraded_policy = json.dumps(downgraded_policy)
conn.execute(allowlists.update().where(allowlists.c.id == ima_policy_id).values(ima_policy=downgraded_policy))
|
2,927 |
send request
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import MultiapiServiceClientConfiguration
from .operations import MultiapiServiceClientOperationsMixin, OperationGroupOneOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class MultiapiServiceClient(MultiapiServiceClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword
"""Service client for multiapi client testing.
:ivar operation_group_one: OperationGroupOneOperations operations
:vartype operation_group_one: multiapisecurity.v1.aio.operations.OperationGroupOneOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param base_url: Service URL. Default value is "http://localhost:3000".
:type base_url: str
:keyword api_version: Api Version. Default value is "1.0.0". Note that overriding this default
value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self, credential: "AsyncTokenCredential", base_url: str = "http://localhost:3000", **kwargs: Any
) -> None:
self._config = MultiapiServiceClientConfiguration(credential=credential, **kwargs)
self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.operation_group_one = OperationGroupOneOperations(
self._client, self._config, self._serialize, self._deserialize, "1.0.0"
)
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "MultiapiServiceClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
|
2,928 |
get run with polling
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long, consider-using-f-string
import time
from msrest import Deserializer
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.profiles import ResourceType
from azure.cli.command_modules.acr._constants import get_acr_task_models
from azure.core.polling import PollingMethod, LROPoller
def METHOD_NAME(cmd,
client,
run_id,
registry_name,
resource_group_name):
deserializer = Deserializer(
{k: v for k, v in get_acr_task_models(cmd).__dict__.items() if isinstance(v, type)})
def deserialize_run(response):
return deserializer('Run', response)
return LROPoller(
client=client,
initial_response=client.get(
resource_group_name, registry_name, run_id, cls=lambda x, y, z: x),
deserialization_callback=deserialize_run,
polling_method=RunPolling(
cmd=cmd,
registry_name=registry_name,
run_id=run_id
))
class RunPolling(PollingMethod): # pylint: disable=too-many-instance-attributes
def __init__(self, cmd, registry_name, run_id, timeout=30):
self._cmd = cmd
self._registry_name = registry_name
self._run_id = run_id
self._timeout = timeout
self._client = None
self._response = None # Will hold latest received response
self._url = None # The URL used to get the run
self._deserialize = None # The deserializer for Run
self.operation_status = ""
self.operation_result = None
def initialize(self, client, initial_response, deserialization_callback):
self._client = client._client # pylint: disable=protected-access
self._response = initial_response
self._url = initial_response.http_request.url
self._deserialize = deserialization_callback
self._set_operation_status(initial_response)
def run(self):
while not self.finished():
time.sleep(self._timeout)
self._update_status()
if self.operation_status not in get_succeeded_run_status(self._cmd):
from knack.util import CLIError
raise CLIError("The run with ID '{}' finished with unsuccessful status '{}'. "
"Show run details by 'az acr task show-run -r {} --run-id {}'. "
"Show run logs by 'az acr task logs -r {} --run-id {}'.".format(
self._run_id,
self.operation_status,
self._registry_name,
self._run_id,
self._registry_name,
self._run_id
))
def status(self):
return self.operation_status
def finished(self):
return self.operation_status in get_finished_run_status(self._cmd)
def resource(self):
return self.operation_result
def _set_operation_status(self, response):
if response.http_response.status_code == 200:
self.operation_result = self._deserialize(response)
self.operation_status = self.operation_result.status
return
raise CloudError(response)
def _update_status(self):
self._response = self._client._pipeline.run( # pylint: disable=protected-access
self._client.get(self._url), stream=False)
self._set_operation_status(self._response)
def get_succeeded_run_status(cmd):
RunStatus = cmd.get_models('RunStatus', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='task_runs')
return [RunStatus.succeeded.value]
def get_finished_run_status(cmd):
RunStatus = cmd.get_models('RunStatus', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='task_runs')
return [RunStatus.succeeded.value,
RunStatus.failed.value,
RunStatus.canceled.value,
RunStatus.error.value,
RunStatus.timeout.value]
|
2,929 |
test sct analyze lesion matches expected dummy
|
# pytest unit tests for sct_analyze_lesion
import pytest
import logging
import math
import pickle
import numpy as np
from spinalcordtoolbox.utils import sct_test_path, extract_fname
from spinalcordtoolbox.scripts import sct_analyze_lesion, sct_label_utils
logger = logging.getLogger(__name__)
@pytest.fixture()
def dummy_lesion(request, tmp_path):
"""Define a fake voxel lesion using the specified dimensions."""
starting_coord, dim = request.param
# Format the coordinates into a str argument that `-create` can accept
coordinates = []
for x in range(starting_coord[0], starting_coord[0] + dim[0]):
for y in range(starting_coord[1], starting_coord[1] + dim[1]):
for z in range(starting_coord[2], starting_coord[2] + dim[2]):
coord = [str(x), str(y), str(z), "1"]
coordinates.append(",".join(coord))
create_arg = ":".join(coordinates)
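    # Illustrative example: a 1x1x2 lesion starting at (1, 2, 3) would yield
    # create_arg == "1,2,3,1:1,2,4,1", i.e. colon-separated "x,y,z,value" tuples.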
# Create the lesion mask file and output to a temporary directory
path_ref = sct_test_path("t2", "t2.nii.gz")
path_out = str(tmp_path/"lesion.nii.gz")
sct_label_utils.main(argv=['-i', path_ref, '-o', path_out,
'-create', create_arg])
# Compute the expected (voxel) measurements from the provided dimensions
# NB: Actual measurements will differ slightly due to spine curvature
measurements = {
# NB: 'sct_analyze_lesion' treats lesions as cylinders. So:
# - Vertical axis: Length of the cylinder
'length [mm]': dim[1],
# - Horizontal plane: Cross-sectional slices of the cylinder.
# Specifically, 'max_equivalent_diameter' takes the
# cross-sectional area of the lesion (which is computed
# using square voxels), then finds the diameter of an
# equivalent *circle* with that same area:
# a = pi*r^2
# -> a = pi*(d/2)^2
# -> d = 2*sqrt(a/pi)
'max_equivalent_diameter [mm]': 2 * np.sqrt(dim[0] * dim[2] / np.pi),
'volume [mm3]': dim[0] * dim[1] * dim[2],
}
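    # Illustrative arithmetic for the formula above: dim == (3, 10, 2) gives a
    # cross-sectional area of 3 * 2 = 6, so the expected equivalent diameter is
    # 2 * sqrt(6 / pi) ~= 2.76.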
return path_out, measurements
@pytest.mark.sct_testing
@pytest.mark.parametrize("dummy_lesion, rtol", [
# Straight region of `t2.nii.gz` -> little curvature -> smaller tolerance
([(29, 45, 25), (3, 10, 2)], 0.001),
([(29, 27, 25), (1, 4, 1)], 0.001), # NB: Example from #3633
# Curved region of `t2.nii.gz` -> lots of curvature -> larger tolerance
([(29, 0, 25), (4, 15, 3)], 0.01)
], indirect=["dummy_lesion"])
def METHOD_NAME(dummy_lesion, rtol, tmp_path):
"""Run the CLI script and verify that the lesion measurements match
expected values."""
# Run the analysis on the dummy lesion file
path_lesion, expected_measurements = dummy_lesion
sct_analyze_lesion.main(argv=['-m', path_lesion,
'-s', sct_test_path("t2", "t2_seg-manual.nii.gz"),
'-ofolder', str(tmp_path)])
# Load analysis results from pickled pandas.Dataframe
_, fname, _ = extract_fname(path_lesion)
with open(tmp_path/f"{fname}_analysis.pkl", 'rb') as f:
measurements = pickle.load(f)['measures']
# Validate analysis results
for key, expected_value in expected_measurements.items():
if key == 'volume [mm3]':
np.testing.assert_equal(measurements.at[0, key], expected_value)
else:
# The length/diameter won't match exactly due to angle adjustment
# from spinal cord centerline curvature
np.testing.assert_allclose(measurements.at[0, key],
expected_value, rtol=rtol)
# The values will be adjusted according to the cos of the angle
# between the spinal cord centerline and the S-I axis, as per:
# https://github.com/spinalcordtoolbox/spinalcordtoolbox/pull/3681#discussion_r804822552
if key == 'max_equivalent_diameter [mm]':
assert measurements.at[0, key] < expected_value
elif key == 'length [mm]':
assert measurements.at[0, key] > expected_value
@pytest.mark.sct_testing
@pytest.mark.parametrize("dummy_lesion, rtol", [
# Straight region of `t2.nii.gz` -> little curvature -> smaller tolerance
([(29, 45, 25), (3, 10, 2)], 0.001),
([(29, 27, 25), (1, 4, 1)], 0.001), # NB: Example from #3633
# Curved region of `t2.nii.gz` -> lots of curvature -> larger tolerance
([(29, 0, 25), (4, 15, 3)], 0.01)
], indirect=["dummy_lesion"])
def test_sct_analyze_lesion_matches_expected_dummy_lesion_measurements_without_segmentation(dummy_lesion, rtol,
tmp_path):
"""Run the CLI script without providing SC segmentation -- only volume is computed. Max_equivalent_diameter and
length are nan."""
# Run the analysis on the dummy lesion file
path_lesion, expected_measurements = dummy_lesion
sct_analyze_lesion.main(argv=['-m', path_lesion,
'-ofolder', str(tmp_path)])
# Load analysis results from pickled pandas.Dataframe
_, fname, _ = extract_fname(path_lesion)
with open(tmp_path/f"{fname}_analysis.pkl", 'rb') as f:
measurements = pickle.load(f)['measures']
# Validate analysis results
for key, expected_value in expected_measurements.items():
if key == 'volume [mm3]':
np.testing.assert_equal(measurements.at[0, key], expected_value)
# The max_equivalent_diameter and length are nan because no segmentation is provided
elif key == 'max_equivalent_diameter [mm]':
assert math.isnan(measurements.at[0, key])
elif key == 'length [mm]':
assert math.isnan(measurements.at[0, key])
|
2,930 |
get version flags
|
import os
import re
from collections import deque
from typing import Any, Dict, List, Optional, Tuple, Type
from dmoj.cptbox import TracedPopen
from dmoj.executors.compiled_executor import CompiledExecutor
from dmoj.executors.mixins import SingleDigitVersionMixin
from dmoj.judgeenv import env
from dmoj.utils.cpp_demangle import demangle
from dmoj.utils.unicode import utf8bytes, utf8text
GCC_ENV = env.runtime.gcc_env or {}
GCC_COMPILE = os.environ.copy()
GCC_COMPILE.update(env.runtime.gcc_compile or {})
MAX_ERRORS = 5
recppexc = re.compile(br"terminate called after throwing an instance of \'([A-Za-z0-9_:]+)\'\r?$", re.M)
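# Illustrative match, assuming a typical libstdc++ abort message: a stderr line
#   terminate called after throwing an instance of 'std::bad_alloc'
# would capture b'std::bad_alloc' as group 1.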
class GCCExecutor(SingleDigitVersionMixin, CompiledExecutor):
defines: List[str] = []
flags: List[str] = []
arch = 'gcc_target_arch'
has_color = False
source_dict: Dict[str, bytes] = {}
def __init__(self, problem_id: str, source_code: bytes, **kwargs) -> None:
self.source_dict = kwargs.pop('aux_sources', {})
if source_code:
self.source_dict[problem_id + self.ext] = source_code
self.defines = kwargs.pop('defines', [])
super().__init__(problem_id, source_code, **kwargs)
def create_files(self, problem_id: str, source_code: bytes, *args, **kwargs) -> None:
self.source_paths = []
for name, source in self.source_dict.items():
if '.' not in name:
name += '.' + self.ext
with open(self._file(name), 'wb') as fo:
fo.write(utf8bytes(source))
self.source_paths.append(name)
def get_binary_cache_key(self) -> bytes:
command = self.get_command()
assert command is not None
key_components = (
[self.problem, command, self.get_march_flag()] + self.get_defines() + self.get_flags() + self.get_ldflags()
)
return utf8bytes(''.join(key_components)) + b''.join(self.source_dict.values())
def get_ldflags(self) -> List[str]:
return []
def get_flags(self) -> List[str]:
return self.flags + [f'-fmax-errors={MAX_ERRORS}']
def get_defines(self) -> List[str]:
return ['-DONLINE_JUDGE'] + self.defines
def get_compile_args(self) -> List[str]:
command = self.get_command()
assert command is not None
return (
[command, '-Wall']
+ (['-fdiagnostics-color=always'] if self.has_color else [])
+ self.source_paths
+ self.get_defines()
+ ['-O2', '-lm', self.get_march_flag()]
+ self.get_flags()
+ self.get_ldflags()
+ ['-s', '-o', self.get_compiled_file()]
)
def get_compile_env(self) -> Optional[Dict[str, str]]:
return GCC_COMPILE
def get_env(self) -> Dict[str, str]:
env = super().get_env() or {}
env.update(GCC_ENV)
return env
def parse_feedback_from_stderr(self, stderr: bytes, process: TracedPopen) -> str:
if not stderr or len(stderr) > 2048:
return ''
match = deque(recppexc.finditer(stderr), maxlen=1)
if not match:
return ''
exception = match[0].group(1)
# We call `demangle` because if the child process exits by running out of memory,
# __cxa_demangle will fail to allocate memory to demangle the name, resulting in errors
# like `St9bad_alloc`, the mangled form of the name.
return '' if len(exception) > 40 else utf8text(demangle(exception), 'replace')
@classmethod
def get_march_flag(cls) -> str:
conf_arch = cls.runtime_dict.get(cls.arch, 'native')
if conf_arch:
return f'-march={conf_arch}'
# arch must've been explicitly disabled
return ''
@classmethod
def METHOD_NAME(cls, command: str) -> List[str]:
return ['-dumpversion']
@classmethod
def autoconfig_run_test(cls, result: Dict[str, Any]) -> Tuple[Dict[str, str], bool, str, str]:
# Some versions of GCC/Clang (like those in Raspbian or ARM64 Debian)
# can't autodetect the CPU, in which case our unconditional passing of
# -march=native breaks. Here we try to see if -march=native works, and
# if not fall back to a generic (slow) build.
for target in ['native', None]:
result[cls.arch] = target
executor: Type[GCCExecutor] = type('Executor', (cls,), {'runtime_dict': result})
executor.__module__ = cls.__module__
errors: List[str] = []
success = executor.run_self_test(output=False, error_callback=errors.append)
if success:
assert cls.command is not None
message = f'Using {result[cls.command]} ({target or "generic"} target)'
# Don't pollute the YAML in the default case
if target == 'native':
del result[cls.arch]
return result, success, message, ''
return result, success, 'Failed self-test', '\n'.join(errors)
@classmethod
def autoconfig(cls) -> Tuple[Optional[Dict[str, Any]], bool, str, str]:
return super().autoconfig()
@classmethod
def initialize(cls) -> bool:
res = super().initialize()
if res:
versions = cls.get_runtime_versions()
cls.has_color = versions is not None and versions[0][1] is not None and versions[0][1] > (4, 9)
return res
class CPPExecutor(GCCExecutor):
std: str
ext: str = 'cpp'
def get_flags(self):
return ([f'-std={self.std}']) + super().get_flags()
|
2,931 |
build
|
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import os
from spack.package import *
class Lorene(MakefilePackage):
"""LORENE: Langage Objet pour la RElativite NumeriquE.
LORENE is a set of C++ classes to solve various problems
arising in numerical relativity, and more generally in
computational astrophysics. It provides tools to solve
partial differential equations by means of multi-domain
spectral methods."""
homepage = "https://lorene.obspm.fr/index.html"
cvs = ":pserver:anonymous:[email protected]:/cvsroot%module=Lorene"
maintainers("eschnett")
version("2021.4.22", date="2021-04-22")
variant("fftw", default=True, description="Use external FFTW for spectral transformations")
variant(
"bin_star",
default=True,
description="Build Bin_star solver for binary neutron star systems",
)
depends_on("blas")
depends_on("fftw @3:", when="+fftw")
depends_on("gsl")
depends_on("lapack")
depends_on("pgplot")
parallel = False
def edit(self, spec, prefix):
blas_libs = spec["blas"].libs.link_flags
fftw_incdirs = "-I" + spec["fftw"].prefix.include if "+fftw" in spec else ""
fftw_libdirs = "-L" + spec["fftw"].prefix.lib if "+fftw" in spec else ""
fftw_libs = spec["fftw"].libs.link_flags
gsl_incdirs = "-I" + spec["gsl"].prefix.include
gsl_libdirs = "-L" + spec["gsl"].prefix.lib
gsl_libs = spec["gsl"].libs.link_flags
lapack_libs = spec["lapack"].libs.link_flags
pgplot_incdirs = "-I" + spec["pgplot"].prefix.include
pgplot_libdirs = "-L" + spec["pgplot"].prefix.lib
pgplot_libs = spec["pgplot"].libs.link_flags
substitutions = [
("@CXX@", self.compiler.cxx),
("@CXXFLAGS@", "-g -I$(HOME_LORENE)/C++/Include -O3 -DNDEBUG"),
("@CXXFLAGS_G@", "-g -I$(HOME_LORENE)/C++/Include"),
("@F77@", self.compiler.f77),
("@F77FLAGS@", "-ffixed-line-length-none -g -O3"),
("@F77FLAGS_G@", "-ffixed-line-length-none -g"),
(
"@INC@",
(
"-I$(HOME_LORENE)/C++/Include "
+ "-I$(HOME_LORENE)/C++/Include_extra "
+ fftw_incdirs
+ " "
+ gsl_incdirs
+ " "
+ pgplot_incdirs
),
),
("@RANLIB@", "ls"),
("@MAKEDEPEND@", ": >$(df).d"),
("@FFT_DIR@", "FFTW3"),
("@LIB_CXX@", fftw_libdirs + " " + fftw_libs + " -lgfortran"),
("@LIB_GSL@", gsl_libdirs + " " + gsl_libs),
("@LIB_LAPACK@", lapack_libs + " " + blas_libs),
("@LIB_PGPLOT@", pgplot_libdirs + " " + pgplot_libs),
]
local_settings_template = join_path(
os.path.dirname(inspect.getmodule(self).__file__), "local_settings.template"
)
local_settings = join_path(self.stage.source_path, "local_settings")
copy(local_settings_template, local_settings)
for key, value in substitutions:
filter_file(key, value, local_settings)
def METHOD_NAME(self, spec, prefix):
args = ["HOME_LORENE=" + self.build_directory]
# (We could build the documentation as well.)
# (We could circumvent the build system and simply compile all
# source files, and do so in parallel.)
make("cpp", "fortran", "export", *args)
if "+bin_star" in spec:
with working_dir(join_path("Codes", "Bin_star")):
make(
"-f",
"Makefile_O2",
"coal",
"lit_bin",
"init_bin",
"coal_regu",
"init_bin_regu",
"analyse",
"prepare_seq",
*args,
)
def install(self, spec, prefix):
mkdirp(prefix.lib)
install_tree("Lib", prefix.lib)
mkdirp(prefix.bin)
if "+bin_star" in spec:
for exe in [
"coal",
"lit_bin",
"init_bin",
"coal_regu",
"init_bin_regu",
"analyse",
"prepare_seq",
]:
install(join_path("Codes", "Bin_star", exe), prefix.bin)
@property
def libs(self):
shared = "+shared" in self.spec
return find_libraries("liblorene*", root=self.prefix, shared=shared, recursive=True)
|
2,932 |
binary search
|
# coding=utf-8
#
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Most of this work is copyright (C) 2013-2019 David R. MacIver
# ([email protected]), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER
"""A module for miscellaneous useful bits and bobs that don't
obviously belong anywhere else. If you spot a better home for
anything that lives here, please move it."""
from __future__ import absolute_import, division, print_function
from hypothesis.internal.compat import (
array_or_list,
hbytes,
int_to_bytes,
integer_types,
)
def replace_all(buffer, replacements):
"""Substitute multiple replacement values into a buffer.
Replacements is a list of (start, end, value) triples.
"""
result = bytearray()
prev = 0
offset = 0
for u, v, r in replacements:
result.extend(buffer[prev:u])
result.extend(r)
prev = v
offset += len(r) - (v - u)
result.extend(buffer[prev:])
assert len(result) == len(buffer) + offset
return hbytes(result)
ARRAY_CODES = ["B", "H", "I", "L", "Q", "O"]
NEXT_ARRAY_CODE = dict(zip(ARRAY_CODES, ARRAY_CODES[1:]))
class IntList(object):
"""Class for storing a list of non-negative integers compactly.
We store them as the smallest size integer array we can get
away with. When we try to add an integer that is too large,
we upgrade the array to the smallest word size needed to store
the new value."""
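    # Illustrative example: IntList([1, 2]) starts out backed by an
    # unsigned-byte array ('B'); appending a value such as 2 ** 20 overflows
    # that code, and __setitem__ upgrades the storage ('B' -> 'H' -> 'I')
    # until the value fits.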
__slots__ = ("__underlying",)
def __init__(self, values=()):
for code in ARRAY_CODES:
try:
self.__underlying = array_or_list(code, values)
break
except OverflowError:
pass
else: # pragma: no cover
raise AssertionError("Could not create storage for %r" % (values,))
if isinstance(self.__underlying, list):
for v in self.__underlying:
if v < 0 or not isinstance(v, integer_types):
raise ValueError("Could not create IntList for %r" % (values,))
@classmethod
    def of_length(cls, n):
return IntList(array_or_list("B", [0]) * n)
def count(self, n):
return self.__underlying.count(n)
def __repr__(self):
return "IntList(%r)" % (list(self),)
def __len__(self):
return len(self.__underlying)
def __getitem__(self, i):
if isinstance(i, slice):
return IntList(self.__underlying[i])
return self.__underlying[i]
def __delitem__(self, i):
del self.__underlying[i]
def __iter__(self):
return iter(self.__underlying)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, IntList):
return NotImplemented
return self.__underlying == other.__underlying
def __ne__(self, other):
if self is other:
return False
if not isinstance(other, IntList):
return NotImplemented
return self.__underlying != other.__underlying
def append(self, n):
i = len(self)
self.__underlying.append(0)
self[i] = n
def __setitem__(self, i, n):
while True:
try:
self.__underlying[i] = n
return
except OverflowError:
assert n > 0
self.__upgrade()
def extend(self, ls):
for n in ls:
self.append(n)
def __upgrade(self):
code = NEXT_ARRAY_CODE[self.__underlying.typecode]
self.__underlying = array_or_list(code, self.__underlying)
def METHOD_NAME(lo, hi, f):
"""Binary searches in [lo , hi) to find
n such that f(n) == f(lo) but f(n + 1) != f(lo).
It is implicitly assumed and will not be checked
that f(hi) != f(lo).
"""
reference = f(lo)
while lo + 1 < hi:
mid = (lo + hi) // 2
if f(mid) == reference:
lo = mid
else:
hi = mid
return lo
def uniform(random, n):
"""Returns an hbytes of length n, distributed uniformly at random."""
return int_to_bytes(random.getrandbits(n * 8), n)
class LazySequenceCopy(object):
"""A "copy" of a sequence that works by inserting a mask in front
of the underlying sequence, so that you can mutate it without changing
the underlying sequence. Effectively behaves as if you could do list(x)
in O(1) time. The full list API is not supported yet but there's no reason
in principle it couldn't be."""
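    # Illustrative example: for x = LazySequenceCopy([1, 2, 3]), assigning
    # x[0] = 9 only records the change in the mask, so x[0] == 9 while the
    # underlying list still reads [1, 2, 3].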
def __init__(self, values):
self.__values = values
self.__len = len(values)
self.__mask = None
def __len__(self):
return self.__len
def pop(self):
if len(self) == 0:
raise IndexError("Cannot pop from empty list")
result = self[-1]
self.__len -= 1
if self.__mask is not None:
self.__mask.pop(self.__len, None)
return result
def __getitem__(self, i):
i = self.__check_index(i)
default = self.__values[i]
if self.__mask is None:
return default
else:
return self.__mask.get(i, default)
def __setitem__(self, i, v):
i = self.__check_index(i)
if self.__mask is None:
self.__mask = {}
self.__mask[i] = v
def __check_index(self, i):
n = len(self)
if i < -n or i >= n:
raise IndexError("Index %d out of range [0, %d)" % (i, n))
if i < 0:
i += n
assert 0 <= i < n
return i
def clamp(lower, value, upper):
"""Given a value and lower/upper bounds, 'clamp' the value so that
it satisfies lower <= value <= upper."""
return max(lower, min(value, upper))
def swap(ls, i, j):
"""Swap the elements ls[i], ls[j]."""
if i == j:
return
ls[i], ls[j] = ls[j], ls[i]
|
2,933 |
test returns unfired deferred
|
"""
Tests for ``magic_folder.test.eliotutil``.
"""
import logging
from testtools import (
TestCase,
)
from testtools.matchers import (
Is,
IsInstance,
Equals,
AfterPreprocessing,
)
from testtools.twistedsupport import (
succeeded,
failed,
)
from eliot import (
Message,
)
from eliot.twisted import DeferredContext
from eliot.testing import (
capture_logging,
assertHasAction,
)
from twisted.internet.defer import (
succeed,
)
from twisted.internet.task import deferLater
from twisted.internet import reactor
from ..util.eliotutil import (
log_call_deferred,
log_inline_callbacks,
_EliotLogging,
)
from .common import (
AsyncTestCase,
)
class EliotLoggedTestTests(AsyncTestCase):
def test_returns_none(self):
Message.log(hello="world")
def test_returns_fired_deferred(self):
Message.log(hello="world")
return succeed(None)
def METHOD_NAME(self):
Message.log(hello="world")
# @eliot_logged_test automatically gives us an action context but it's
# still our responsibility to maintain it across stack-busting
# operations.
d = DeferredContext(deferLater(reactor, 0.0, lambda: None))
d.addCallback(lambda ignored: Message.log(goodbye="world"))
# We didn't start an action. We're not finishing an action.
return d.result
# Opt out of the great features of common.SyncTestCase because we're
# interacting with Eliot in a very obscure, particular, fragile way. :/
class EliotLoggingTests(TestCase):
"""
Tests for ``_EliotLogging``.
"""
def test_stdlib_event_relayed(self):
"""
An event logged using the stdlib logging module is delivered to the Eliot
destination.
"""
collected = []
service = _EliotLogging([collected.append], capture_logs=True)
service.startService()
self.addCleanup(service.stopService)
# The first destination added to the global log destinations gets any
# buffered messages delivered to it. We don't care about those.
# Throw them on the floor. Sorry.
del collected[:]
logging.critical("oh no")
self.assertThat(
collected,
AfterPreprocessing(
len,
Equals(1),
),
)
def test_twisted_event_relayed(self):
"""
An event logged with a ``twisted.logger.Logger`` is delivered to the Eliot
destination.
"""
collected = []
service = _EliotLogging([collected.append], capture_logs=True)
service.startService()
self.addCleanup(service.stopService)
from twisted.logger import Logger
Logger().critical("oh no")
self.assertThat(
collected,
AfterPreprocessing(
len, Equals(1),
),
)
class LogCallDeferredTests(TestCase):
"""
Tests for ``log_call_deferred``.
"""
@capture_logging(
lambda self, logger:
assertHasAction(self, logger, u"the-action", succeeded=True),
)
def test_return_value(self, logger):
"""
The decorated function's return value is passed through.
"""
result = object()
@log_call_deferred(action_type=u"the-action")
def f():
return result
self.assertThat(f(), succeeded(Is(result)))
@capture_logging(
lambda self, logger: assertHasAction(
self, logger, "the-action", succeeded=True, startFields={"thing": "value"}
),
)
def test_args_logged(self, logger):
"""
The decorated function's arguments are logged.
"""
@log_call_deferred(action_type="the-action", include_args=True)
def f(self, reactor, thing):
pass
f(object(), object(), "value")
@capture_logging(
lambda self, logger: assertHasAction(
self, logger, "the-action", succeeded=True, startFields={"thing": "value"}
),
)
def test_args_logged_explicit(self, logger):
"""
The decorated function's arguments are logged.
"""
@log_call_deferred(action_type="the-action", include_args=["thing"])
def f(thing, other):
pass
f("value", object())
@capture_logging(
lambda self, logger: assertHasAction(
self, logger, "the-action", succeeded=True, startFields={"thing": "value"}
),
)
def test_args_logged_inline_callbacks(self, logger):
"""
The decorated function's arguments are logged.
"""
@log_inline_callbacks(action_type="the-action", include_args=["thing"])
def f(thing, other):
yield
f("value", object())
@capture_logging(
lambda self, logger:
assertHasAction(self, logger, u"the-action", succeeded=False),
)
def test_raise_exception(self, logger):
"""
An exception raised by the decorated function is passed through.
"""
class Result(Exception):
pass
@log_call_deferred(action_type=u"the-action")
def f():
raise Result()
self.assertThat(
f(),
failed(
AfterPreprocessing(
lambda f: f.value,
IsInstance(Result),
),
),
)
|
2,934 |
n create data
|
"""Export and import meshes with specular values."""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2005, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import bpy
import nose.tools
from integration import SingleNif
from integration.modules.scene import n_gen_header, b_gen_header
from integration.modules.geometry.trishape import b_gen_geometry, n_gen_geometry
from integration.modules.property.material import b_gen_material, n_gen_material
from integration.modules.property.specular import b_gen_specular, n_gen_specular
class TestSpecularProperty(SingleNif):
"""Test import/export of meshes with material based specular property."""
g_path = "property/specular"
g_name = "test_specular"
b_name = "Cube"
def b_create_header(self):
b_gen_header.b_create_oblivion_info()
def n_create_header(self):
n_gen_header.n_create_header_oblivion(self.n_data)
def b_create_data(self):
b_obj = b_gen_geometry.b_create_base_geometry(self.b_name)
b_mat = b_gen_material.b_create_material_block(b_obj)
b_gen_material.b_create_set_default_material_property(b_mat)
b_gen_specular.b_create_alter_specular_property(b_mat) # update specular
def b_check_data(self):
b_obj = bpy.data.objects[self.b_name]
b_gen_geometry.b_check_geom_obj(b_obj)
b_mat = b_gen_material.b_check_material_block(b_obj)
b_gen_specular.b_check_specular_property(b_mat)
def METHOD_NAME(self):
n_gen_geometry.n_create_blocks(self.n_data)
n_trishape = self.n_data.roots[0].children[0]
n_gen_material.n_attach_material_prop(n_trishape)
        n_gen_specular.n_alter_material_specular(n_trishape.properties[0]) # set material specular
        n_gen_specular.n_attach_specular_prop(n_trishape) # add nispecularprop
return self.n_data
def n_check_data(self):
n_nitrishape = self.n_data.roots[0].children[0]
n_gen_geometry.n_check_trishape(n_nitrishape)
nose.tools.assert_equal(n_nitrishape.num_properties, 2)
n_mat_prop = n_nitrishape.properties[1]
n_gen_material.n_check_material_block(n_mat_prop)
n_gen_specular.n_check_material_specular(n_mat_prop)
n_specular_prop = n_nitrishape.properties[0]
n_gen_specular.n_check_specular_block(n_specular_prop)
n_gen_specular.n_check_specular_property(n_specular_prop)
|
2,935 |
test dhcp
|
"""
Tests for the DHCPv4 protocol.
"""
from packetbeat import BaseTest
class Test(BaseTest):
def METHOD_NAME(self):
self.render_config_template()
self.run_packetbeat(pcap="dhcp.pcap")
objs = self.read_output(types=['dhcpv4'])
assert len(objs) == 4
assert "event.start" in objs[0]
assert objs[0]["client.ip"] == "0.0.0.0"
assert objs[0]["client.port"] == 68
assert objs[0]["destination.ip"] == "255.255.255.255"
assert objs[0]["dhcpv4.client_mac"] == "00-0B-82-01-FC-42"
assert objs[0]["dhcpv4.flags"] == "unicast"
assert objs[0]["dhcpv4.hardware_type"] == "Ethernet"
assert objs[0]["dhcpv4.hops"] == 0
assert objs[0]["dhcpv4.op_code"] == "bootrequest"
assert objs[0]["dhcpv4.option.message_type"] == "discover"
assert objs[0]["dhcpv4.option.parameter_request_list"] == [
"Subnet Mask",
"Router",
"Domain Name Server",
"NTP Servers"
]
assert objs[0]["dhcpv4.option.requested_ip_address"] == "0.0.0.0"
assert objs[0]["dhcpv4.seconds"] == 0
assert objs[0]["dhcpv4.transaction_id"] == "0x00003d1d"
assert objs[0]["event.dataset"] == "dhcpv4"
assert objs[0]["server.ip"] == "255.255.255.255"
assert objs[0]["server.port"] == 67
assert objs[0]["source.ip"] == "0.0.0.0"
assert objs[0]["status"] == "OK"
assert objs[0]["network.type"] == "ipv4"
assert objs[0]["network.transport"] == "udp"
assert objs[0]["network.protocol"] == "dhcpv4"
assert objs[0]["network.bytes"] == 272
assert objs[0]["network.community_id"] == "1:t9O1j0qj71O4wJM7gnaHtgmfev8="
assert objs[0]["type"] == "dhcpv4"
assert "event.start" in objs[1]
assert objs[1]["client.ip"] == "192.168.0.10"
assert objs[1]["client.port"] == 68
assert objs[1]["destination.ip"] == "192.168.0.10"
assert objs[1]["dhcpv4.assigned_ip"] == "192.168.0.10"
assert objs[1]["dhcpv4.client_mac"] == "00-0B-82-01-FC-42"
assert objs[1]["dhcpv4.flags"] == "unicast"
assert objs[1]["dhcpv4.hardware_type"] == "Ethernet"
assert objs[1]["dhcpv4.hops"] == 0
assert objs[1]["dhcpv4.op_code"] == "bootreply"
assert objs[1]["dhcpv4.option.ip_address_lease_time_sec"] == 3600
assert objs[1]["dhcpv4.option.message_type"] == "offer"
assert objs[1]["dhcpv4.option.rebinding_time_sec"] == 3150
assert objs[1]["dhcpv4.option.renewal_time_sec"] == 1800
assert objs[1]["dhcpv4.option.server_identifier"] == "192.168.0.1"
assert objs[1]["dhcpv4.option.subnet_mask"] == "255.255.255.0"
assert objs[1]["dhcpv4.seconds"] == 0
assert objs[1]["dhcpv4.transaction_id"] == "0x00003d1d"
assert objs[1]["event.dataset"] == "dhcpv4"
assert objs[1]["network.bytes"] == 300
assert objs[1]["network.community_id"] == "1:VbRSZnvQqvLiQRhYHLrdVI17sLQ="
assert objs[1]["network.protocol"] == "dhcpv4"
assert objs[1]["network.transport"] == "udp"
assert objs[1]["network.type"] == "ipv4"
assert objs[1]["server.bytes"] == 300
assert objs[1]["server.ip"] == "192.168.0.1"
assert objs[1]["server.port"] == 67
assert objs[1]["source.ip"] == "192.168.0.1"
assert objs[1]["status"] == "OK"
assert objs[1]["type"] == "dhcpv4"
assert "event.start" in objs[2]
assert objs[2]["client.ip"] == "0.0.0.0"
assert objs[2]["client.port"] == 68
assert objs[2]["dhcpv4.client_mac"] == "00-0B-82-01-FC-42"
assert objs[2]["dhcpv4.flags"] == "unicast"
assert objs[2]["dhcpv4.hardware_type"] == "Ethernet"
assert objs[2]["dhcpv4.hops"] == 0
assert objs[2]["dhcpv4.op_code"] == "bootrequest"
assert objs[2]["dhcpv4.option.message_type"] == "request"
assert objs[2]["dhcpv4.option.parameter_request_list"] == [
"Subnet Mask",
"Router",
"Domain Name Server",
"NTP Servers"
]
assert objs[2]["dhcpv4.option.requested_ip_address"] == "192.168.0.10"
assert objs[2]["dhcpv4.option.server_identifier"] == "192.168.0.1"
assert objs[2]["dhcpv4.seconds"] == 0
assert objs[2]["dhcpv4.transaction_id"] == "0x00003d1e"
assert objs[2]["event.dataset"] == "dhcpv4"
assert objs[2]["network.bytes"] == 272
assert objs[2]["network.community_id"] == "1:t9O1j0qj71O4wJM7gnaHtgmfev8="
assert objs[2]["network.protocol"] == "dhcpv4"
assert objs[2]["network.transport"] == "udp"
assert objs[2]["network.type"] == "ipv4"
assert objs[2]["server.ip"] == "255.255.255.255"
assert objs[2]["server.port"] == 67
assert objs[2]["status"] == "OK"
assert objs[2]["type"] == "dhcpv4"
assert "event.start" in objs[3]
assert objs[3]["client.ip"] == "192.168.0.10"
assert objs[3]["client.port"] == 68
assert objs[3]["destination.ip"] == "192.168.0.10"
assert objs[3]["dhcpv4.assigned_ip"] == "192.168.0.10"
assert objs[3]["dhcpv4.client_mac"] == "00-0B-82-01-FC-42"
assert objs[3]["dhcpv4.flags"] == "unicast"
assert objs[3]["dhcpv4.hardware_type"] == "Ethernet"
assert objs[3]["dhcpv4.hops"] == 0
assert objs[3]["dhcpv4.op_code"] == "bootreply"
assert objs[3]["dhcpv4.option.ip_address_lease_time_sec"] == 3600
assert objs[3]["dhcpv4.option.message_type"] == "ack"
assert objs[3]["dhcpv4.option.rebinding_time_sec"] == 3150
assert objs[3]["dhcpv4.option.renewal_time_sec"] == 1800
assert objs[3]["dhcpv4.option.server_identifier"] == "192.168.0.1"
assert objs[3]["dhcpv4.option.subnet_mask"] == "255.255.255.0"
assert objs[3]["dhcpv4.seconds"] == 0
assert objs[3]["dhcpv4.transaction_id"] == "0x00003d1e"
assert objs[3]["event.dataset"] == "dhcpv4"
assert objs[3]["network.bytes"] == 300
assert objs[3]["network.community_id"] == "1:VbRSZnvQqvLiQRhYHLrdVI17sLQ="
assert objs[3]["network.protocol"] == "dhcpv4"
assert objs[3]["network.transport"] == "udp"
assert objs[3]["network.type"] == "ipv4"
assert objs[3]["server.ip"] == "192.168.0.1"
assert objs[3]["server.port"] == 67
assert objs[3]["source.ip"] == "192.168.0.1"
assert objs[3]["status"] == "OK"
assert objs[3]["type"] == "dhcpv4"
|
2,936 |
add spec
|
import pathlib
from typing import Any, Dict, List, Optional
import jinja2
import yaml
from conda_lock.common import get_in
from conda_lock.models.lock_spec import Dependency, LockSpecification
from conda_lock.src_parser.conda_common import conda_spec_to_versioned_dep
from conda_lock.src_parser.selectors import filter_platform_selectors
class UndefinedNeverFail(jinja2.Undefined):
"""
Copied from https://github.com/conda/conda-build/blob/master/conda_build/jinja_context.py
A class for Undefined jinja variables.
This is even less strict than the default jinja2.Undefined class,
because it permits things like {{ MY_UNDEFINED_VAR[:2] }} and
{{ MY_UNDEFINED_VAR|int }}. This can mask lots of errors in jinja templates, so it
should only be used for a first-pass parse, when you plan on running a 'strict'
second pass later.
Note:
When using this class, any usage of an undefined variable in a jinja template is recorded
in the (global) all_undefined_names class member. Therefore, after jinja rendering,
you can detect which undefined names were used by inspecting that list.
Be sure to clear the all_undefined_names list before calling template.render().
"""
all_undefined_names: List[Optional[str]] = []
def __init__( # type: ignore
self,
hint=None,
obj=jinja2.utils.missing,
name=None,
exc=jinja2.exceptions.UndefinedError,
) -> None:
jinja2.Undefined.__init__(self, hint, obj, name, exc)
# Using any of these methods on an Undefined variable
# results in another Undefined variable.
# fmt: off
__add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
__truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
__mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
__getitem__ = __lt__ = __le__ = __gt__ = __ge__ = \
__complex__ = __pow__ = __rpow__ = \
lambda self, *args, **kwargs: self._return_undefined(self._undefined_name)
# fmt: on
# Accessing an attribute of an Undefined variable
# results in another Undefined variable.
def __getattr__(self, k: str) -> "UndefinedNeverFail":
try:
return object.__getattr__(self, k) # type: ignore
except AttributeError:
return self._return_undefined(self._undefined_name + "." + k) # type: ignore
# Unlike the methods above, Python requires that these
# few methods must always return the correct type
__str__ = __repr__ = lambda self: self._return_value(str()) # type: ignore
__unicode__ = lambda self: self._return_value("") # noqa: E731
__int__ = lambda self: self._return_value(0) # type: ignore # noqa: E731
__float__ = lambda self: self._return_value(0.0) # type: ignore # noqa: E731
__nonzero__ = lambda self: self._return_value(False) # noqa: E731
def _return_undefined(self, result_name: str) -> "UndefinedNeverFail": # type: ignore
# Record that this undefined variable was actually used.
UndefinedNeverFail.all_undefined_names.append(self._undefined_name)
return UndefinedNeverFail(
hint=self._undefined_hint,
obj=self._undefined_obj,
name=result_name,
exc=self._undefined_exception,
)
def _return_value(self, value=None): # type: ignore
# Record that this undefined variable was actually used.
UndefinedNeverFail.all_undefined_names.append(self._undefined_name)
return value
def parse_meta_yaml_file(
meta_yaml_file: pathlib.Path,
platforms: List[str],
) -> LockSpecification:
"""Parse a simple meta-yaml file for dependencies assuming the target platforms.
* This will emit one dependency set per target platform. These may differ
if the dependencies depend on platform selectors.
* This does not support multi-output files and will ignore all lines with
selectors other than platform.
"""
if not meta_yaml_file.exists():
raise FileNotFoundError(f"{meta_yaml_file} not found")
with meta_yaml_file.open("r") as fo:
t = jinja2.Template(fo.read(), undefined=UndefinedNeverFail)
rendered = t.render()
meta_yaml_data = yaml.safe_load(rendered)
channels = get_in(["extra", "channels"], meta_yaml_data, [])
try:
# conda-lock will use `--override-channels` so nodefaults is redundant.
channels.remove("nodefaults")
except ValueError:
pass
# parse with selectors for each target platform
dep_map = {
platform: _parse_meta_yaml_file_for_platform(meta_yaml_file, platform)
for platform in platforms
}
return LockSpecification(
dependencies=dep_map,
channels=channels,
sources=[meta_yaml_file],
)
def _parse_meta_yaml_file_for_platform(
meta_yaml_file: pathlib.Path,
platform: str,
) -> List[Dependency]:
"""Parse a simple meta-yaml file for dependencies, assuming the target platform.
* This does not support multi-output files and will ignore all lines with selectors other than platform
"""
with meta_yaml_file.open("r") as fo:
filtered_recipe = "\n".join(
filter_platform_selectors(fo.read(), platform=platform)
)
t = jinja2.Template(filtered_recipe, undefined=UndefinedNeverFail)
rendered = t.render()
meta_yaml_data = yaml.safe_load(rendered)
dependencies: List[Dependency] = []
def METHOD_NAME(spec: str, category: str) -> None:
if spec is None:
return
dep = conda_spec_to_versioned_dep(spec, category)
dependencies.append(dep)
def add_requirements_from_recipe_or_output(yaml_data: Dict[str, Any]) -> None:
for s in get_in(["requirements", "host"], yaml_data, []):
METHOD_NAME(s, "main")
for s in get_in(["requirements", "run"], yaml_data, []):
METHOD_NAME(s, "main")
for s in get_in(["test", "requires"], yaml_data, []):
METHOD_NAME(s, "dev")
add_requirements_from_recipe_or_output(meta_yaml_data)
for output in get_in(["outputs"], meta_yaml_data, []):
add_requirements_from_recipe_or_output(output)
return dependencies
|
2,937 |
set unknown threshold
|
#!/usr/bin/env python3
###################################################################################################
################################################################################
# Copyright (C) 2023 Maxim Integrated Products, Inc., All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL MAXIM INTEGRATED BE LIABLE FOR ANY CLAIM, DAMAGES
# OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Maxim Integrated
# Products, Inc. shall not be used except as stated in the Maxim Integrated
# Products, Inc. Branding Policy.
#
# The mere transfer of this software does not imply any licenses
# of trade secrets, proprietary technology, copyrights, patents,
# trademarks, maskwork rights, or any other form of intellectual
# property whatsoever. Maxim Integrated Products, Inc. retains all
# ownership rights.
#
###############################################################################
"""Includes face identifier class to decide on person for given embedding
"""
import time
import numpy as np
from ai85_adapter import AI85SimulatorAdapter, AI85UartAdapter
from utils import load_data_arrs
class FaceID:
"""
Class to identify embedding as either one of the faces in embeddings db or unknown.
"""
face_db = None
ai85_adapter = None
thresh_for_unknowns = 0.0
def __init__(self, face_db_path, unknown_threshold=0.0, ai85_adapter=None):
'''
:param face_db_path: Path to the face db file.
:param unknown_threshold: Distance threshold to identify subjects who are not in the db.
:param ai85_adapter: Type of the adapter used. Options: {None, 'sim', 'uart'}
(default: None).
'''
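# A minimal usage sketch (the db path and threshold below are hypothetical values):
#
#     face_id = FaceID('db/face_db.bin', unknown_threshold=450.0)
#     ids, dists, ai85_time, db_match_time = face_id.run(img)
#
# With no AI85 adapter set, run() returns (-1, -1, 0, 0); with an adapter it
# returns subject ids sorted by mean distance, with -1 inserted at the
# unknown-threshold position.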
subject_names_list, self.subj_list, self.embedding_list, self.img_list = load_data_arrs(face_db_path, #pylint: disable=line-too-long
load_img_prevs=True) #pylint: disable=line-too-long
self.subj_ids = np.array(list(range(len(subject_names_list))))
self.METHOD_NAME(unknown_threshold)
self.set_ai85_adapter(ai85_adapter)
def has_ai85_adapter(self):
"""Checks if AI85 adapter is set"""
if self.ai85_adapter is None:
return False
return True
def __release_ai85_adapter(self):
if self.ai85_adapter:
del self.ai85_adapter
self.ai85_adapter = None
def __get_sorted_mean_distances_to_subjects(self, embedding):
dists = embedding.astype(np.float32) - np.array(self.embedding_list.astype(np.float32))
dists = np.sqrt(np.sum(np.square(dists), axis=1))
mean_dists = np.zeros((len(self.subj_ids), ), dtype=np.float32)
for i, sid in enumerate(self.subj_ids):
mean_dists[i] = dists[self.subj_list == sid].mean()
idx = np.argsort(mean_dists)
mean_dists = mean_dists[idx]
subjects = self.subj_ids[idx]
return subjects, mean_dists
def __get_min_mean_distance_subject(self, embedding):
subjects, mean_dists = self.__get_sorted_mean_distances_to_subjects(embedding)
return subjects[0], mean_dists[0]
def METHOD_NAME(self, unknown_threshold):
"""Sets threshold for unknown decision"""
self.thresh_for_unknowns = unknown_threshold
def set_ai85_adapter(self, adapter, **adapter_params):
"""Sets AI85 adapter"""
if adapter is None:
self.__release_ai85_adapter()
elif adapter.lower() == 'sim':
self.__release_ai85_adapter()
if 'model_path' in adapter_params:
self.ai85_adapter = AI85SimulatorAdapter(adapter_params['model_path'])
else:
print('Path to model checkpoint file should be declared as "model_path"!!!')
self.__validate_db()
print('Simulator activated!!')
elif adapter.lower() == 'uart':
self.__release_ai85_adapter()
self.ai85_adapter = AI85UartAdapter(adapter_params['uart_port'],
adapter_params['baud_rate'],
adapter_params['embedding_len'])
print('UART connection activated!!')
else:
print('Unknown AI85 Source selection')
def __validate_db(self):
fail = False
for i, img in enumerate(self.img_list):
emb = self.ai85_adapter.get_network_out(img)
emb = emb[:, :, 0, 0].astype(np.int8).flatten()
if np.sum(emb != self.embedding_list[i]) > 0:
print(f'DB Validation Error: Issue in sample {i}')
fail = True
if not fail:
print('Success in DB validation!')
def run(self, img):
"""Runs face identifier."""
subject_id = -1
dist = -1
ai85_time = 0
db_match_time = 0
if self.ai85_adapter:
t_start = time.time()
embedding = self.ai85_adapter.get_network_out(img)
embedding = np.squeeze(embedding)
t_get_emb = time.time()
subject_id, dist = self.__get_sorted_mean_distances_to_subjects(embedding)
unknown_idx = np.where(dist > self.thresh_for_unknowns)[0]
if unknown_idx.size > 0:
subject_id = np.insert(subject_id, unknown_idx[0], -1)
dist = np.insert(dist, unknown_idx[0], self.thresh_for_unknowns)
else:
subject_id = np.append(subject_id, -1)
dist = np.append(dist, self.thresh_for_unknowns)
t_db_match = time.time()
ai85_time = t_get_emb - t_start
db_match_time = t_db_match - t_get_emb
return subject_id, dist, ai85_time, db_match_time
|
2,938 |
on 204
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"networkcloud l2network delete",
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
"""Delete the provided layer 2 (L2) network.
:example: Delete L2 network
az networkcloud l2network delete --name "l2NetworkName" --resource-group "resourceGroupName"
"""
_aaz_info = {
"version": "2023-07-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.networkcloud/l2networks/{}", "2023-07-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.l2_network_name = AAZStrArg(
options=["-n", "--name", "--l2-network-name"],
help="The name of the L2 network.",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^([a-zA-Z0-9][a-zA-Z0-9-_]{0,28}[a-zA-Z0-9])$",
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.L2NetworksDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class L2NetworksDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.METHOD_NAME,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/l2Networks/{l2NetworkName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"l2NetworkName", self.ctx.args.l2_network_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-07-01",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def METHOD_NAME(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"]
|
2,939 |
get execute combine result
|
# Copyright 2022 XProbe Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
import pandas as pd
from ...core import OutputType
from ...utils import implements
from .aggregation import DataFrameGroupByAgg
from .custom_aggregation import (
DataFrameCustomGroupByAggMixin,
register_custom_groupby_agg_func,
)
@register_custom_groupby_agg_func("nunique")
class DataFrameCustomGroupByNuniqueMixin(DataFrameCustomGroupByAggMixin):
@classmethod
def _get_level_indexes(
cls, op: DataFrameGroupByAgg, data: pd.DataFrame
) -> List[int]:
"""
When grouping by level, get the list of level indexes.
Level can be int, level name, or sequence of such.
This function calculates the corresponding indexes.
Parameters
----------
op
data
Returns
-------
"""
index = [data.index.name] if data.index.name else data.index.names
index = pd.Index(index)
level = op.groupby_params["level"]
if isinstance(level, int):
indexes = [level]
elif isinstance(level, str):
indexes = [index.get_loc(level)]
else:
level = list(level)
if isinstance(level[0], int):
indexes = level
else:
indexes = index.get_indexer(level).tolist()
return indexes
@classmethod
def _get_selection_columns(cls, op: DataFrameGroupByAgg) -> Union[None, List]:
"""
Get groupby selection columns from op parameters.
If this returns None, it means all columns are required.
Parameters
----------
op
Returns
-------
"""
if "selection" in op.groupby_params:
selection = op.groupby_params["selection"]
if isinstance(selection, (tuple, list)):
selection = [n for n in selection]
else:
selection = [selection]
return selection
@classmethod
def _get_execute_map_result(
cls, op: DataFrameGroupByAgg, in_data: pd.DataFrame
) -> Union[pd.DataFrame, pd.Series]:
selections = cls._get_selection_columns(op)
by_cols = op.raw_groupby_params["by"]
if by_cols is not None:
cols = (
[*selections, *by_cols] if selections is not None else in_data.columns
)
res = in_data[cols].drop_duplicates(subset=cols).set_index(by_cols)
else: # group by level
selections = selections if selections is not None else in_data.columns
level_indexes = cls._get_level_indexes(op, in_data)
in_data = in_data.reset_index()
index_names = in_data.columns[level_indexes].tolist()
cols = [*index_names, *selections]
res = in_data[cols].drop_duplicates().set_index(index_names)
# if sort=True is specified, sort index when finishing drop_duplicates.
if op.raw_groupby_params["sort"]:
res = res.sort_index()
if op.output_types[0] == OutputType.series:
res = res.squeeze()
return res
@classmethod
def METHOD_NAME(
cls, op: DataFrameGroupByAgg, in_data: pd.DataFrame
) -> Union[pd.DataFrame, pd.Series]:
# in_data.index.names means MultiIndex (groupby on multi cols)
index_col = in_data.index.name or in_data.index.names
res = in_data.reset_index().drop_duplicates().set_index(index_col)
if op.output_types[0] == OutputType.series:
res = res.squeeze()
return res
@classmethod
def _get_execute_agg_result(
cls, op: DataFrameGroupByAgg, in_data: pd.DataFrame
) -> Union[pd.DataFrame, pd.Series]:
groupby_params = op.groupby_params.copy()
cols = in_data.index.name or in_data.index.names
by = op.raw_groupby_params["by"]
if by is not None:
if op.output_types[0] == OutputType.dataframe:
groupby_params.pop("level", None)
groupby_params["by"] = cols
in_data = in_data.reset_index()
else:
# When grouping by multiple levels, we must get all the actual levels from raw_groupby_params,
# since the level field in op.groupby_params is not correct.
groupby_params["level"] = op.raw_groupby_params["level"]
res = in_data.groupby(**groupby_params).nunique()
return res
@classmethod
@implements(DataFrameCustomGroupByAggMixin.execute_map)
def execute_map(cls, op, in_data: pd.DataFrame) -> Union[pd.DataFrame, pd.Series]:
return cls._get_execute_map_result(op, in_data)
@classmethod
@implements(DataFrameCustomGroupByAggMixin.execute_combine)
def execute_combine(
cls, op, in_data: pd.DataFrame
) -> Union[pd.DataFrame, pd.Series]:
return cls.METHOD_NAME(op, in_data)
@classmethod
@implements(DataFrameCustomGroupByAggMixin.execute_agg)
def execute_agg(cls, op, in_data: pd.DataFrame) -> Union[pd.DataFrame, pd.Series]:
return cls._get_execute_agg_result(op, in_data)
|
2,940 |
close files
|
# Copyright(C) 1999-2020 National Technology & Engineering Solutions
# of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
# NTESS, the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of NTESS nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from phactori import *
import datetime
#phactori_combine_to_single_python_file_subpiece_begin_1
class PhactoriDataArtifactMetaDataControl:
def __init__(self, myProcessId, numProcesses):
self.outputEnabled = True
self.artifactListCsvFileName = None
self.artifactListCsvFileHandle = None
self.artifactListTryOpenCount = 0
self.thisProcessIsWritingProcess = -1
if myProcessId == int(numProcesses/2):
self.thisProcessIsWritingProcess = 1
else:
self.thisProcessIsWritingProcess = 0
if PhactoriDbg(100):
myDebugPrint3("PhactoriDataArtifactMetaDataControl:__init__ " + \
str(myProcessId) + ", " + str(numProcesses) + ", " + \
str(self.thisProcessIsWritingProcess) + "\n", 100)
def EnableOutput(self, onOff):
self.outputEnabled = onOff
def METHOD_NAME(self):
if self.artifactListCsvFileHandle != None:
self.artifactListCsvFileHandle.close()
def ThisProcessIsWritingProcess(self):
return self.thisProcessIsWritingProcess > 0
def DataArtifactOutputListOpen(self):
if self.artifactListCsvFileHandle == None:
if self.artifactListTryOpenCount > 0:
return False
self.artifactListTryOpenCount += 1
myNow = datetime.datetime.now()
myFormat = "%Y-%m-%d-%H:%M:%S"
self.artifactListCsvFileName = "DataArtifactList_" + myNow.strftime(myFormat) + ".csv"
self.artifactListCsvFileHandle = open(self.artifactListCsvFileName, "w")
if self.artifactListCsvFileHandle == None:
print("AddImageToDataArtifactOutputList: unable to open file:\n",
self.artifactListCsvFileName)
return False
return True
def CheckIfDoOutput(self):
if self.outputEnabled == False:
return False
if self.ThisProcessIsWritingProcess() == False:
return False
if self.DataArtifactOutputListOpen() == False:
return False
return True
def AddDataArtifactToDataArtifactOutputList(self, fileName):
if self.CheckIfDoOutput() == False:
return
myNow = datetime.datetime.now()
outDate = str(myNow)
outStr = fileName + "," + outDate + "\n"
self.artifactListCsvFileHandle.write(outStr)
self.artifactListCsvFileHandle.flush()
def AddDataExportToDataArtifactOutputList(self, fileName):
self.AddDataArtifactToDataArtifactOutputList(fileName)
def AddImageToDataArtifactOutputList(self, fileName):
self.AddDataArtifactToDataArtifactOutputList(fileName)
if __name__ == '__main__':
pdamdc = PhactoriDataArtifactMetaDataControl(1,2)
pdamdc.AddImageToDataArtifactOutputList("CatalystOutput/test1.png")
pdamdc.AddDataExportToDataArtifactOutputList("CatalystVtkDataOutput/test2.vtm")
#phactori_combine_to_single_python_file_subpiece_end_1
|
2,941 |
console describe
|
from __future__ import annotations
from typing import List
import typer
from rich.console import Console
from rich.table import Table
from ...helpers import to_json, to_yaml
from ...resource import Resource
from ...system import system
from .. import common, helpers
from ..console import console
@console.command(name="list")
def METHOD_NAME(
# Source
source: List[str] = common.source,
name: str = common.resource_name,
type: str = common.type,
path: str = common.path,
scheme: str = common.scheme,
format: str = common.format,
encoding: str = common.encoding,
innerpath: str = common.innerpath,
compression: str = common.compression,
# Dialect
dialect: str = common.dialect,
header_rows: str = common.header_rows,
header_join: str = common.header_join,
comment_char: str = common.comment_char,
comment_rows: str = common.comment_rows,
sheet: str = common.sheet,
table: str = common.table,
keys: str = common.keys,
keyed: bool = common.keyed,
# Detector
buffer_size: int = common.buffer_size,
sample_size: int = common.sample_size,
field_type: str = common.field_type,
field_names: str = common.field_names,
field_confidence: float = common.field_confidence,
field_float_numbers: bool = common.field_float_numbers,
field_missing_values: str = common.field_missing_values,
# Command
basepath: str = common.basepath,
yaml: bool = common.yaml,
json: bool = common.json,
# System
debug: bool = common.debug,
trusted: bool = common.trusted,
standards: str = common.standards,
):
"""
List a data source.
"""
console = Console()
# Setup system
if trusted:
system.trusted = trusted
if standards:
system.standards = standards # type: ignore
# Create source
source = helpers.create_source(source, path=path)
if not source and not path:
note = 'Providing "source" or "path" is required'
helpers.print_error(console, note=note)
raise typer.Exit(code=1)
try:
# Create dialect
dialect_obj = helpers.create_dialect(
descriptor=dialect,
header_rows=header_rows,
header_join=header_join,
comment_char=comment_char,
comment_rows=comment_rows,
sheet=sheet,
table=table,
keys=keys,
keyed=keyed,
)
# Create detector
detector_obj = helpers.create_detector(
buffer_size=buffer_size,
sample_size=sample_size,
field_type=field_type,
field_names=field_names,
field_confidence=field_confidence,
field_float_numbers=field_float_numbers,
field_missing_values=field_missing_values,
)
# Create resource
resource = Resource(
source=helpers.create_source(source),
name=name,
path=path,
scheme=scheme,
format=format,
datatype=type,
compression=compression,
innerpath=innerpath,
encoding=encoding,
basepath=basepath,
detector=detector_obj,
)
# Add dialect
if dialect_obj:
resource.dialect = dialect_obj
# List resources
resources = resource.list(name=name)
descriptors = [resource.to_descriptor() for resource in resources]
except Exception as exception:
helpers.print_exception(console, debug=debug, exception=exception)
raise typer.Exit(code=1)
# Yaml mode
if yaml:
descriptor = to_yaml(descriptors).strip()
print(descriptor)
raise typer.Exit()
# Json mode
if json:
descriptor = to_json(descriptors).strip()
print(descriptor)
raise typer.Exit()
# Default mode
console.rule("[bold]Dataset")
view = Table(title="dataset")
view.add_column("name")
view.add_column("type")
view.add_column("path")
for resource in resources:
style = "sky_blue1" if resource.tabular else ""
row = [resource.name, resource.type, resource.path]
view.add_row(*row, style=style)
console.print(view)
|
2,942 |
assert response success
|
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import json
import logging
import re
import tempfile
from pathlib import Path
import lxml
import requests
from werkzeug.test import Client
from werkzeug.wrappers import Response
from pywps import Process, __version__
from pywps import xml_util as etree
from pywps.app.Common import Metadata, MetadataUrl
from pywps.inout import (
BoundingBoxInput,
BoundingBoxOutput,
ComplexInput,
ComplexOutput,
Format,
LiteralInput,
LiteralOutput,
)
logging.disable(logging.CRITICAL)
def service_ok(url, timeout=5):
try:
resp = requests.get(url, timeout=timeout)
if 'html' in resp.headers['content-type']:
ok = False
else:
ok = resp.ok
except requests.exceptions.ReadTimeout:
ok = False
except requests.exceptions.ConnectTimeout:
ok = False
except Exception:
ok = False
return ok
class DocExampleProcess(Process):
"""This first line is going to be skipped by the :skiplines:1 option.
Notes
-----
This is additional documentation that can be added following the Numpy docstring convention.
"""
def __init__(self):
inputs = [
LiteralInput(
'literal_input', "Literal input title", 'integer', abstract="Literal input value abstract.",
min_occurs=0, max_occurs=1, uoms=['meters', 'feet'], default=1
),
LiteralInput('date_input', 'The title is shown when no abstract is provided.', 'date',
allowed_values=['2000-01-01', '2018-01-01']),
ComplexInput('complex_input', 'Complex input title',
[Format('application/json'), Format('application/x-netcdf')],
abstract="Complex input abstract.", ),
BoundingBoxInput('bb_input', 'BoundingBox input title', ['EPSG:4326', ],
metadata=[Metadata('EPSG.io', 'http://epsg.io/'), ]),
]
outputs = [
LiteralOutput(
'literal_output', 'Literal output title', 'boolean', abstract='Boolean output abstract.'
),
ComplexOutput('complex_output', 'Complex output', [Format('text/plain'), ], ),
BoundingBoxOutput('bb_output', 'BoundingBox output title', ['EPSG:4326', ])
]
super(DocExampleProcess, self).__init__(
self._handler,
identifier='doc_example_process_identifier',
title="Process title",
abstract="Multiline process abstract.",
version="4.0",
metadata=[Metadata('PyWPS docs', 'https://pywps.org'),
Metadata('NumPy docstring conventions',
'https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt'),
MetadataUrl('Duplicate label', 'http://one.example.com', anonymous=True),
MetadataUrl('Duplicate label', 'http://two.example.com', anonymous=True),
],
inputs=inputs,
outputs=outputs,
)
def _handler(self, request, response):
pass
class WpsClient(Client):
def post_xml(self, *args, **kwargs):
doc = kwargs.pop('doc')
data = etree.tostring(doc, pretty_print=True)
kwargs['data'] = data
return self.post(*args, **kwargs)
def post_json(self, *args, **kwargs):
doc = kwargs.pop('doc')
# data = json.dumps(doc, indent=2)
# kwargs['data'] = data
kwargs['json'] = doc
# kwargs['content_type'] = 'application/json' # input is json, redundant as it's deduced from the json kwarg
# kwargs['mimetype'] = 'application/json' # output is json
kwargs['environ_base'] = {'HTTP_ACCEPT': 'application/json'} # output is json
return self.post(*args, **kwargs)
class WpsTestResponse(Response):
def __init__(self, *args):
super(WpsTestResponse, self).__init__(*args)
if re.match(r'text/xml(;\s*charset=.*)?', self.headers.get('Content-Type')):
self.xml = etree.fromstring(self.get_data())
def xpath(self, path):
version = self.xml.attrib["version"]
if version == "2.0.0":
from pywps import namespaces200
namespaces = namespaces200
else:
from pywps import namespaces100
namespaces = namespaces100
return self.xml.xpath(path, namespaces=namespaces)
def xpath_text(self, path):
return ' '.join(e.text for e in self.xpath(path))
def client_for(service):
return WpsClient(service, WpsTestResponse)
def assert_response_accepted(resp):
assert resp.status_code == 200
assert re.match(r'text/xml(;\s*charset=.*)?', resp.headers['Content-Type'])
success = resp.xpath_text('/wps:ExecuteResponse'
'/wps:Status'
'/wps:ProcessAccepted')
assert success is not None
# TODO: assert status URL is present
def assert_process_started(resp):
assert resp.status_code == 200
assert re.match(r'text/xml(;\s*charset=.*)?', resp.headers['Content-Type'])
success = resp.xpath_text('/wps:ExecuteResponse'
'/wps:Status'
'/wps:ProcessStarted')
# Is it still like this in PyWPS-4 ?
assert success.split()[0] == "processstarted"
def assert_response_success_json(resp, expected_data):
assert resp.status_code == 200
content_type = resp.headers['Content-Type']
expected_content_type = 'application/json'
re_content_type = rf'{expected_content_type}(;\s*charset=.*)?'
assert re.match(re_content_type, content_type)
data = json.loads(resp.data)
success = data['status']['status']
assert success == 'succeeded'
if expected_data:
outputs = data['outputs']
assert outputs == expected_data
def METHOD_NAME(resp):
assert resp.status_code == 200
content_type = resp.headers['Content-Type']
expected_content_type = 'text/xml'
re_content_type = rf'{expected_content_type}(;\s*charset=.*)?'
assert re.match(re_content_type, content_type)
success = resp.xpath('/wps:ExecuteResponse/wps:Status/wps:ProcessSucceeded')
assert len(success) == 1
def assert_process_exception(resp, code=None):
assert resp.status_code == 400
assert re.match(r'text/xml(;\s*charset=.*)?', resp.headers['Content-Type'])
elem = resp.xpath('/ows:ExceptionReport'
'/ows:Exception')
assert elem[0].attrib['exceptionCode'] == code
def assert_pywps_version(resp):
# get first child of root element
root_firstchild = resp.xpath('/*')[0].getprevious()
assert isinstance(root_firstchild, lxml.etree._Comment)
tokens = root_firstchild.text.split()
assert len(tokens) == 2
assert tokens[0] == 'PyWPS'
assert tokens[1] == __version__
def assert_wps_version(response, version="1.0.0"):
elem = response.xpath('/wps:Capabilities'
'/ows:ServiceIdentification'
'/ows:ServiceTypeVersion')
found_version = elem[0].text
assert version == found_version
tmp = Path(tempfile.mkdtemp())
with open(tmp / "out.xml", "wb") as out:
out.writelines(response.response)
|
2,943 |
parse shutters xml
|
#!/usr/bin/env python
"""
This file contains the various XML parsing functions.
Hazen 04/17
"""
import numpy
import xml.etree.ElementTree as ElementTree
import storm_control.sc_library.halExceptions as halExceptions
class ShutterXMLException(halExceptions.HalException):
pass
class ShuttersInfo(object):
"""
Stores the shutters information that will get sent to other modules.
"""
def __init__(self,color_data = None, frames = None, **kwds):
super().__init__(**kwds)
self.color_data = color_data
self.frames = frames
def getColorData(self):
return self.color_data
def getFrames(self):
"""
Return the length of the shutter sequence in frames.
"""
return self.frames
def METHOD_NAME(channel_name_to_id, shutters_file, can_oversample = True):
"""
This parses an XML file that defines a shutter sequence.
FIXME: Not all setups support oversampling, but none of them currently set
the can_oversample argument.
"""
number_channels = len(channel_name_to_id)
# Load XML shutters file.
xml = ElementTree.parse(shutters_file).getroot()
if (xml.tag != "repeat"):
raise ShutterXMLException(shutters_file + " is not a shutters file.")
# Use user-specified oversampling (if requested), otherwise use 100.
if can_oversample:
oversampling = 100
else:
oversampling = 1
if xml.find("oversampling") is not None:
oversampling = int(xml.find("oversampling").text)
if ((not can_oversample) and (oversampling > 1)):
raise ShutterXMLException("This setup does not support oversampling.")
# The length of the sequence.
frames = int(xml.find("frames").text)
# The user is using the channel names rather than their ID's to specify
# the different channels.
by_name = False
if xml.find("by_name") is not None:
by_name = bool(int(xml.find("by_name").text))
#
# We store a color to associate with each frame. This can be accessed by
# other modules (such as the spot counter) to associate a color with the
# a particular frame when, for example, updating the STORM image.
#
color_data = []
for i in range(frames):
color_data.append(None)
#
# Create waveforms.
#
# Blank waveforms are created for all channels, even those that are not used.
#
waveforms = []
for i in range(number_channels):
waveforms.append(numpy.zeros(frames * oversampling))
# Add in the events.
for event in xml.findall("event"):
channel = None
power = None
on = None
off = None
color = False
for node in event:
if (node.tag == "channel"):
# Channels by name.
if by_name:
if (node.text in channel_name_to_id):
channel = channel_name_to_id[node.text]
else:
raise ShutterXMLException("Invalid channel descriptor " + str(node.text))
# Channels by ID.
else:
try:
channel = int(node.text)
except ValueError:
raise ShutterXMLException("Invalid channel number " + str(node.text))
elif (node.tag == "power"):
try:
power = float(node.text)
except ValueError:
raise ShutterXMLException("Invalid channel power " + str(node.text))
elif (node.tag == "on"):
try:
on = int(float(node.text) * float(oversampling))
except ValueError:
raise ShutterXMLException("Invalid on time " + str(node.text))
elif (node.tag == "off"):
try:
off = int(float(node.text) * float(oversampling))
except ValueError:
raise ShutterXMLException("Invalid off time " + str(node.text))
elif (node.tag == "color"):
color = []
colors = node.text.split(",")
if (len(colors) != 3):
raise ShutterXMLException("'" + node.text + "' is not a valid color descriptor.")
for c in colors:
x = int(c)
if x < 0:
x = 0
if x > 255:
x = 255
color.append(x)
# Check values.
if channel is None:
raise ShutterXMLException("Event channel must be specified.")
if power is None:
raise ShutterXMLException("Event power must be specified.")
if on is None:
raise ShutterXMLException("Event on time must be specified.")
if off is None:
raise ShutterXMLException("Event off time must be specified.")
# Check range.
if (channel < 0):
raise ShutterXMLException("Channel number is negative: " + str(channel) + ".")
if (channel >= number_channels):
raise ShutterXMLException("Channel number is too large: " + str(channel) + ".")
if (on < 0):
raise ShutterXMLException("On time out of range: " + str(on) + " in channel " + str(channel) + ".")
if (on > frames * oversampling):
raise ShutterXMLException("On time out of range: " + str(on) + " in channel " + str(channel) + ".")
if (off < 0):
raise ShutterXMLException("Off time out of range: " + str(on) + " in channel " + str(channel) + ".")
if (off > frames * oversampling):
raise ShutterXMLException("Off time out of range: " + str(on) + " in channel " + str(channel) + ".")
# Channel waveform setup.
i = on
waveform = waveforms[channel]
while i < off:
waveform[i] = power
i += 1
# Color information setup.
if color:
color_start = int(round(float(on)/float(oversampling)))
color_end = int(round(float(off)/float(oversampling)))
i = color_start
while i < color_end:
color_data[i] = color
i += 1
return [ShuttersInfo(color_data = color_data, frames = frames),
waveforms,
oversampling]
#
# The MIT License
#
# Copyright (c) 2017 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
|
2,944 |
visit translation
|
"""
Parser for the .lang translation format.
"""
import codecs
import re
from parsimonious.exceptions import (
ParseError as ParsimoniousParseError,
VisitationError,
)
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
from pontoon.sync.exceptions import ParseError
from pontoon.sync.formats.base import ParsedResource
from pontoon.sync.vcs.models import VCSTranslation
BLANK_LINE = "blank_line"
TAG_REGEX = re.compile(r"\{(ok|l10n-extra)\}")
class LangComment:
def __init__(self, marker, content, end):
self.marker = marker
self.raw_content = content
self.end = end
@property
def content(self):
return self.raw_content.strip()
@property
def raw(self):
return self.marker + self.raw_content + self.end
class LangEntity(VCSTranslation):
def __init__(self, source_string, translation_string, tags):
super().__init__(
key=source_string, # Langfiles use the source as the key.
context="",
source_string=source_string,
strings={None: translation_string}, # Langfiles lack plural support
comments=[],
fuzzy=False, # Langfiles don't support fuzzy status
)
self.tags = set(tags)
# If the translation matches the source string without the {ok}
# tag, then the translation isn't actually valid, so we remove
# it.
if source_string == translation_string and "ok" not in tags:
del self.strings[None]
@property
def extra(self):
return {"tags": list(self.tags)}
class LangResource(ParsedResource):
def __init__(self, path, children):
self.path = path
self.children = children
@property
def translations(self):
return [c for c in self.children if isinstance(c, LangEntity)]
def save(self, locale):
with codecs.open(self.path, "w", "utf-8") as f:
for child in self.children:
if isinstance(child, LangEntity):
self.write_entity(f, child)
elif isinstance(child, LangComment):
f.write(child.raw)
elif child == BLANK_LINE:
f.write("\n")
def write_entity(self, f, entity):
f.write(f";{entity.source_string}\n")
translation = entity.strings.get(None, None)
if translation is None:
# No translation? Output the source string and remove {ok}.
translation = entity.source_string
entity.tags.discard("ok")
elif translation == entity.source_string:
# Translation is equal to the source? Include {ok}.
entity.tags.add("ok")
elif translation != entity.source_string:
# Translation is different? Remove {ok}, it's unneeded.
entity.tags.discard("ok")
if entity.extra.get("tags"):
tags = [f"{{{t}}}" for t in entity.tags]
translation = "{translation} {tags}".format(
translation=translation, tags=" ".join(tags)
)
f.write(f"{translation}\n")
class LangVisitor(NodeVisitor):
grammar = Grammar(
r"""
lang_file = (comment / entity / blank_line)*
comment = "#"+ line_content line_ending
line_content = ~r".*"
line_ending = ~r"$\n?"m # Match at EOL and EOF without newline.
blank_line = ~r"((?!\n)\s)*" line_ending
entity = string translation
string = ";" line_content line_ending
translation = line_content line_ending
"""
)
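# The grammar above corresponds to .lang content such as (illustrative):
#
#   # Comment attached to the next entity
#   ;Source string
#   Translated string {ok}
#
# where ";" marks the source line, the following line holds the translation,
# and the translation may carry an {ok} or {l10n-extra} tag.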
def visit_lang_file(self, node, children):
"""
Find comments that are associated with an entity and add them
to the entity's comments list. Also assign order to entities.
"""
comments = []
order = 0
for child in children:
if isinstance(child, LangComment):
comments.append(child)
continue
if isinstance(child, LangEntity):
child.comments = [c.content for c in comments]
child.order = order
order += 1
comments = []
return children
def visit_comment(self, node, node_info):
marker, content, end = node_info
return LangComment(node_text(marker), node_text(content), node_text(end))
def visit_blank_line(self, node, _):
return BLANK_LINE
def visit_entity(self, node, node_info):
string, translation = node_info
# Strip tags out of translation if they exist.
tags = []
tag_matches = list(re.finditer(TAG_REGEX, translation))
if tag_matches:
tags = [m.group(1) for m in tag_matches]
translation = translation[: tag_matches[0].start()].strip()
if translation == "":
raise ParsimoniousParseError(
"Blank translation for key {key} is not allowed in langfiles.".format(
key=string
)
)
return LangEntity(string, translation, tags)
def visit_string(self, node, node_info):
marker, content, end = node_info
return content.text.strip()
def METHOD_NAME(self, node, node_info):
content, end = node_info
return content.text.strip()
def generic_visit(self, node, children):
if children and len(children) == 1:
return children[0]
else:
return children or node
def node_text(node):
"""
Convert a Parsimonious node into text, including nodes that may
actually be a list of nodes due to repetition.
"""
if node is None:
return ""
elif isinstance(node, list):
return "".join([n.text for n in node])
else:
return node.text
def parse(path, source_path=None, locale=None):
# Read as utf-8-sig in case there's a BOM at the start of the file
# that we want to remove.
with codecs.open(path, "r", "utf-8-sig") as f:
content = f.read()
try:
children = LangVisitor().parse(content)
except (ParsimoniousParseError, VisitationError) as err:
raise ParseError(f"Failed to parse {path}: {err}") from err
return LangResource(path, children)
|
2,945 |
test create measurement without source
|
"""Test measurement collection."""
import unittest
import mongomock
from database.measurements import create_measurement
from tests.fixtures import METRIC_ID, REPORT_ID, SOURCE_ID, SUBJECT_ID, create_report
class TestMeasurements(unittest.TestCase):
"""Unit tests for the measurements collection."""
def setUp(self) -> None:
"""Set up fixtures."""
self.measurement_data = {
"start": "2023-07-19T16:50:47+00:000",
"end": "2023-07-19T16:50:47+00:001",
"has_error": False,
"sources": [
{
"type": "sonarqube",
"source_uuid": SOURCE_ID,
"name": "Source",
"parameters": {"url": "https://url", "password": "password"},
"parse_error": None,
"connection_error": None,
"value": "10",
"total": "100",
"entities": [{"key": "key", "first_seen": "2023-07-18"}],
},
],
"metric_uuid": METRIC_ID,
"report_uuid": REPORT_ID,
}
self.client: mongomock.MongoClient = mongomock.MongoClient()
self.database = self.client["quality_time_db"]
def test_create_measurement_without_latest_measurement(self):
"""Test that create_measurement without a latest measurement inserts a new measurement."""
self.database["reports"].insert_one(create_report(report_uuid=REPORT_ID))
create_measurement(self.database, self.measurement_data)
self.assertEqual(1, len(list(self.database.measurements.find())))
def test_create_measurement_with_latest_measurement(self):
"""Test that create_measurement with a latest measurement inserts a new measurement."""
self.database["reports"].insert_one(create_report(report_uuid=REPORT_ID))
self.database["measurements"].insert_one(
{
"metric_uuid": METRIC_ID,
"sources": [
{"source_uuid": SOURCE_ID, "parse_error": None, "connection_error": None, "value": "42"},
],
},
)
create_measurement(self.database, self.measurement_data)
self.assertEqual(2, len(list(self.database.measurements.find())))
def test_create_measurement_with_no_latest_metric(self):
"""Test that create_measurement does not insert new measurement when the metric does not exist."""
create_measurement(self.database, self.measurement_data)
self.assertEqual(0, len(list(self.database.measurements.find())))
def METHOD_NAME(self):
"""Test that a new measurement is not created if the sources used for the measurement no longer exist."""
report = create_report(report_uuid=REPORT_ID)
del report["subjects"][SUBJECT_ID]["metrics"][METRIC_ID]["sources"][SOURCE_ID]
self.database["reports"].insert_one(report)
create_measurement(self.database, self.measurement_data)
self.assertEqual(0, len(list(self.database.measurements.find())))
def test_create_measurement_when_its_equal(self):
"""Test that create_measurement with equal measurement does not insert new measurement."""
self.database["reports"].insert_one(create_report(report_uuid=REPORT_ID))
create_measurement(self.database, self.measurement_data)
create_measurement(self.database, self.measurement_data)
self.assertEqual(1, len(list(self.database.measurements.find())))
def test_copy_first_seen_timestamps(self):
"""Test that the first seen timestamps are copied from the latest successful measurement."""
self.database["reports"].insert_one(create_report(report_uuid=REPORT_ID))
create_measurement(self.database, self.measurement_data)
self.measurement_data["sources"][0]["entities"][0]["first_seen"] = "2023-07-19"
create_measurement(self.database, self.measurement_data)
self.assertEqual(
"2023-07-18",
next(self.database.measurements.find())["sources"][0]["entities"][0]["first_seen"],
)
|
2,946 |
open image
|
import asyncio
import logging
import os
import tempfile
import time
import uuid
from functools import partial
from hashlib import md5
from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QDesktopServices, QImage
from feeluown.consts import CACHE_DIR
logger = logging.getLogger(__name__)
class ImgManager(object):
"""图片下载、缓存管理
TOOD: 该模块相关逻辑需要重新梳理
"""
def __init__(self, app):
super().__init__()
self._app = app
self.cache = _ImgCache(self._app)
def get_from_cache(self, img_name):
fpath = self.cache.get(img_name)
if fpath is not None:
with open(fpath, 'rb') as f:
content = f.read()
return content
return None
async def get(self, img_url, img_name):
if img_url.startswith('fuo://local'):
# Before, `models.uri.resolve` is uesd to handle these non-std paths,
# and it is not elegant in fact :(
# HACK(cosven): please think about a better way in the future.
provider = self._app.library.get('local')
if provider is None:
return None
return provider.handle_with_path(img_url[11:])
fpath = self.cache.get(img_name)
if fpath is not None:
logger.info('read image:%s from cache', img_name)
with open(fpath, 'rb') as f:
content = f.read()
self.cache.update(img_name)
return content
event_loop = asyncio.get_event_loop()
action_msg = 'Downloading image from {}'.format(img_url)
self._app.show_msg(action_msg)
try:
# May return None.
res = await event_loop.run_in_executor(
None,
partial(self._app.request.get, img_url))
except: # noqa
res = None
logger.error(f'Download image failed, url:{img_url}')
if res is None:
return
fpath = self.cache.create(img_name)
self.save(fpath, res.content)
return res.content
def get_from_files(self, img_url, img_name) -> bytes:
logger.info('extract image from {}'.format(img_url))
if img_url.endswith('mp3') or img_url.endswith('ogg') or img_url.endswith('wma'):
from mutagen.mp3 import EasyMP3
metadata_mp3 = EasyMP3(img_url)
tags_mp3 = metadata_mp3.tags
assert tags_mp3 is not None
content = tags_mp3._EasyID3__id3._DictProxy__dict['APIC:'].data
elif img_url.endswith('m4a'):
from mutagen.easymp4 import EasyMP4
metadata_mp4 = EasyMP4(img_url)
tags_mp4 = metadata_mp4.tags
assert tags_mp4 is not None
content = tags_mp4._EasyMP4Tags__mp4._DictProxy__dict['covr'][0]
else:
raise Exception('Unsupported file type')
return content
def save(self, fpath, content):
try:
with open(fpath, 'wb') as f:
f.write(content)
except Exception:
logger.exception('save image file failed')
class _ImgCache(object):
'''Save img in cache dir.
Each image is saved under a hashed ``name`` that encodes the image's
last-used timestamp.
'''
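# For example (illustrative): the image URL is hashed with md5 and the cache
# file is named "<md5-of-url>-<unix-timestamp>", so an entry might look like
# "5f4dcc3b5aa765d61d8327deb882cf99-1690000000".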
MAX_TOTAL_NUMBER = 100
def __init__(self, app):
super().__init__()
self._app = app
def _hash(self, img_name):
pure_url = img_name.split('?')[0]
return md5(pure_url.encode('utf-8')).hexdigest()
def _gen_fname(self, hname):
ts_str = str(int(time.time()))
return hname + '-' + ts_str
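    # Illustrative note (added comment, not part of the original module): the
    # cache file name is the md5 hex digest of the URL (query string stripped)
    # plus a unix timestamp, e.g. something like
    # "d41d8cd98f00b204e9800998ecf8427e-1690000000", so get() can match by
    # prefix and update() can refresh the timestamp by renaming the file.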
def create(self, img_name):
'''return img file path'''
hname = self._hash(img_name)
fname = self._gen_fname(hname)
logger.debug('create img cache for %s' % img_name)
return self._get_path(fname)
def update(self, img_name):
hname = self._hash(img_name)
new_fname = self._gen_fname(hname)
new_fpath = self._get_path(new_fname)
old_fpath = self.get(img_name)
os.rename(old_fpath, new_fpath)
logger.debug('update img cache for %s' % img_name)
def get(self, img_name):
hname = self._hash(img_name)
for fname in os.listdir(CACHE_DIR):
if fname.startswith(hname):
logger.debug('get img cache for %s' % img_name)
return self._get_path(fname)
return None
def delete(self, img_name):
fpath = self.get(img_name)
if fpath is not None:
return os.remove(fpath)
return False
def _get_path(self, fname):
return os.path.join(CACHE_DIR, fname)
def METHOD_NAME(img: QImage):
tmpdir = tempfile.gettempdir()
uid = str(uuid.uuid1())
name = f'feeluown-img-{uid}.png'
filepath = os.path.join(tmpdir, name)
if img.save(filepath):
QDesktopServices.openUrl(QUrl.fromLocalFile(filepath))
|
2,947 |
delete subscription from snuba
|
# Generated by Django 2.2.28 on 2022-06-15 11:14
# Based on https://github.com/getsentry/getsentry/blob/89ff1453be755ddef31f2b99de09bd03badeb25e/getsentry/migrations/0141_migrate_sessions_subs_to_metrics.py
import logging
import re
from django.db import migrations
from sentry.new_migrations.migrations import CheckedMigration
from sentry.snuba.dataset import Dataset, EntityKey
from sentry.snuba.tasks import _create_in_snuba, _delete_from_snuba
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
CRASH_RATE_ALERT_AGGREGATE_RE = (
r"^percentage\([ ]*(sessions_crashed|users_crashed)[ ]*\,[ ]*(sessions|users)[ ]*\)"
)
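# Illustrative example (added comment, not part of the original migration): the
# regex above matches crash-rate aggregates such as
# "percentage(sessions_crashed, sessions)" or "percentage(users_crashed, users)";
# the second capture group (the base column) is what picks the entity key below.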
def create_subscription_in_snuba(subscription):
subscription.subscription_id = _create_in_snuba(subscription)
subscription.save()
def map_aggregate_to_entity_key(dataset: Dataset, aggregate: str) -> EntityKey:
if dataset == Dataset.Events:
entity_key = EntityKey.Events
elif dataset == Dataset.Transactions:
entity_key = EntityKey.Transactions
elif dataset in [Dataset.Metrics, Dataset.Sessions]:
match = re.match(CRASH_RATE_ALERT_AGGREGATE_RE, aggregate)
if not match:
raise Exception(
f"Only crash free percentage queries are supported for subscriptions"
f"over the {dataset.value} dataset"
)
if dataset == Dataset.Metrics:
count_col_matched = match.group(2)
if count_col_matched == "sessions":
entity_key = EntityKey.MetricsCounters
else:
entity_key = EntityKey.MetricsSets
else:
entity_key = EntityKey.Sessions
else:
raise Exception(f"{dataset} dataset does not have an entity key mapped to it")
return entity_key
def METHOD_NAME(subscription, query_dataset: Dataset):
entity_key: EntityKey = map_aggregate_to_entity_key(
query_dataset, subscription.snuba_query.aggregate
)
_delete_from_snuba(
query_dataset,
subscription.subscription_id,
entity_key,
)
@property
def event_types(self):
return [type.event_type for type in self.snubaqueryeventtype_set.all()]
def update_metrics_subscriptions(apps, schema_editor):
QuerySubscription = apps.get_model("sentry", "QuerySubscription")
for subscription in RangeQuerySetWrapperWithProgressBar(
QuerySubscription.objects.filter(snuba_query__dataset=Dataset.Metrics.value).select_related(
"snuba_query"
)
):
old_subscription_id = subscription.subscription_id
if old_subscription_id is not None:
try:
# The migration apps don't build this property, so patch it here:
subscription.snuba_query.event_types = event_types
create_subscription_in_snuba(subscription)
METHOD_NAME(subscription, Dataset.Metrics)
except Exception:
logging.exception(
"Failed to recreate metrics subscription in snuba",
extra={
"project": subscription.project.slug,
"subscription_id": subscription.id,
"query": subscription.snuba_query.query,
"aggregate": subscription.snuba_query.aggregate,
"time_window": subscription.snuba_query.time_window,
"resolution": subscription.snuba_query.resolution,
},
)
class Migration(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production. For
# the most part, this should only be used for operations where it's safe to run the migration
# after your code has deployed. So this should not be used for most operations that alter the
# schema of a table.
# Here are some things that make sense to mark as dangerous:
# - Large data migrations. Typically we want these to be run manually by ops so that they can
# be monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# have ops run this and not block the deploy. Note that while adding an index is a schema
# change, it's completely safe to run the operation after the code has deployed.
is_dangerous = False
# This flag is used to decide whether to run this migration in a transaction or not. Generally
# we don't want to run in a transaction here, since for long running operations like data
# back-fills this results in us locking an increasing number of rows until we finally commit.
atomic = False
dependencies = [
("sentry", "0291_add_new_perf_indexer"),
]
operations = [
migrations.RunPython(
update_metrics_subscriptions,
migrations.RunPython.noop,
hints={"tables": ["sentry_querysubscription", "sentry_snubaquery"]},
),
]
|
2,948 |
test deps outs are sorted by path
|
from collections import OrderedDict
from operator import itemgetter
from dvc.dvcfile import LOCK_FILE
from dvc.stage.utils import split_params_deps
from dvc.utils.fs import remove
from dvc.utils.serialize import dumps_yaml, parse_yaml_for_update
from tests.func.test_run import supported_params
FS_STRUCTURE = {
"foo": "bar\nfoobar",
"bar": "foo\nfoobar",
"foobar": "foobar\nbar",
"params.yaml": dumps_yaml(supported_params),
"params2.yaml": dumps_yaml(supported_params),
}
def read_lock_file(file=LOCK_FILE):
with open(file, encoding="utf-8") as f:
data = parse_yaml_for_update(f.read(), file)
assert isinstance(data, OrderedDict)
return data
def assert_eq_lockfile(previous, new):
for content in (previous, new):
assert isinstance(content, OrderedDict)
# if they both are OrderedDict, then `==` will also check for order
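    # (e.g. OrderedDict([("a", 1), ("b", 2)]) != OrderedDict([("b", 2), ("a", 1)]))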
assert previous == new
def METHOD_NAME(tmp_dir, dvc, run_head):
tmp_dir.gen(FS_STRUCTURE)
deps = ["foo", "bar", "foobar"]
run_head(*deps, name="copy-first-line")
initial_content = read_lock_file()
lock = initial_content["stages"]["copy-first-line"]
# lock stage key order:
assert list(lock.keys()) == ["cmd", "deps", "outs"]
    # the `path` key appears first and then the `md5`
assert all(
list(dep.keys()) == ["path", "hash", "md5", "size"] for dep in lock["deps"]
)
assert all(
list(out.keys()) == ["path", "hash", "md5", "size"] for out in lock["outs"]
)
# deps are always sorted by the file path naming
assert list(map(itemgetter("path"), lock["deps"])) == sorted(deps)
# outs are too
assert list(map(itemgetter("path"), lock["outs"])) == [
d + "-1" for d in sorted(deps)
]
def test_order_is_preserved_when_pipeline_order_changes(tmp_dir, dvc, run_head):
tmp_dir.gen(FS_STRUCTURE)
deps = ["foo", "bar", "foobar"]
stage = run_head(*deps, name="copy-first-line")
initial_content = read_lock_file()
# reverse order of stage.outs and dump to the pipeline file
# then, again change stage.deps and dump to the pipeline file
reversal = stage.outs.reverse, stage.deps.reverse
for reverse_items in reversal:
reverse_items()
stage.dvcfile._dump_pipeline_file(stage)
# we only changed the order, should not reproduce
assert not dvc.reproduce(stage.addressing)
new_lock_content = read_lock_file()
assert_eq_lockfile(new_lock_content, initial_content)
(tmp_dir / LOCK_FILE).unlink()
assert dvc.reproduce(stage.addressing) == [stage]
new_lock_content = read_lock_file()
assert_eq_lockfile(new_lock_content, initial_content)
def test_cmd_changes_other_orders_are_preserved(tmp_dir, dvc, run_head):
tmp_dir.gen(FS_STRUCTURE)
deps = ["foo", "bar", "foobar"]
stage = run_head(*deps, name="copy-first-line")
initial_content = read_lock_file()
# let's change cmd in pipeline file
# it should only change "cmd", otherwise it should be
# structurally same as cmd
new_cmd = "python head.py foo bar foobar"
assert stage.cmd != new_cmd # sanity check
stage.cmd = new_cmd
stage.dvcfile._dump_pipeline_file(stage)
initial_content["stages"]["copy-first-line"]["cmd"] = stage.cmd
assert dvc.reproduce(stage.addressing) == [stage]
new_lock_content = read_lock_file()
assert_eq_lockfile(new_lock_content, initial_content)
def test_params_dump(tmp_dir, dvc, run_head):
tmp_dir.gen(FS_STRUCTURE)
stage = run_head(
"foo",
"bar",
"foobar",
name="copy-first-line",
params=[
"params2.yaml:answer,lists,name",
"params.yaml:lists,floats,nested.nested1,nested.nested1.nested2",
],
)
initial_content = read_lock_file()
lock = initial_content["stages"]["copy-first-line"]
# lock stage key order:
assert list(lock.keys()) == ["cmd", "deps", "params", "outs"]
assert list(lock["params"].keys()) == ["params.yaml", "params2.yaml"]
    # params keys are always sorted by the name
assert list(lock["params"]["params.yaml"].keys()) == [
"floats",
"lists",
"nested.nested1",
"nested.nested1.nested2",
]
assert list(lock["params"]["params2.yaml"]) == ["answer", "lists", "name"]
assert not dvc.reproduce(stage.addressing)
# let's change the order of params and dump them in pipeline file
params, _ = split_params_deps(stage)
for param in params:
param.params.reverse()
stage.dvcfile._dump_pipeline_file(stage)
assert not dvc.reproduce(stage.addressing)
(tmp_dir / LOCK_FILE).unlink()
assert dvc.reproduce(stage.addressing) == [stage]
assert_eq_lockfile(initial_content, read_lock_file())
# remove build-cache and check if the same structure is built
for item in [dvc.stage_cache.cache_dir, LOCK_FILE]:
remove(item)
assert dvc.reproduce(stage.addressing) == [stage]
assert_eq_lockfile(initial_content, read_lock_file())
|
2,949 |
getvalue
|
"""
Tests for uu module.
Nick Mathewson
"""
import unittest
from test import support
import sys, os
import uu
from io import BytesIO
import io
plaintext = b"The smooth-scaled python crept over the sleeping dog\n"
encodedtext = b"""\
M5&AE('-M;V]T:\"US8V%L960@<'ET:&]N(&-R97!T(&]V97(@=&AE('-L965P
(:6YG(&1O9PH """
# Stolen from io.py
class FakeIO(io.TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
    Can be used as a drop-in replacement for sys.stdin and sys.stdout.
"""
# XXX This is really slow, but fully functional
def __init__(self, initial_value="", encoding="utf-8",
errors="strict", newline="\n"):
super(FakeIO, self).__init__(io.BytesIO(),
encoding=encoding,
errors=errors,
newline=newline)
self._encoding = encoding
self._errors = errors
if initial_value:
if not isinstance(initial_value, str):
initial_value = str(initial_value)
self.write(initial_value)
self.seek(0)
def METHOD_NAME(self):
self.flush()
return self.buffer.METHOD_NAME().decode(self._encoding, self._errors)
def encodedtextwrapped(mode, filename):
return (bytes("begin %03o %s\n" % (mode, filename), "ascii") +
encodedtext + b"\n \nend\n")
class UUTest(unittest.TestCase):
def test_encode(self):
inp = io.BytesIO(plaintext)
out = io.BytesIO()
uu.encode(inp, out, "t1")
self.assertEqual(out.METHOD_NAME(), encodedtextwrapped(0o666, "t1"))
inp = io.BytesIO(plaintext)
out = io.BytesIO()
uu.encode(inp, out, "t1", 0o644)
self.assertEqual(out.METHOD_NAME(), encodedtextwrapped(0o644, "t1"))
def test_decode(self):
inp = io.BytesIO(encodedtextwrapped(0o666, "t1"))
out = io.BytesIO()
uu.decode(inp, out)
self.assertEqual(out.METHOD_NAME(), plaintext)
inp = io.BytesIO(
b"UUencoded files may contain many lines,\n" +
b"even some that have 'begin' in them.\n" +
encodedtextwrapped(0o666, "t1")
)
out = io.BytesIO()
uu.decode(inp, out)
self.assertEqual(out.METHOD_NAME(), plaintext)
def test_truncatedinput(self):
inp = io.BytesIO(b"begin 644 t1\n" + encodedtext)
out = io.BytesIO()
try:
uu.decode(inp, out)
self.fail("No exception raised")
except uu.Error as e:
self.assertEqual(str(e), "Truncated input file")
def test_missingbegin(self):
inp = io.BytesIO(b"")
out = io.BytesIO()
try:
uu.decode(inp, out)
self.fail("No exception raised")
except uu.Error as e:
self.assertEqual(str(e), "No valid begin line found in input file")
def test_garbage_padding(self):
# Issue #22406
encodedtext = (
b"begin 644 file\n"
# length 1; bits 001100 111111 111111 111111
b"\x21\x2C\x5F\x5F\x5F\n"
b"\x20\n"
b"end\n"
)
plaintext = b"\x33" # 00110011
with self.subTest("uu.decode()"):
inp = io.BytesIO(encodedtext)
out = io.BytesIO()
uu.decode(inp, out, quiet=True)
self.assertEqual(out.METHOD_NAME(), plaintext)
with self.subTest("uu_codec"):
import codecs
decoded = codecs.decode(encodedtext, "uu_codec")
self.assertEqual(decoded, plaintext)
class UUStdIOTest(unittest.TestCase):
def setUp(self):
self.stdin = sys.stdin
self.stdout = sys.stdout
def tearDown(self):
sys.stdin = self.stdin
sys.stdout = self.stdout
def test_encode(self):
sys.stdin = FakeIO(plaintext.decode("ascii"))
sys.stdout = FakeIO()
uu.encode("-", "-", "t1", 0o666)
self.assertEqual(sys.stdout.METHOD_NAME(),
encodedtextwrapped(0o666, "t1").decode("ascii"))
def test_decode(self):
sys.stdin = FakeIO(encodedtextwrapped(0o666, "t1").decode("ascii"))
sys.stdout = FakeIO()
uu.decode("-", "-")
stdout = sys.stdout
sys.stdout = self.stdout
sys.stdin = self.stdin
self.assertEqual(stdout.METHOD_NAME(), plaintext.decode("ascii"))
class UUFileTest(unittest.TestCase):
def _kill(self, f):
# close and remove file
if f is None:
return
try:
f.close()
except (SystemExit, KeyboardInterrupt):
raise
except:
pass
try:
os.unlink(f.name)
except (SystemExit, KeyboardInterrupt):
raise
except:
pass
def setUp(self):
self.tmpin = support.TESTFN + "i"
self.tmpout = support.TESTFN + "o"
def tearDown(self):
del self.tmpin
del self.tmpout
def test_encode(self):
fin = fout = None
try:
support.unlink(self.tmpin)
fin = open(self.tmpin, 'wb')
fin.write(plaintext)
fin.close()
fin = open(self.tmpin, 'rb')
fout = open(self.tmpout, 'wb')
uu.encode(fin, fout, self.tmpin, mode=0o644)
fin.close()
fout.close()
fout = open(self.tmpout, 'rb')
s = fout.read()
fout.close()
self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
# in_file and out_file as filenames
uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0o644)
fout = open(self.tmpout, 'rb')
s = fout.read()
fout.close()
self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
finally:
self._kill(fin)
self._kill(fout)
def test_decode(self):
f = None
try:
support.unlink(self.tmpin)
f = open(self.tmpin, 'wb')
f.write(encodedtextwrapped(0o644, self.tmpout))
f.close()
f = open(self.tmpin, 'rb')
uu.decode(f)
f.close()
f = open(self.tmpout, 'rb')
s = f.read()
f.close()
self.assertEqual(s, plaintext)
# XXX is there an xp way to verify the mode?
finally:
self._kill(f)
def test_decode_filename(self):
f = None
try:
support.unlink(self.tmpin)
f = open(self.tmpin, 'wb')
f.write(encodedtextwrapped(0o644, self.tmpout))
f.close()
uu.decode(self.tmpin)
f = open(self.tmpout, 'rb')
s = f.read()
f.close()
self.assertEqual(s, plaintext)
finally:
self._kill(f)
def test_decodetwice(self):
# Verify that decode() will refuse to overwrite an existing file
f = None
try:
f = io.BytesIO(encodedtextwrapped(0o644, self.tmpout))
f = open(self.tmpin, 'rb')
uu.decode(f)
f.close()
f = open(self.tmpin, 'rb')
self.assertRaises(uu.Error, uu.decode, f)
f.close()
finally:
self._kill(f)
def test_main():
support.run_unittest(UUTest,
UUStdIOTest,
UUFileTest,
)
if __name__=="__main__":
test_main()
|
2,950 |
ddl migr commit
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import functools
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
@functools.singledispatch
def get_status(ql: qlast.Base) -> bytes:
raise NotImplementedError(
f'cannot get status for the {type(ql).__name__!r} AST node')
@get_status.register(qlast.CreateObject)
def _ddl_create(ql: qlast.CreateObject) -> bytes:
return f'CREATE {ql.object_class}'.encode()
@get_status.register(qlast.AlterObject)
def _ddl_alter(ql: qlast.AlterObject) -> bytes:
return f'ALTER {ql.object_class}'.encode()
@get_status.register(qlast.DropObject)
def _ddl_drop(ql: qlast.DropObject) -> bytes:
return f'DROP {ql.object_class}'.encode()
@get_status.register(qlast.StartMigration)
def _ddl_migr_start(ql: qlast.Base) -> bytes:
return b'START MIGRATION'
@get_status.register(qlast.CreateMigration)
def _ddl_migr_create(ql: qlast.Base) -> bytes:
return b'CREATE MIGRATION'
@get_status.register(qlast.CommitMigration)
def METHOD_NAME(ql: qlast.Base) -> bytes:
return b'COMMIT MIGRATION'
@get_status.register(qlast.DropMigration)
def _ddl_migr_drop(ql: qlast.Base) -> bytes:
return b'DROP MIGRATION'
@get_status.register(qlast.AlterMigration)
def _ddl_migr_alter(ql: qlast.Base) -> bytes:
return b'ALTER MIGRATION'
@get_status.register(qlast.AbortMigration)
def _ddl_migr_abort(ql: qlast.Base) -> bytes:
return b'ABORT MIGRATION'
@get_status.register(qlast.PopulateMigration)
def _ddl_migr_populate(ql: qlast.Base) -> bytes:
return b'POPULATE MIGRATION'
@get_status.register(qlast.DescribeCurrentMigration)
def _ddl_migr_describe_current(ql: qlast.Base) -> bytes:
return b'DESCRIBE CURRENT MIGRATION'
@get_status.register(qlast.AlterCurrentMigrationRejectProposed)
def _ddl_migr_alter_current(ql: qlast.Base) -> bytes:
return b'ALTER CURRENT MIGRATION'
@get_status.register(qlast.StartMigrationRewrite)
def _ddl_migr_rw_start(ql: qlast.Base) -> bytes:
return b'START MIGRATION REWRITE'
@get_status.register(qlast.CommitMigrationRewrite)
def _ddl_migr_rw_commit(ql: qlast.Base) -> bytes:
return b'COMMIT MIGRATION REWRITE'
@get_status.register(qlast.AbortMigrationRewrite)
def _ddl_migr_rw_abort(ql: qlast.Base) -> bytes:
return b'ABORT MIGRATION REWRITE'
@get_status.register(qlast.ResetSchema)
def _ddl_migr_reset_schema(ql: qlast.Base) -> bytes:
return b'RESET SCHEMA'
@get_status.register(qlast.SelectQuery)
@get_status.register(qlast.GroupQuery)
@get_status.register(qlast.ForQuery)
def _select(ql: qlast.Base) -> bytes:
return b'SELECT'
@get_status.register(qlast.InsertQuery)
def _insert(ql: qlast.Base) -> bytes:
return b'INSERT'
@get_status.register(qlast.UpdateQuery)
def _update(ql: qlast.Base) -> bytes:
return b'UPDATE'
@get_status.register(qlast.DeleteQuery)
def _delete(ql: qlast.Base) -> bytes:
return b'DELETE'
@get_status.register(qlast.StartTransaction)
def _tx_start(ql: qlast.Base) -> bytes:
return b'START TRANSACTION'
@get_status.register(qlast.CommitTransaction)
def _tx_commit(ql: qlast.Base) -> bytes:
return b'COMMIT TRANSACTION'
@get_status.register(qlast.RollbackTransaction)
def _tx_rollback(ql: qlast.Base) -> bytes:
return b'ROLLBACK TRANSACTION'
@get_status.register(qlast.DeclareSavepoint)
def _tx_sp_declare(ql: qlast.Base) -> bytes:
return b'DECLARE SAVEPOINT'
@get_status.register(qlast.RollbackToSavepoint)
def _tx_sp_rollback(ql: qlast.Base) -> bytes:
return b'ROLLBACK TO SAVEPOINT'
@get_status.register(qlast.ReleaseSavepoint)
def _tx_sp_release(ql: qlast.Base) -> bytes:
return b'RELEASE SAVEPOINT'
@get_status.register(qlast.SessionSetAliasDecl)
def _sess_set_alias(ql: qlast.Base) -> bytes:
return b'SET ALIAS'
@get_status.register(qlast.SessionResetAliasDecl)
@get_status.register(qlast.SessionResetModule)
@get_status.register(qlast.SessionResetAllAliases)
def _sess_reset_alias(ql: qlast.Base) -> bytes:
return b'RESET ALIAS'
@get_status.register(qlast.ConfigOp)
def _sess_set_config(ql: qlast.ConfigOp) -> bytes:
if ql.scope == qltypes.ConfigScope.GLOBAL:
if isinstance(ql, qlast.ConfigSet):
return b'SET GLOBAL'
else:
return b'RESET GLOBAL'
else:
return f'CONFIGURE {ql.scope}'.encode('ascii')
@get_status.register(qlast.DescribeStmt)
def _describe(ql: qlast.Base) -> bytes:
    return b'DESCRIBE'
@get_status.register(qlast.Rename)
def _rename(ql: qlast.Base) -> bytes:
    return b'RENAME'
@get_status.register(qlast.ExplainStmt)
def _explain(ql: qlast.Base) -> bytes:
return b'ANALYZE QUERY'
@get_status.register(qlast.AdministerStmt)
def _administer(ql: qlast.Base) -> bytes:
return b'ADMINISTER'
|
2,951 |
sbfdatetime
|
import datetime
import functools
import locale
from sickchill import settings
from .network_timezones import sb_timezone
date_presets = (
"%Y-%m-%d",
"%a, %Y-%m-%d",
"%A, %Y-%m-%d",
"%y-%m-%d",
"%a, %y-%m-%d",
"%A, %y-%m-%d",
"%m/%d/%Y",
"%a, %m/%d/%Y",
"%A, %m/%d/%Y",
"%m/%d/%y",
"%a, %m/%d/%y",
"%A, %m/%d/%y",
"%m-%d-%Y",
"%a, %m-%d-%Y",
"%A, %m-%d-%Y",
"%m-%d-%y",
"%a, %m-%d-%y",
"%A, %m-%d-%y",
"%m.%d.%Y",
"%a, %m.%d.%Y",
"%A, %m.%d.%Y",
"%m.%d.%y",
"%a, %m.%d.%y",
"%A, %m.%d.%y",
"%d-%m-%Y",
"%a, %d-%m-%Y",
"%A, %d-%m-%Y",
"%d-%m-%y",
"%a, %d-%m-%y",
"%A, %d-%m-%y",
"%d/%m/%Y",
"%a, %d/%m/%Y",
"%A, %d/%m/%Y",
"%d/%m/%y",
"%a, %d/%m/%y",
"%A, %d/%m/%y",
"%d.%m.%Y",
"%a, %d.%m.%Y",
"%A, %d.%m.%Y",
"%d.%m.%y",
"%a, %d.%m.%y",
"%A, %d.%m.%y",
"%d. %b %Y",
"%a, %d. %b %Y",
"%A, %d. %b %Y",
"%d. %b %y",
"%a, %d. %b %y",
"%A, %d. %b %y",
"%d. %B %Y",
"%a, %d. %B %Y",
"%A, %d. %B %Y",
"%d. %B %y",
"%a, %d. %B %y",
"%A, %d. %B %y",
"%b %d, %Y",
"%a, %b %d, %Y",
"%A, %b %d, %Y",
"%B %d, %Y",
"%a, %B %d, %Y",
"%A, %B %d, %Y",
)
time_presets = ("%I:%M:%S %p", "%H:%M:%S")
# helper class
class static_or_instance(object):
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
return functools.partial(self.func, instance)
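# Note (added comment, not part of the original module): when one of the
# decorated methods below is looked up on the class itself, ``instance`` is
# None, so ``self`` arrives as None and the method falls back to its ``dt``
# argument; looked up on an instance, ``self`` is bound as usual.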
# subclass datetime.datetime to add function to display custom date and time formats
class sbdatetime(datetime.datetime):
has_locale = True
en_US_norm = locale.normalize("en_US.utf-8")
@static_or_instance
def convert_to_setting(self, dt=None):
try:
if settings.TIMEZONE_DISPLAY == "local":
return dt.astimezone(sb_timezone) if self is None else self.astimezone(sb_timezone)
else:
return self if self else dt
except Exception:
return self if self else dt
# display Time in SickChill Format
@static_or_instance
def sbftime(self, dt=None, show_seconds=False, t_preset=None):
"""
Display time in SC format
TODO: Rename this to srftime
:param dt: datetime object
:param show_seconds: Boolean, show seconds
:param t_preset: Preset time format
:return: time string
"""
try:
locale.setlocale(locale.LC_TIME, "")
except Exception:
pass
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, "en_US")
except Exception:
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, sbdatetime.en_US_norm)
except Exception:
sbdatetime.has_locale = False
strt = ""
try:
if self is None:
if dt is not None:
if t_preset is not None:
strt = dt.strftime(t_preset)
elif show_seconds:
strt = dt.strftime(settings.TIME_PRESET_W_SECONDS)
else:
strt = dt.strftime(settings.TIME_PRESET)
else:
if t_preset is not None:
strt = self.strftime(t_preset)
elif show_seconds:
strt = self.strftime(settings.TIME_PRESET_W_SECONDS)
else:
strt = self.strftime(settings.TIME_PRESET)
finally:
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, "")
except Exception:
sbdatetime.has_locale = False
return strt
# display Date in SickChill Format
@static_or_instance
def sbfdate(self, dt=None, d_preset=None):
"""
Display date in SC format
TODO: Rename this to srfdate
:param dt: datetime object
:param d_preset: Preset date format
:return: date string
"""
try:
locale.setlocale(locale.LC_TIME, "")
except Exception:
pass
strd = ""
try:
if self is None:
if dt is not None:
if d_preset is not None:
strd = dt.strftime(d_preset)
else:
strd = dt.strftime(settings.DATE_PRESET)
else:
if d_preset is not None:
strd = self.strftime(d_preset)
else:
strd = self.strftime(settings.DATE_PRESET)
except (ValueError, OSError):
strd = "UNK"
finally:
try:
locale.setlocale(locale.LC_TIME, "")
except Exception:
pass
return strd
# display Datetime in SickChill Format
@static_or_instance
def METHOD_NAME(self, dt=None, show_seconds=False, d_preset=None, t_preset=None):
"""
Show datetime in SC format
TODO: Rename this to srfdatetime
:param dt: datetime object
:param show_seconds: Boolean, show seconds as well
:param d_preset: Preset date format
:param t_preset: Preset time format
:return: datetime string
"""
try:
locale.setlocale(locale.LC_TIME, "")
except Exception:
pass
strd = ""
try:
if self is None:
if dt is not None:
if d_preset is not None:
strd = dt.strftime(d_preset)
else:
strd = dt.strftime(settings.DATE_PRESET)
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, "en_US")
except Exception:
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, sbdatetime.en_US_norm)
except Exception:
sbdatetime.has_locale = False
if t_preset is not None:
strd += ", " + dt.strftime(t_preset)
elif show_seconds:
strd += ", " + dt.strftime(settings.TIME_PRESET_W_SECONDS)
else:
strd += ", " + dt.strftime(settings.TIME_PRESET)
else:
if d_preset is not None:
strd = self.strftime(d_preset)
else:
strd = self.strftime(settings.DATE_PRESET)
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, "en_US")
except Exception:
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, sbdatetime.en_US_norm)
except Exception:
sbdatetime.has_locale = False
if t_preset is not None:
strd += ", " + self.strftime(t_preset)
elif show_seconds:
strd += ", " + self.strftime(settings.TIME_PRESET_W_SECONDS)
else:
strd += ", " + self.strftime(settings.TIME_PRESET)
except (ValueError, OSError):
strd = "UNK"
finally:
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, "")
except Exception:
sbdatetime.has_locale = False
return strd
|
2,952 |
test hex to rgb
|
"""Function for testing colormap module."""
import numpy as np
import numpy.testing as npt
from fury import colormap
from fury.optpkg import optional_package
cm, have_matplotlib, _ = optional_package('matplotlib.cm')
def test_boys2rgb():
expected = np.array(
[
[0.23171663, 0.34383397, 0.6950296],
[0.74520645, 0.58600913, 0.6950296],
[0.48846154, 0.46492155, 0.05164146],
]
)
v1 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
v2 = np.array([1, 0, 0, 0, 1, 0])
for v, e in zip([v1, v2], [expected, expected[0]]):
c = colormap.boys2rgb(v)
npt.assert_array_almost_equal(c, e)
def test_orient2rgb():
e2 = [0.70710678, 0, 0, 0, 0.70710678, 0]
v = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
v2 = np.array([1, 0, 0, 0, 1, 0])
npt.assert_equal(colormap.orient2rgb(v), v)
npt.assert_almost_equal(colormap.orient2rgb(v2), e2)
npt.assert_raises(IOError, colormap.orient2rgb, np.array(1))
def test_get_cmap():
npt.assert_equal(colormap.get_cmap(''), None)
npt.assert_equal(colormap.get_cmap('blues'), None)
expected = np.array(
[
[0.03137255, 0.1882353, 0.41960785, 1],
[0.96862745, 0.98431373, 1, 1],
[0.96862745, 0.98431373, 1, 1],
]
)
expected2 = np.array(
[
[0.4, 0.4, 0.4, 1.0],
[0.498039, 0.788235, 0.498039, 1],
[0.498039, 0.788235, 0.498039, 1],
]
)
cmap = colormap.get_cmap('Blues')
npt.assert_array_almost_equal(cmap((1, 0, 0)), expected)
with npt.assert_warns(PendingDeprecationWarning):
cmap = colormap.get_cmap('Accent')
npt.assert_array_almost_equal(cmap((1, 0, 0)), expected2)
def test_line_colors():
s1 = np.array([np.arange(10)] * 3).T # 10x3
s2 = np.array([np.arange(5)] * 4) # 5x4
streamlines = [s1, s2]
s_color = colormap.line_colors(streamlines, cmap='boys_standard')
npt.assert_equal(s_color.shape, (2, 3))
def test_create_colormap():
value = np.arange(25)
npt.assert_raises(ValueError, colormap.create_colormap, value.reshape((5, 5)))
npt.assert_raises(AttributeError, colormap.create_colormap, value, name='fake')
npt.assert_warns(
PendingDeprecationWarning,
colormap.create_colormap,
value,
name='jet',
auto=False,
)
if not have_matplotlib:
with npt.assert_warns(UserWarning):
npt.assert_raises(ValueError, colormap.create_colormap, value)
def test_lab_delta():
color = np.c_[100, 127, 128]
delta = np.c_[0, 0, 0]
res = colormap._lab_delta(color, color)
res_2 = colormap._lab_delta(color, delta)
npt.assert_equal(res, 0)
npt.assert_equal(np.round(res_2), [206])
def test_rgb_lab_delta():
color = np.c_[255, 65, 0]
delta = np.c_[0, 65, 0]
res = colormap._rgb_lab_delta(color, color)
npt.assert_equal(res, 0)
res = colormap._rgb_lab_delta(color, delta)
npt.assert_equal(np.round(res), [114])
def test_lab2xyz():
lab_color = np.c_[100, 128, 128]
expected = np.c_[188.32, 100, 5.08]
res = colormap._lab2xyz(lab_color)
npt.assert_array_almost_equal(res, expected, decimal=2)
def test_xyz2rgb():
xyz_color = np.c_[43.14, 25.07, 2.56]
expected = np.c_[255, 65, 0]
res = np.round(colormap._xyz2rgb(xyz_color))
npt.assert_array_almost_equal(res, expected)
def test_lab2rgb():
lab_color = np.c_[0, 128, 128]
expected = np.c_[133, 0, 0]
res = np.round(colormap._lab2rgb(lab_color))
res[res < 0] = 0
npt.assert_array_almost_equal(res, expected)
def METHOD_NAME():
expected = np.array([1, 1, 1])
hexcode = '#FFFFFF'
res = colormap.hex_to_rgb(hexcode)
npt.assert_array_almost_equal(res, expected)
hashed_hexcode = 'FFFFFF'
res = colormap.hex_to_rgb(hashed_hexcode)
npt.assert_array_almost_equal(res, expected)
def test_color_converters():
color = np.array([1, 1, 1])
colors = np.array([[1, 1, 1], [0, 0, 0], [0.2, 0.3, 0.4]])
# testing rgb2xyz and xyz2rgb
expected_xyz = np.array([0.950456, 1.0, 1.088754])
xyz_color = colormap.rgb2xyz(color)
rgb_color = colormap.xyz2rgb(expected_xyz)
npt.assert_almost_equal(xyz_color, expected_xyz)
npt.assert_almost_equal(rgb_color, color)
for color in colors:
xyz_color = colormap.rgb2xyz(color)
rgb_from_xyz_color = colormap.xyz2rgb(xyz_color)
npt.assert_almost_equal(rgb_from_xyz_color, color)
# testing rgb2lab and lab2rgb
illuminant = 'D65'
observer = '2'
expected_lab = np.array([31.57976662, -1.86550104, -17.84845331])
lab_color = colormap.rgb2lab(color, illuminant, observer)
rgb_color = colormap.lab2rgb(expected_lab, illuminant, observer)
npt.assert_almost_equal(lab_color, expected_lab)
npt.assert_almost_equal(rgb_color, color)
for color in colors:
lab_color = colormap.rgb2lab(color, illuminant, observer)
rgb_from_lab_color = colormap.lab2rgb(lab_color, illuminant, observer)
npt.assert_almost_equal(rgb_from_lab_color, color)
# testing rgb2hsv and hsv2rgb
expected_hsv = np.array([0.58333333, 0.5, 0.4])
hsv_color = colormap.rgb2hsv(color)
rgb_color = colormap.hsv2rgb(expected_hsv)
npt.assert_almost_equal(hsv_color, expected_hsv)
npt.assert_almost_equal(rgb_color, color)
for color in colors:
hsv_color = colormap.rgb2hsv(color)
rgb_from_hsv_color = colormap.hsv2rgb(hsv_color)
npt.assert_almost_equal(rgb_from_hsv_color, color)
|
2,953 |
roundup
|
#
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import bisect
import mmap
import os
import sys
import tempfile
import threading
from . import context
from . import reduction
from . import util
__all__ = ['BufferWrapper']
#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#
if sys.platform == 'win32':
import _winapi
class Arena(object):
_rand = tempfile._RandomNameSequence()
def __init__(self, size):
self.size = size
for i in range(100):
name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
buf = mmap.mmap(-1, size, tagname=name)
if _winapi.GetLastError() == 0:
break
# We have reopened a preexisting mmap.
buf.close()
else:
raise FileExistsError('Cannot find name for new mmap')
self.name = name
self.buffer = buf
self._state = (self.size, self.name)
def __getstate__(self):
context.assert_spawning(self)
return self._state
def __setstate__(self, state):
self.size, self.name = self._state = state
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS
else:
class Arena(object):
def __init__(self, size, fd=-1):
self.size = size
self.fd = fd
if fd == -1:
self.fd, name = tempfile.mkstemp(
prefix='pym-%d-'%os.getpid(), dir=util.get_temp_dir())
os.unlink(name)
util.Finalize(self, os.close, (self.fd,))
with open(self.fd, 'wb', closefd=False) as f:
f.write(b'\0'*size)
self.buffer = mmap.mmap(self.fd, self.size)
def reduce_arena(a):
if a.fd == -1:
raise ValueError('Arena is unpicklable because '
'forking was enabled when it was created')
return rebuild_arena, (a.size, reduction.DupFd(a.fd))
def rebuild_arena(size, dupfd):
return Arena(size, dupfd.detach())
reduction.register(Arena, reduce_arena)
#
# Class allowing allocation of chunks of memory from arenas
#
class Heap(object):
_alignment = 8
def __init__(self, size=mmap.PAGESIZE):
self._lastpid = os.getpid()
self._lock = threading.Lock()
self._size = size
self._lengths = []
self._len_to_seq = {}
self._start_to_block = {}
self._stop_to_block = {}
self._allocated_blocks = set()
self._arenas = []
# list of pending blocks to free - see free() comment below
self._pending_free_blocks = []
@staticmethod
def METHOD_NAME(n, alignment):
# alignment must be a power of 2
mask = alignment - 1
return (n + mask) & ~mask
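    # Worked example (added comment, not in the original module): with an
    # 8-byte alignment, mask = 0b111, so (13 + 7) & ~7 == 16 and
    # (16 + 7) & ~7 == 16 -- sizes are rounded up to the next multiple of 8.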
def _malloc(self, size):
# returns a large enough block -- it might be much larger
i = bisect.bisect_left(self._lengths, size)
if i == len(self._lengths):
length = self.METHOD_NAME(max(self._size, size), mmap.PAGESIZE)
self._size *= 2
util.info('allocating a new mmap of length %d', length)
arena = Arena(length)
self._arenas.append(arena)
return (arena, 0, length)
else:
length = self._lengths[i]
seq = self._len_to_seq[length]
block = seq.pop()
if not seq:
del self._len_to_seq[length], self._lengths[i]
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
return block
def _free(self, block):
# free location and try to merge with neighbours
(arena, start, stop) = block
try:
prev_block = self._stop_to_block[(arena, start)]
except KeyError:
pass
else:
start, _ = self._absorb(prev_block)
try:
next_block = self._start_to_block[(arena, stop)]
except KeyError:
pass
else:
_, stop = self._absorb(next_block)
block = (arena, start, stop)
length = stop - start
try:
self._len_to_seq[length].append(block)
except KeyError:
self._len_to_seq[length] = [block]
bisect.insort(self._lengths, length)
self._start_to_block[(arena, start)] = block
self._stop_to_block[(arena, stop)] = block
def _absorb(self, block):
# deregister this block so it can be merged with a neighbour
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
length = stop - start
seq = self._len_to_seq[length]
seq.remove(block)
if not seq:
del self._len_to_seq[length]
self._lengths.remove(length)
return start, stop
def _free_pending_blocks(self):
# Free all the blocks in the pending list - called with the lock held.
while True:
try:
block = self._pending_free_blocks.pop()
except IndexError:
break
self._allocated_blocks.remove(block)
self._free(block)
def free(self, block):
# free a block returned by malloc()
# Since free() can be called asynchronously by the GC, it could happen
# that it's called while self._lock is held: in that case,
# self._lock.acquire() would deadlock (issue #12352). To avoid that, a
# trylock is used instead, and if the lock can't be acquired
# immediately, the block is added to a list of blocks to be freed
        # synchronously sometime later from malloc() or free(), by calling
        # _free_pending_blocks() (appending and retrieving from a list is not
        # strictly thread-safe but under CPython it's atomic thanks to the GIL).
assert os.getpid() == self._lastpid
if not self._lock.acquire(False):
# can't acquire the lock right now, add the block to the list of
# pending blocks to free
self._pending_free_blocks.append(block)
else:
# we hold the lock
try:
self._free_pending_blocks()
self._allocated_blocks.remove(block)
self._free(block)
finally:
self._lock.release()
def malloc(self, size):
# return a block of right size (possibly rounded up)
assert 0 <= size < sys.maxsize
if os.getpid() != self._lastpid:
self.__init__() # reinitialize after fork
self._lock.acquire()
self._free_pending_blocks()
try:
size = self.METHOD_NAME(max(size,1), self._alignment)
(arena, start, stop) = self._malloc(size)
new_stop = start + size
if new_stop < stop:
self._free((arena, new_stop, stop))
block = (arena, start, new_stop)
self._allocated_blocks.add(block)
return block
finally:
self._lock.release()
#
# Class representing a chunk of an mmap -- can be inherited by child process
#
class BufferWrapper(object):
_heap = Heap()
def __init__(self, size):
assert 0 <= size < sys.maxsize
block = BufferWrapper._heap.malloc(size)
self._state = (block, size)
util.Finalize(self, BufferWrapper._heap.free, args=(block,))
def create_memoryview(self):
(arena, start, stop), size = self._state
return memoryview(arena.buffer)[start:start+size]
|
2,954 |
test lifecycle
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import unittest
import os
import torch
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.test_util import GenCartesianProduct
test_device_args = (
[("cpu",)]
if os.getenv("ONEFLOW_TEST_CPU_ONLY")
else [("cpu",), ("cuda", 0), ("cuda", 1)]
)
test_args = list(
GenCartesianProduct((test_device_args, [(torch, flow), (flow, torch)]))
)
def are_tensors_equal(a, b):
def are_devices_equal(a, b):
if a.type == "cuda" and b.type == "cuda":
return a.index == b.index
else:
return a.type == b.type
return (
np.array_equal(a.cpu().numpy(), b.cpu().numpy())
and are_devices_equal(a.device, b.device)
and a.shape == b.shape
and a.stride() == b.stride()
and a.cpu().numpy().dtype == b.cpu().numpy().dtype
)
@flow.unittest.skip_unless_1n2d()
class TestPack(flow.unittest.TestCase):
def test_same_data(test_case):
for device_args, (m1, m2) in test_args:
tensor1 = m1.randn(3, 4, 5, device=m1.device(*device_args))
tensor2 = m2.from_dlpack(m1.to_dlpack(tensor1))
test_case.assertTrue(are_tensors_equal(tensor1, tensor2))
test_case.assertEqual(tensor2.storage_offset(), 0)
tensor2[1:2, 2:3, 3:4] = random.random()
            # NOTE: OneFlow operations are asynchronously executed,
# so we need to synchronize explicitly here.
flow._oneflow_internal.eager.Sync()
test_case.assertTrue(are_tensors_equal(tensor1, tensor2))
def test_use_ops(test_case):
for device_args, (m1, m2) in test_args:
tensor1 = m1.randn(3, 4, 5, device=m1.device(*device_args))
tensor2 = m2.from_dlpack(m1.to_dlpack(tensor1))
res1 = tensor1 ** 2
res2 = tensor2 ** 2
test_case.assertTrue(np.allclose(res1.cpu().numpy(), res2.cpu().numpy()))
def test_more_dtype(test_case):
# PyTorch bfloat16 tensor doesn't support .numpy() method
# so we can't test it
# torch.bfloat16, flow.bfloat16
dtypes = ["float64", "float32", "float16", "int64", "int32", "int8", "uint8"]
for device_args, (m1, m2) in test_args:
for dtype in dtypes:
tensor1 = m1.ones(
(2, 3), dtype=getattr(m1, dtype), device=m1.device(*device_args)
)
tensor2 = m2.from_dlpack(m1.to_dlpack(tensor1))
test_case.assertTrue(are_tensors_equal(tensor1, tensor2))
def test_non_contiguous_input(test_case):
for device_args, (m1, m2) in test_args:
tensor1 = (
m1.randn(2, 3, 4, 5).permute(2, 0, 3, 1).to(m1.device(*device_args))
)
tensor2 = m2.from_dlpack(m1.to_dlpack(tensor1))
test_case.assertTrue(are_tensors_equal(tensor1, tensor2))
def test_scalar_tensor(test_case):
for device_args, (m1, m2) in test_args:
tensor1 = m1.tensor(5).to(m1.device(*device_args))
tensor2 = m2.from_dlpack(m1.to_dlpack(tensor1))
test_case.assertTrue(are_tensors_equal(tensor1, tensor2))
def test_0_size_tensor(test_case):
for device_args, (m1, m2) in test_args:
tensor1 = m1.tensor([]).to(m1.device(*device_args))
tensor2 = m2.from_dlpack(m1.to_dlpack(tensor1))
test_case.assertTrue(are_tensors_equal(tensor1, tensor2))
def METHOD_NAME(test_case):
for device_args, (m1, m2) in test_args:
tensor1 = m1.randn(2, 3, 4, 5).to(m1.device(*device_args))
tensor2 = m2.from_dlpack(m1.to_dlpack(tensor1))
value = tensor1.cpu().numpy()
del tensor2
if device_args[0] == "cuda":
m2.cuda.synchronize()
# actually release the cuda memory
m2.cuda.empty_cache()
test_case.assertTrue(np.array_equal(tensor1.cpu().numpy(), value))
tensor1 = m1.randn(2, 3, 4, 5).to(m1.device(*device_args))
tensor2 = m2.from_dlpack(m1.to_dlpack(tensor1))
value = tensor2.cpu().numpy()
del tensor1
if device_args[0] == "cuda":
m1.cuda.synchronize()
m1.cuda.empty_cache()
test_case.assertTrue(np.array_equal(tensor2.cpu().numpy(), value))
def test_subview(test_case):
for device_args, (m1, m2) in test_args:
tensor1 = m1.randn(3, 4, 5, device=m1.device(*device_args))
tensor1 = tensor1[1:, :, ::2]
tensor2 = m2.from_dlpack(m1.to_dlpack(tensor1))
test_case.assertTrue(are_tensors_equal(tensor1, tensor2))
test_case.assertEqual(tensor2.storage_offset(), 0)
tensor2[1:2, ::2, 3:4] = random.random()
test_case.assertTrue(are_tensors_equal(tensor1, tensor2))
if __name__ == "__main__":
unittest.main()
|
2,955 |
create json str
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Compute InstanceTemplate.
See:
https://cloud.google.com/compute/docs/reference/latest/instanceTemplates
"""
from builtins import object
import json
from google.cloud.forseti.common.gcp_type import key
class InstanceTemplate(object):
"""Represents InstanceTemplate resource."""
def __init__(self, **kwargs):
"""InstanceTemplate resource.
Args:
**kwargs: The object's attributes.
"""
self.creation_timestamp = kwargs.get('creation_timestamp')
self.description = kwargs.get('description')
self.name = kwargs.get('name')
self.properties = kwargs.get('properties')
self.id = kwargs.get('id')
self.project_id = kwargs.get('project_id')
self._json = kwargs.get('raw_instance_template')
@classmethod
def from_dict(cls, instance_template, project_id=None):
"""Creates an InstanceTemplate from an instance template dict.
Args:
instance_template (dict): An instance template resource dict.
project_id (str): A project id for the resource.
Returns:
InstanceTemplate: A new InstanceTemplate object.
"""
kwargs = {'project_id': project_id,
'id': instance_template.get('id'),
'creation_timestamp': instance_template.get(
'creationTimestamp'),
'name': instance_template.get('name'),
'description': instance_template.get('description'),
'properties': instance_template.get('properties', {}),
'raw_instance_template': json.dumps(
instance_template, sort_keys=True)}
return cls(**kwargs)
@staticmethod
def from_json(json_string, project_id=None):
"""Creates an InstanceTemplate from an instance template JSON string.
Args:
json_string (str): A json string representing the instance template.
project_id (str): A project id for the resource.
Returns:
InstanceTemplate: A new InstanceTemplate object.
"""
instance_template = json.loads(json_string)
return InstanceTemplate.from_dict(instance_template, project_id)
def METHOD_NAME(self):
"""Creates a json string based on the object attributes.
Returns:
str: json str.
"""
resource_dict = {
'id': self.id,
'creationTimestamp': self.creation_timestamp,
'name': self.name,
'description': self.description,
'properties': self.properties}
# Strip out empty values
resource_dict = dict((k, v) for k, v in
list(resource_dict.items()) if v)
return json.dumps(resource_dict, sort_keys=True)
@property
def json(self):
"""Returns the json string representation of the resource.
Returns:
str: json str.
"""
if not self._json:
self._json = self.METHOD_NAME()
return self._json
@property
def key(self):
"""Returns a Key identifying the object.
Returns:
Key: the key
"""
return Key.from_args(self.project_id, self.name)
KEY_OBJECT_KIND = 'InstanceTemplate'
class Key(key.Key):
"""An identifier for a specific instance template."""
@staticmethod
def from_args(project_id, name):
"""Construct a Key from specific values.
Args:
project_id (str): project_id
name (str): name
Returns:
Key: the key
"""
return Key(KEY_OBJECT_KIND, {
'project_id': project_id,
'name': name})
@staticmethod
def from_url(url):
"""Construct a Key from a URL.
Args:
url (str): Object reference URL
Returns:
Key: the key
Raises:
ValueError: Required parameters are missing.
"""
obj = Key._from_url(KEY_OBJECT_KIND,
{'projects': 'project_id',
'instanceTemplates': 'name'},
url)
if not obj.project_id or not obj.name:
raise ValueError('Missing fields in URL %r' % url)
return obj
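    # Illustrative example (added comment, not part of the original module): a
    # self-link such as ".../projects/my-project/global/instanceTemplates/my-template"
    # would yield a Key with project_id='my-project' and name='my-template'; the
    # exact URL shape is assumed here for illustration only.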
@property
def project_id(self):
"""Object property: project_id
Returns:
str: project_id
"""
return self._path_component('project_id')
@property
def name(self):
"""Object property: name
Returns:
str: name
"""
return self._path_component('name')
|
2,956 |
query parameters
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"devcenter admin network-connection show",
)
class Show(AAZCommand):
"""Get a network connection.
:example: Get
az devcenter admin network-connection show --name "uswest3network" --resource-group "rg1"
"""
_aaz_info = {
"version": "2023-04-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.devcenter/networkconnections/{}", "2023-04-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.network_connection_name = AAZStrArg(
options=["-n", "--name", "--network-connection-name"],
help="Name of the network connection that can be applied to a pool.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.NetworkConnectionsGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class NetworkConnectionsGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevCenter/networkConnections/{networkConnectionName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"networkConnectionName", self.ctx.args.network_connection_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-04-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType(
flags={"required": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.domain_join_type = AAZStrType(
serialized_name="domainJoinType",
flags={"required": True},
)
properties.domain_name = AAZStrType(
serialized_name="domainName",
)
properties.domain_password = AAZStrType(
serialized_name="domainPassword",
flags={"secret": True},
)
properties.domain_username = AAZStrType(
serialized_name="domainUsername",
)
properties.health_check_status = AAZStrType(
serialized_name="healthCheckStatus",
)
properties.networking_resource_group_name = AAZStrType(
serialized_name="networkingResourceGroupName",
)
properties.organization_unit = AAZStrType(
serialized_name="organizationUnit",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.subnet_id = AAZStrType(
serialized_name="subnetId",
flags={"required": True},
)
system_data = cls._schema_on_200.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"]
|
2,957 |
test npz3
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
import sys
import tempfile
import unittest
import numpy as np
from monai.data import DataLoader, Dataset, NumpyReader
from monai.transforms import LoadImaged
from tests.utils import assert_allclose
class TestNumpyReader(unittest.TestCase):
def test_npy(self):
test_data = np.random.randint(0, 256, size=[3, 4, 4])
with tempfile.TemporaryDirectory() as tempdir:
filepath = os.path.join(tempdir, "test_data.npy")
np.save(filepath, test_data)
reader = NumpyReader()
result = reader.get_data(reader.read(filepath))
np.testing.assert_allclose(result[1]["spatial_shape"], test_data.shape)
np.testing.assert_allclose(result[0].shape, test_data.shape)
np.testing.assert_allclose(result[0], test_data)
def test_npz1(self):
test_data1 = np.random.randint(0, 256, size=[3, 4, 4])
with tempfile.TemporaryDirectory() as tempdir:
filepath = os.path.join(tempdir, "test_data.npy")
np.save(filepath, test_data1)
reader = NumpyReader()
result = reader.get_data(reader.read(filepath))
np.testing.assert_allclose(result[1]["spatial_shape"], test_data1.shape)
np.testing.assert_allclose(result[0].shape, test_data1.shape)
np.testing.assert_allclose(result[0], test_data1)
def test_npz2(self):
test_data1 = np.random.randint(0, 256, size=[3, 4, 4])
test_data2 = np.random.randint(0, 256, size=[3, 4, 4])
with tempfile.TemporaryDirectory() as tempdir:
filepath = os.path.join(tempdir, "test_data.npz")
np.savez(filepath, test_data1, test_data2)
reader = NumpyReader()
result = reader.get_data(reader.read(filepath))
np.testing.assert_allclose(result[1]["spatial_shape"], test_data1.shape)
np.testing.assert_allclose(result[0].shape, (2, 3, 4, 4))
np.testing.assert_allclose(result[0], np.stack([test_data1, test_data2]))
def METHOD_NAME(self):
test_data1 = np.random.randint(0, 256, size=[3, 4, 4])
test_data2 = np.random.randint(0, 256, size=[3, 4, 4])
with tempfile.TemporaryDirectory() as tempdir:
filepath = os.path.join(tempdir, "test_data.npz")
np.savez(filepath, test1=test_data1, test2=test_data2)
reader = NumpyReader(npz_keys=["test1", "test2"])
result = reader.get_data(reader.read(filepath))
np.testing.assert_allclose(result[1]["spatial_shape"], test_data1.shape)
np.testing.assert_allclose(result[0].shape, (2, 3, 4, 4))
np.testing.assert_allclose(result[0], np.stack([test_data1, test_data2]))
def test_npy_pickle(self):
test_data = {"test": np.random.randint(0, 256, size=[3, 4, 4])}
with tempfile.TemporaryDirectory() as tempdir:
filepath = os.path.join(tempdir, "test_data.npy")
np.save(filepath, test_data, allow_pickle=True)
reader = NumpyReader()
result = reader.get_data(reader.read(filepath))[0].item()
np.testing.assert_allclose(result["test"].shape, test_data["test"].shape)
np.testing.assert_allclose(result["test"], test_data["test"])
def test_kwargs(self):
test_data = {"test": np.random.randint(0, 256, size=[3, 4, 4])}
with tempfile.TemporaryDirectory() as tempdir:
filepath = os.path.join(tempdir, "test_data.npy")
np.save(filepath, test_data, allow_pickle=True)
reader = NumpyReader(mmap_mode="r")
result = reader.get_data(reader.read(filepath, mmap_mode=None))[0].item()
np.testing.assert_allclose(result["test"].shape, test_data["test"].shape)
def test_dataloader(self):
test_data = np.random.randint(0, 256, size=[3, 4, 5])
datalist = []
with tempfile.TemporaryDirectory() as tempdir:
for i in range(4):
filepath = os.path.join(tempdir, f"test_data{i}.npz")
np.savez(filepath, test_data)
datalist.append({"image": filepath})
num_workers = 2 if sys.platform == "linux" else 0
loader = DataLoader(
Dataset(data=datalist, transform=LoadImaged(keys="image", reader=NumpyReader())),
batch_size=2,
num_workers=num_workers,
)
for d in loader:
for c in d["image"]:
assert_allclose(c, test_data, type_test=False)
def test_channel_dim(self):
test_data = np.random.randint(0, 256, size=[3, 4, 5, 2])
with tempfile.TemporaryDirectory() as tempdir:
filepath = os.path.join(tempdir, "test_data.npy")
np.save(filepath, test_data)
reader = NumpyReader(channel_dim=-1)
result = reader.get_data(reader.read(filepath))
np.testing.assert_allclose(result[1]["spatial_shape"], test_data.shape[:-1])
self.assertEqual(result[1]["original_channel_dim"], -1)
if __name__ == "__main__":
unittest.main()
|
2,958 |
test read blob
|
# Copyright 2010-2023 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for Blob objects."""
import io
from pathlib import Path
import pytest
import pygit2
from . import utils
BLOB_SHA = 'a520c24d85fbfc815d385957eed41406ca5a860b'
BLOB_CONTENT = """hello world
hola mundo
bonjour le monde
""".encode()
BLOB_NEW_CONTENT = b'foo bar\n'
BLOB_FILE_CONTENT = b'bye world\n'
BLOB_PATCH = r"""diff --git a/file b/file
index a520c24..95d09f2 100644
--- a/file
+++ b/file
@@ -1,3 +1 @@
-hello world
-hola mundo
-bonjour le monde
+hello world
\ No newline at end of file
"""
BLOB_PATCH_2 = """diff --git a/file b/file
index a520c24..d675fa4 100644
--- a/file
+++ b/file
@@ -1,3 +1 @@
-hello world
-hola mundo
-bonjour le monde
+foo bar
"""
BLOB_PATCH_DELETED = """diff --git a/file b/file
deleted file mode 100644
index a520c24..0000000
--- a/file
+++ /dev/null
@@ -1,3 +0,0 @@
-hello world
-hola mundo
-bonjour le monde
"""
def METHOD_NAME(testrepo):
blob = testrepo[BLOB_SHA]
assert blob.hex == BLOB_SHA
sha = blob.id.hex
assert sha == BLOB_SHA
assert isinstance(blob, pygit2.Blob)
assert not blob.is_binary
assert pygit2.GIT_OBJ_BLOB == blob.type
assert BLOB_CONTENT == blob.data
assert len(BLOB_CONTENT) == blob.size
assert BLOB_CONTENT == blob.read_raw()
def test_create_blob(testrepo):
blob_oid = testrepo.create_blob(BLOB_NEW_CONTENT)
blob = testrepo[blob_oid]
assert isinstance(blob, pygit2.Blob)
assert pygit2.GIT_OBJ_BLOB == blob.type
assert blob_oid == blob.id
assert utils.gen_blob_sha1(BLOB_NEW_CONTENT) == blob_oid.hex
assert BLOB_NEW_CONTENT == blob.data
assert len(BLOB_NEW_CONTENT) == blob.size
assert BLOB_NEW_CONTENT == blob.read_raw()
blob_buffer = memoryview(blob)
assert len(BLOB_NEW_CONTENT) == len(blob_buffer)
assert BLOB_NEW_CONTENT == blob_buffer
def set_content():
blob_buffer[:2] = b'hi'
with pytest.raises(TypeError): set_content()
def test_create_blob_fromworkdir(testrepo):
blob_oid = testrepo.create_blob_fromworkdir("bye.txt")
blob = testrepo[blob_oid]
assert isinstance(blob, pygit2.Blob)
assert pygit2.GIT_OBJ_BLOB == blob.type
assert blob_oid == blob.id
assert utils.gen_blob_sha1(BLOB_FILE_CONTENT) == blob_oid.hex
assert BLOB_FILE_CONTENT == blob.data
assert len(BLOB_FILE_CONTENT) == blob.size
assert BLOB_FILE_CONTENT == blob.read_raw()
def test_create_blob_fromworkdir_aspath(testrepo):
blob_oid = testrepo.create_blob_fromworkdir(Path("bye.txt"))
blob = testrepo[blob_oid]
assert isinstance(blob, pygit2.Blob)
def test_create_blob_outside_workdir(testrepo):
with pytest.raises(KeyError):
testrepo.create_blob_fromworkdir(__file__)
def test_create_blob_fromdisk(testrepo):
blob_oid = testrepo.create_blob_fromdisk(__file__)
blob = testrepo[blob_oid]
assert isinstance(blob, pygit2.Blob)
assert pygit2.GIT_OBJ_BLOB == blob.type
def test_create_blob_fromiobase(testrepo):
with pytest.raises(TypeError):
testrepo.create_blob_fromiobase('bad type')
f = io.BytesIO(BLOB_CONTENT)
blob_oid = testrepo.create_blob_fromiobase(f)
blob = testrepo[blob_oid]
assert isinstance(blob, pygit2.Blob)
assert pygit2.GIT_OBJ_BLOB == blob.type
assert blob_oid == blob.id
assert BLOB_SHA == blob_oid.hex
def test_diff_blob(testrepo):
blob = testrepo[BLOB_SHA]
old_blob = testrepo['3b18e512dba79e4c8300dd08aeb37f8e728b8dad']
patch = blob.diff(old_blob, old_as_path="hello.txt")
assert len(patch.hunks) == 1
def test_diff_blob_to_buffer(testrepo):
blob = testrepo[BLOB_SHA]
patch = blob.diff_to_buffer("hello world")
assert len(patch.hunks) == 1
def test_diff_blob_to_buffer_patch_patch(testrepo):
blob = testrepo[BLOB_SHA]
patch = blob.diff_to_buffer("hello world")
assert patch.text == BLOB_PATCH
def test_diff_blob_to_buffer_delete(testrepo):
blob = testrepo[BLOB_SHA]
patch = blob.diff_to_buffer(None)
assert patch.text == BLOB_PATCH_DELETED
def test_diff_blob_create(testrepo):
old = testrepo[testrepo.create_blob(BLOB_CONTENT)]
new = testrepo[testrepo.create_blob(BLOB_NEW_CONTENT)]
patch = old.diff(new)
assert patch.text == BLOB_PATCH_2
def test_blob_from_repo(testrepo):
blob = testrepo[BLOB_SHA]
patch_one = blob.diff_to_buffer(None)
blob = testrepo[BLOB_SHA]
patch_two = blob.diff_to_buffer(None)
assert patch_one.text == patch_two.text
|
2,959 |
predicate
|
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import functools
import unittest
from . import sysinfo
def _identity(f):
return f
def _do_not_skip(reason):
assert reason
return _identity
skipOnMac = _do_not_skip
skipOnMacOnCI = _do_not_skip
skipOnWindows = _do_not_skip
skipOnAppVeyor = _do_not_skip
skipOnCI = _do_not_skip
skipOnManylinux = _do_not_skip
skipOnPyPy = _do_not_skip
skipOnPyPyOnCI = _do_not_skip
skipOnPyPy3OnCI = _do_not_skip
skipOnPyPy3 = _do_not_skip
skipOnPyPyOnWindows = _do_not_skip
skipOnPy3 = unittest.skip if sysinfo.PY3 else _do_not_skip
skipOnPy37 = unittest.skip if sysinfo.PY37 else _do_not_skip
skipOnPy310 = unittest.skip if sysinfo.PY310 else _do_not_skip
skipOnPy312 = unittest.skip if sysinfo.PY312 else _do_not_skip
skipOnPurePython = unittest.skip if sysinfo.PURE_PYTHON else _do_not_skip
skipWithCExtensions = unittest.skip if not sysinfo.PURE_PYTHON else _do_not_skip
skipOnLibuv = _do_not_skip
skipOnLibuvOnWin = _do_not_skip
skipOnLibuvOnCI = _do_not_skip
skipOnLibuvOnCIOnPyPy = _do_not_skip
skipOnLibuvOnPyPyOnWin = _do_not_skip
skipOnLibuvOnTravisOnCPython27 = _do_not_skip
skipOnLibev = _do_not_skip
if sysinfo.WIN:
skipOnWindows = unittest.skip
if sysinfo.OSX:
skipOnMac = unittest.skip
if sysinfo.RUNNING_ON_APPVEYOR:
# See comments scattered around about timeouts and the timer
# resolution available on appveyor (lots of jitter). this
    # seems worse with the 64-bit builds.
# Note that we skip/adjust these tests only on AppVeyor, not
    # win32---we don't think these are gevent-related problems, but
    # environment-related problems. These can be tested and debugged
# separately on windows in a more stable environment.
skipOnAppVeyor = unittest.skip
if sysinfo.RUNNING_ON_CI:
skipOnCI = unittest.skip
if sysinfo.OSX:
skipOnMacOnCI = unittest.skip
if sysinfo.RUNNING_ON_MANYLINUX:
skipOnManylinux = unittest.skip
if sysinfo.PYPY:
skipOnPyPy = unittest.skip
if sysinfo.RUNNING_ON_CI:
skipOnPyPyOnCI = unittest.skip
if sysinfo.WIN:
skipOnPyPyOnWindows = unittest.skip
if sysinfo.PYPY3:
skipOnPyPy3 = unittest.skip
if sysinfo.RUNNING_ON_CI:
# Same as above, for PyPy3.3-5.5-alpha and 3.5-5.7.1-beta and 3.5-5.8
skipOnPyPy3OnCI = unittest.skip
skipUnderCoverage = unittest.skip if sysinfo.RUN_COVERAGE else _do_not_skip
skipIf = unittest.skipIf
skipUnless = unittest.skipUnless
_has_psutil_process = None
def _check_psutil():
global _has_psutil_process
if _has_psutil_process is None:
_has_psutil_process = sysinfo.get_this_psutil_process() is not None
return _has_psutil_process
def _make_runtime_skip_decorator(reason, METHOD_NAME):
def decorator(test_item):
if not isinstance(test_item, type):
f = test_item
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
if not METHOD_NAME():
raise unittest.SkipTest(reason)
return f(*args, **kwargs)
test_item = skip_wrapper
else:
# given a class, override setUp() to skip it.
#
# Internally, unittest uses two flags on the class to do this:
# __unittest_skip__ and __unittest_skip_why__. It *appears*
# these are evaluated for each method in the test, so we can safely
# change them at runtime. **This isn't documented.**
#
# If they are set before execution begins, then the class setUpClass
# and tearDownClass are skipped. So changing them at runtime could result
# in something being set up but not torn down. It is substantially
# faster, though, to set them.
base = test_item
base_setUp = base.setUp
@functools.wraps(test_item)
def setUp(self):
if not METHOD_NAME():
base.__unittest_skip__ = True
base.__unittest_skip_why__ = reason
raise unittest.SkipTest(reason)
base_setUp(self)
base.setUp = setUp
return test_item
return decorator
def skipWithoutPSUtil(reason):
reason = "psutil not available: " + reason
# Defer the check until runtime to avoid imports
return _make_runtime_skip_decorator(reason, _check_psutil)
if sysinfo.LIBUV:
skipOnLibuv = unittest.skip
if sysinfo.RUNNING_ON_CI:
skipOnLibuvOnCI = unittest.skip
if sysinfo.PYPY:
skipOnLibuvOnCIOnPyPy = unittest.skip
if sysinfo.RUNNING_ON_TRAVIS:
if sysinfo.CPYTHON:
if sysinfo.PY27_ONLY:
skipOnLibuvOnTravisOnCPython27 = unittest.skip
if sysinfo.WIN:
skipOnLibuvOnWin = unittest.skip
if sysinfo.PYPY:
skipOnLibuvOnPyPyOnWin = unittest.skip
else:
skipOnLibev = unittest.skip
def skipWithoutResource(resource, reason=''):
requires = 'Requires resource %r' % (resource,)
if not reason:
reason = requires
else:
reason = reason + ' (' + requires + ')'
# Defer until runtime; resources are established as part
# of test startup.
def METHOD_NAME(): # This is easily cached if needed
from . import resources
return resources.ensure_setup_resources().is_resource_enabled(resource)
return _make_runtime_skip_decorator(reason, METHOD_NAME)
def skipWithoutExternalNetwork(reason=''):
# Use to decorate test functions or classes that
# need access to external network resources (e.g., DNS, HTTP servers, etc)
#
# Important: If you use this on classes, you must not use the
# two-argument form of super()
return skipWithoutResource('network', reason)
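
# Illustrative usage sketch (added; not part of the original gevent module). The test
# class, method, and reason strings below are assumptions showing how these decorators
# are typically applied; the actual skip checks are deferred until the tests run.
@skipWithoutExternalNetwork("resolves a public hostname")
class _ExampleNetworkTest(unittest.TestCase):

    @skipWithoutPSUtil("inspects the current process")
    def test_example(self):
        self.assertTrue(True)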
|
2,960 |
move files
|
from Components.Task import PythonTask, Task, Job, job_manager as JobManager, Condition
from Tools.Directories import fileExists
from enigma import eTimer
from os import path
from shutil import rmtree, copy2, move
class DeleteFolderTask(PythonTask):
def openFiles(self, fileList):
self.fileList = fileList
def work(self):
print("[DeleteFolderTask] files ", self.fileList)
errors = []
try:
rmtree(self.fileList)
except Exception as e:
errors.append(e)
if errors:
raise errors[0]
class CopyFileJob(Job):
def __init__(self, srcfile, destfile, name):
Job.__init__(self, _("Copying files"))
cmdline = 'cp -Rf "%s" "%s"' % (srcfile, destfile)
AddFileProcessTask(self, cmdline, srcfile, destfile, name)
class MoveFileJob(Job):
def __init__(self, srcfile, destfile, name):
Job.__init__(self, _("Moving files"))
cmdline = 'mv -f "%s" "%s"' % (srcfile, destfile)
AddFileProcessTask(self, cmdline, srcfile, destfile, name)
class AddFileProcessTask(Task):
def __init__(self, job, cmdline, srcfile, destfile, name):
Task.__init__(self, job, name)
self.setCmdline(cmdline)
self.srcfile = srcfile
self.destfile = destfile
self.ProgressTimer = eTimer()
self.ProgressTimer.callback.append(self.ProgressUpdate)
def ProgressUpdate(self):
if self.srcsize <= 0 or not fileExists(self.destfile, 'r'):
return
self.setProgress(int((path.getsize(self.destfile) / float(self.srcsize)) * 100))
self.ProgressTimer.start(5000, True)
def prepare(self):
if fileExists(self.srcfile, 'r'):
self.srcsize = path.getsize(self.srcfile)
self.ProgressTimer.start(5000, True)
def afterRun(self):
self.setProgress(100)
self.ProgressTimer.stop()
class DownloadProcessTask(Job):
def __init__(self, url, filename, file, **kwargs):
Job.__init__(self, _("%s") % file)
DownloadTask(self, url, filename, **kwargs)
class DownloaderPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return self.error_message
class DownloadTask(Task):
def __init__(self, job, url, path, **kwargs):
self.kwargs = kwargs
Task.__init__(self, job, _("Downloading"))
self.postconditions.append(DownloaderPostcondition())
self.job = job
self.url = url.decode() if isinstance(url, bytes) else url
self.path = path
self.error_message = ""
self.last_recvbytes = 0
self.error_message = None
self.download = None
self.aborted = False
def run(self, callback):
from Tools.Downloader import DownloadWithProgress
self.callback = callback
self.download = DownloadWithProgress(self.url, self.path, **self.kwargs)
self.download.addProgress(self.download_progress)
self.download.addEnd(self.download_finished)
self.download.addError(self.download_failed)
self.download.start()
print("[DownloadTask] downloading", self.url, "to", self.path)
def abort(self):
print("[DownloadTask] aborting", self.url)
if self.download:
self.download.stop()
self.aborted = True
def download_progress(self, recvbytes, totalbytes):
if (recvbytes - self.last_recvbytes) > 100000: # anti-flicker
self.progress = int(100 * (float(recvbytes) / float(totalbytes)))
if (((float(totalbytes) / 1024) / 1024) / 1024) >= 1:
self.name = _("Downloading") + ' ' + _("%s of %s GB") % (str(round((((float(recvbytes) / 1024) / 1024) / 1024), 2)), str(round((((float(totalbytes) / 1024) / 1024) / 1024), 2)))
elif ((float(totalbytes) / 1024) / 1024) >= 1:
self.name = _("Downloading") + ' ' + _("%s of %s MB") % (str(round(((float(recvbytes) / 1024) / 1024), 2)), str(round(((float(totalbytes) / 1024) / 1024), 2)))
elif (totalbytes / 1024) >= 1:
self.name = _("Downloading") + ' ' + _("%d of %d KB") % (recvbytes / 1024, totalbytes / 1024)
else:
self.name = _("Downloading") + ' ' + _("%d of %d Bytes") % (recvbytes, totalbytes)
self.last_recvbytes = recvbytes
def download_failed(self, failure_instance=None, error_message=""):
self.error_message = error_message
if error_message == "" and failure_instance is not None:
self.error_message = failure_instance.getErrorMessage()
Task.processFinished(self, 1)
def download_finished(self, string=""):
if self.aborted:
self.finish(aborted=True)
else:
Task.processFinished(self, 0)
def copyFiles(fileList, name):
for src, dst in fileList:
if path.isdir(src) or int(path.getsize(src)) / 1000 / 1000 > 100:
JobManager.AddJob(CopyFileJob(src, dst, name))
else:
copy2(src, dst)
def METHOD_NAME(fileList, name):
for src, dst in fileList:
if path.isdir(src) or int(path.getsize(src)) / 1000 / 1000 > 100:
JobManager.AddJob(MoveFileJob(src, dst, name))
else:
move(src, dst)
def deleteFiles(fileList, name):
job = Job(_("Deleting files"))
task = DeleteFolderTask(job, name)
task.openFiles(fileList)
JobManager.AddJob(job)
def downloadFile(url, file_name, sel, **kwargs):
JobManager.AddJob(DownloadProcessTask(url, file_name, sel, **kwargs))
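
# Illustrative usage sketch (added; not part of the original plugin code). The paths and
# job names below are placeholders. Files or directories larger than roughly 100 MB are
# queued as background jobs through JobManager; smaller files are handled synchronously.
#
#     copyFiles([("/media/hdd/movie/clip.ts", "/media/usb/clip.ts")], "clip.ts")
#     METHOD_NAME([("/media/hdd/movie/clip.ts", "/media/usb/clip.ts")], "clip.ts")
#     downloadFile("http://example.org/archive.zip", "/tmp/archive.zip", "archive.zip")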
|
2,961 |
jacob f
|
"""
Extended kalman filter (EKF) localization sample
author: Atsushi Sakai (@Atsushi_twi)
"""
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
import math
import matplotlib.pyplot as plt
import numpy as np
from utils.plot import plot_covariance_ellipse
# Covariance for EKF simulation
Q = np.diag([
0.1, # variance of location on x-axis
0.1, # variance of location on y-axis
np.deg2rad(1.0), # variance of yaw angle
1.0 # variance of velocity
]) ** 2 # predict state covariance
R = np.diag([1.0, 1.0]) ** 2 # Observation x,y position covariance
# Simulation parameter
INPUT_NOISE = np.diag([1.0, np.deg2rad(30.0)]) ** 2
GPS_NOISE = np.diag([0.5, 0.5]) ** 2
DT = 0.1 # time tick [s]
SIM_TIME = 50.0 # simulation time [s]
show_animation = True
def calc_input():
v = 1.0 # [m/s]
yawrate = 0.1 # [rad/s]
u = np.array([[v], [yawrate]])
return u
def observation(xTrue, xd, u):
xTrue = motion_model(xTrue, u)
# add noise to gps x-y
z = observation_model(xTrue) + GPS_NOISE @ np.random.randn(2, 1)
# add noise to input
ud = u + INPUT_NOISE @ np.random.randn(2, 1)
xd = motion_model(xd, ud)
return xTrue, z, xd, ud
def motion_model(x, u):
F = np.array([[1.0, 0, 0, 0],
[0, 1.0, 0, 0],
[0, 0, 1.0, 0],
[0, 0, 0, 0]])
B = np.array([[DT * math.cos(x[2, 0]), 0],
[DT * math.sin(x[2, 0]), 0],
[0.0, DT],
[1.0, 0.0]])
x = F @ x + B @ u
return x
def observation_model(x):
H = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0]
])
z = H @ x
return z
def METHOD_NAME(x, u):
"""
Jacobian of Motion Model
motion model
x_{t+1} = x_t+v*dt*cos(yaw)
y_{t+1} = y_t+v*dt*sin(yaw)
yaw_{t+1} = yaw_t+omega*dt
v_{t+1} = v{t}
so
dx/dyaw = -v*dt*sin(yaw)
dx/dv = dt*cos(yaw)
dy/dyaw = v*dt*cos(yaw)
dy/dv = dt*sin(yaw)
"""
yaw = x[2, 0]
v = u[0, 0]
jF = np.array([
[1.0, 0.0, -DT * v * math.sin(yaw), DT * math.cos(yaw)],
[0.0, 1.0, DT * v * math.cos(yaw), DT * math.sin(yaw)],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
return jF
def jacob_h():
# Jacobian of Observation Model
jH = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0]
])
return jH
def ekf_estimation(xEst, PEst, z, u):
# Predict
xPred = motion_model(xEst, u)
jF = METHOD_NAME(xEst, u)
PPred = jF @ PEst @ jF.T + Q
# Update
jH = jacob_h()
zPred = observation_model(xPred)
y = z - zPred
S = jH @ PPred @ jH.T + R
K = PPred @ jH.T @ np.linalg.inv(S)
xEst = xPred + K @ y
PEst = (np.eye(len(xEst)) - K @ jH) @ PPred
return xEst, PEst
def main():
print(__file__ + " start!!")
time = 0.0
# State Vector [x y yaw v]'
xEst = np.zeros((4, 1))
xTrue = np.zeros((4, 1))
PEst = np.eye(4)
xDR = np.zeros((4, 1)) # Dead reckoning
# history
hxEst = xEst
hxTrue = xTrue
hxDR = xTrue
hz = np.zeros((2, 1))
while SIM_TIME >= time:
time += DT
u = calc_input()
xTrue, z, xDR, ud = observation(xTrue, xDR, u)
xEst, PEst = ekf_estimation(xEst, PEst, z, ud)
# store data history
hxEst = np.hstack((hxEst, xEst))
hxDR = np.hstack((hxDR, xDR))
hxTrue = np.hstack((hxTrue, xTrue))
hz = np.hstack((hz, z))
if show_animation:
plt.cla()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
plt.plot(hz[0, :], hz[1, :], ".g")
plt.plot(hxTrue[0, :].flatten(),
hxTrue[1, :].flatten(), "-b")
plt.plot(hxDR[0, :].flatten(),
hxDR[1, :].flatten(), "-k")
plt.plot(hxEst[0, :].flatten(),
hxEst[1, :].flatten(), "-r")
plot_covariance_ellipse(xEst[0, 0], xEst[1, 0], PEst)
plt.axis("equal")
plt.grid(True)
plt.pause(0.001)
if __name__ == '__main__':
main()
|
2,962 |
test int little byteorder
|
# Copyright 2020. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import _struct
import unittest
from math import isclose
from random import randint, uniform, choice
from string import ascii_lowercase
from thingsboard_gateway.connectors.can.bytes_can_uplink_converter import BytesCanUplinkConverter
class BytesCanUplinkConverterTests(unittest.TestCase):
def setUp(self):
self.converter = BytesCanUplinkConverter()
def _has_no_data(self, data):
return bool(data is None or not data.get("attributes", []) and not data.get("telemetry", []))
def test_wrong_type(self):
can_data = [0, 1, 0, 0, 0]
configs = [{
"key": "var",
"is_ts": True,
"type": "wrong_type"
}]
tb_data = self.converter.convert(configs, can_data)
self.assertTrue(self._has_no_data(tb_data))
def test_bool_true(self):
can_data = [0, 1, 0, 0, 0]
configs = [{
"key": "boolVar",
"is_ts": True,
"type": "bool",
"start": 1
}]
tb_data = self.converter.convert(configs, can_data)
self.assertTrue(tb_data["telemetry"]["boolVar"])
def test_bool_false(self):
can_data = [1, 0, 1, 1, 1]
configs = [{
"key": "boolVar",
"is_ts": False,
"type": "bool",
"start": 1
}]
tb_data = self.converter.convert(configs, can_data)
self.assertFalse(tb_data["attributes"]["boolVar"])
def _test_int(self, type, byteorder):
int_value = randint(-32768, 32767)
int_size = 2
can_data = [0, 0]
configs = [{
"key": type + "Var",
"is_ts": True,
"type": type,
"start": len(can_data),
"length": int_size,
"byteorder": byteorder,
"signed": int_value < 0
}]
can_data.extend(int_value.to_bytes(int_size, byteorder, signed=(int_value < 0)))
tb_data = self.converter.convert(configs, can_data)
self.assertEqual(tb_data["telemetry"][type + "Var"], int_value)
def test_int_big_byteorder(self):
self._test_int("int", "big")
def METHOD_NAME(self):
self._test_int("int", "little")
def test_long_big_byteorder(self):
self._test_int("long", "big")
def test_long_little_byteorder(self):
self._test_int("long", "little")
def _test_float_point_number(self, type, byteorder):
float_value = uniform(-3.1415926535, 3.1415926535)
can_data = [0, 0]
configs = [{
"key": type + "Var",
"is_ts": True,
"type": type,
"start": len(can_data),
"length": 4 if type[0] == "f" else 8,
"byteorder": byteorder
}]
can_data.extend(_struct.pack((">" if byteorder[0] == "b" else "<") + type[0],
float_value))
tb_data = self.converter.convert(configs, can_data)
self.assertTrue(isclose(tb_data["telemetry"][type + "Var"], float_value, rel_tol=1e-05))
def test_float_big_byteorder(self):
self._test_float_point_number("float", "big")
def test_float_little_byteorder(self):
self._test_float_point_number("float", "little")
def test_double_big_byteorder(self):
self._test_float_point_number("double", "big")
def test_double_little_byteorder(self):
self._test_float_point_number("double", "little")
def _test_string(self, encoding="ascii"):
str_length = randint(1, 8)
str_value = ''.join(choice(ascii_lowercase) for _ in range(str_length))
configs = [{
"key": "stringVar",
"is_ts": True,
"type": "string",
"start": 0,
"length": str_length,
"encoding": encoding
}]
can_data = str_value.encode(encoding)
tb_data = self.converter.convert(configs, can_data)
self.assertEqual(tb_data["telemetry"]["stringVar"], str_value)
def test_string_default_ascii_encoding(self):
self._test_string()
def test_string_utf_8_string(self):
self._test_string("utf-8")
def _test_eval_int(self, number, strict_eval, expression):
can_data = number.to_bytes(1, "big", signed=(number < 0))
# By default the strictEval flag is True
configs = [{
"key": "var",
"is_ts": True,
"type": "int",
"start": 0,
"length": 1,
"byteorder": "big",
"signed": number < 0,
"expression": expression,
"strictEval": strict_eval
}]
return self.converter.convert(configs, can_data)
def test_strict_eval_violation(self):
        number = randint(-128, 255)
tb_data = self._test_eval_int(number, True, "pow(value, 2)")
self.assertTrue(self._has_no_data(tb_data))
def test_strict_eval(self):
        number = randint(-128, 255)
tb_data = self._test_eval_int(number, True, "value * value")
self.assertEqual(tb_data["telemetry"]["var"], number * number)
def test_no_strict_eval(self):
        number = randint(-128, 255)
tb_data = self._test_eval_int(number, False, "pow(value, 2)")
self.assertEqual(tb_data["telemetry"]["var"], number * number)
def test_multiple_valid_configs(self):
bool_value = True
        int_value = randint(0, 255)
can_data = [0, int(bool_value), int_value, 0, 0, 0]
configs = [
{
"key": "boolVar",
"type": "boolean",
"is_ts": True,
"start": 1
},
{
"key": "intVar",
"type": "int",
"is_ts": False,
"start": 2,
"length": 4,
"byteorder": "little",
"signed": False
}
]
tb_data = self.converter.convert(configs, can_data)
self.assertEqual(tb_data["telemetry"]["boolVar"], bool_value)
self.assertEqual(tb_data["attributes"]["intVar"], int_value)
def test_multiple_configs_one_invalid(self):
bool_value = True
invalid_length = 3 # Float requires 4 bytes
        can_data = [0, int(bool_value), randint(0, 255), 0, 0, 0]
configs = [
{
"key": "validVar",
"type": "boolean",
"is_ts": True,
"start": 1
},
{
"key": "invalidVar",
"type": "float",
"is_ts": False,
"start": 2,
"length": invalid_length
}
]
tb_data = self.converter.convert(configs, can_data)
self.assertEqual(tb_data["telemetry"]["validVar"], bool_value)
self.assertIsNone(tb_data["attributes"].get("invalidVar"))
if __name__ == '__main__':
unittest.main()
|
2,963 |
handle
|
#########################################################################
#
# Copyright (C) 2020 OSGeo
# Copyright (C) 2022 King's College London
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import DC, DCTERMS, RDF, SKOS
from geonode.base.models import Thesaurus, ThesaurusKeyword, ThesaurusKeywordLabel, ThesaurusLabel
class Command(BaseCommand):
help = 'Dump a thesaurus in RDF format'
formats = sorted(['ttl', 'xml', 'pretty-xml', 'json-ld', 'nt', 'n3', 'trig'])
def add_arguments(self, parser):
# Named (optional) arguments
parser.add_argument(
'-n',
'--name',
dest='name',
help='Dump the thesaurus with the given name')
parser.add_argument(
'-f',
'--format',
dest='format',
default='pretty-xml',
help=f'Format string supported by rdflib, e.g.: pretty-xml (default), {", ".join(self.formats)}'
)
parser.add_argument(
'--default-lang',
dest='lang',
default=getattr(settings, 'THESAURUS_DEFAULT_LANG', None),
help='Default language code for untagged string literals'
)
# Named (optional) arguments
parser.add_argument(
'-l',
'--list',
action="store_true",
dest='list',
default=False,
help='List available thesauri')
def METHOD_NAME(self, **options):
name = options.get('name')
list = options.get('list')
if not name and not list:
raise CommandError("Missing identifier name for the thesaurus (--name)")
if options.get('format') not in self.formats:
raise CommandError(f"Invalid output format: supported formats are {', '.join(self.formats)}")
if list:
self.list_thesauri()
return
self.dump_thesaurus(name, options.get('format'), options.get('lang'))
def list_thesauri(self):
self.stderr.write(self.style.SUCCESS('LISTING THESAURI'))
thesaurus_entries = Thesaurus.objects.values_list('identifier', flat=True)
if len(thesaurus_entries) == 0:
self.stderr.write(self.style.WARNING('NO ENTRIES FOUND ...'))
return
max_id_len = len(max(thesaurus_entries, key=len))
for t in Thesaurus.objects.order_by('order').all():
if t.card_max == 0:
card = 'DISABLED'
else:
# DISABLED
# [0..n]
card = f'[{t.card_min}..{t.card_max if t.card_max!=-1 else "N"}] '
self.stdout.write(f'id:{t.id:2} sort:{t.order:3} {card} name={t.identifier.ljust(max_id_len)} title="{t.title}" URI:{t.about}\n')
def dump_thesaurus(self, name: str, fmt: str, default_lang: str):
g = Graph()
thesaurus = Thesaurus.objects.filter(identifier=name).get()
scheme = URIRef(thesaurus.about)
g.add((scheme, RDF.type, SKOS.ConceptScheme))
g.add((scheme, DC.title, Literal(thesaurus.title, lang=default_lang)))
g.add((scheme, DC.description, Literal(thesaurus.description, lang=default_lang)))
g.add((scheme, DCTERMS.issued, Literal(thesaurus.date)))
for title_label in ThesaurusLabel.objects.filter(thesaurus=thesaurus).all():
g.add((scheme, DC.title, Literal(title_label.label, lang=title_label.lang)))
# Concepts
for keyword in ThesaurusKeyword.objects.filter(thesaurus=thesaurus).all():
concept = URIRef(keyword.about)
g.add((concept, RDF.type, SKOS.Concept))
g.add((concept, SKOS.inScheme, scheme))
if keyword.alt_label:
g.add((concept, SKOS.altLabel, Literal(keyword.alt_label, lang=default_lang)))
for label in ThesaurusKeywordLabel.objects.filter(keyword=keyword).all():
g.add((concept, SKOS.prefLabel, Literal(label.label, lang=label.lang)))
self.stdout.write(g.serialize(format=fmt))
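
# Illustrative invocations (added; not part of the original command). The management
# command name depends on this module's file name and is assumed here; the identifier
# is a placeholder taken from the --list output:
#
#     python manage.py <command_name> --list
#     python manage.py <command_name> --name <identifier> --format ttl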
|
2,964 |
get mode
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2015 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Dialog for the Mode shift functionality.
"""
from fractions import Fraction
from PyQt5.QtCore import QSettings, QSize
from PyQt5.QtGui import QValidator
from PyQt5.QtWidgets import (QComboBox, QDialog, QDialogButtonBox,
QGridLayout, QLabel, QLineEdit, QWidget)
import app
import userguide
import qutil
# Mode definitions
modes = {
'Major': ((0,0), (1,1), (2,2), (3, Fraction(5, 2)), (4, Fraction(7, 2)),
(5, Fraction(9, 2)), (6, Fraction(11, 2))),
'Minor (harmonic)': ((0,0), (1,1), (2, Fraction(3, 2)), (3, Fraction(5, 2)),
(4, Fraction(7, 2)), (5, 4), (6, Fraction(11, 2))),
'Minor (natural)': ((0,0), (1,1), (2, Fraction(3, 2)), (3, Fraction(5, 2)),
(4, Fraction(7, 2)), (5, 4), (6,5)),
'Dorian': ((0,0), (1,1), (2, Fraction(3, 2)), (3, Fraction(5, 2)),
(4, Fraction(7, 2)), (5, Fraction(9, 2)), (6,5)),
'Phrygian': ((0,0), (1, Fraction(1, 2)), (2, Fraction(3, 2)),
(3, Fraction(5, 2)), (4, Fraction(7, 2)), (5,4), (6,5)),
'Lydian': ((0,0), (1,1), (2,2), (3,3), (4, Fraction(7, 2)),
(5, Fraction(9, 2)), (6, Fraction(11, 2))),
'Mixolydian': ((0,0), (1,1), (2,2), (3, Fraction(5, 2)), (4, Fraction(7, 2)),
(5, Fraction(9, 2)), (6,5)),
'Locrian': ((0,0), (1, Fraction(1, 2)), (2, Fraction(3, 2)),
(3, Fraction(5, 2)), (4, 3), (5,4), (6,5)),
'Phrygian dominant': ((0,0), (1, Fraction(1, 2)), (2, 2),
(3, Fraction(5, 2)), (4, Fraction(7, 2)), (5,4), (6,5)),
'Hungarian minor': ((0,0), (1,1), (2, Fraction(3, 2)), (3,3),
(4, Fraction(7, 2)), (5, 4), (6, Fraction(11, 2))),
'Double harmonic major': ((0,0), (1, Fraction(1, 2)), (2, 2), (3, Fraction(5, 2)),
(4, Fraction(7, 2)), (5,4), (6, Fraction(11, 2))),
'Persian': ((0,0), (1, Fraction(1, 2)), (2, 2), (3, Fraction(5, 2)),
(4, 3), (5,4), (6, Fraction(11, 2))),
'Diminished (octatonic)': ((0,0), (1,1), (2, Fraction(3, 2)), (3, Fraction(5, 2)), (4, 3),
(5,4), (5, Fraction(9, 2)), (6, Fraction(11, 2))),
'Whole tone (hexatonic)': ((0,0), (1,1), (2,2), (3,3), (4,4), (6,5)),
'Yo (pentatonic)': ((0,0), (1,1), (3, Fraction(5, 2)), (4, Fraction(7, 2)), (6,5))
}
class ModeShiftDialog(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
mainLayout = QGridLayout()
self.setLayout(mainLayout)
self.keyLabel = QLabel()
self.keyInput = QLineEdit()
self.modeCombo = QComboBox()
self.modeLabel = QLabel()
self.buttons = QDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
userguide.addButton(self.buttons, "mode_shift")
for m in sorted(modes.keys()):
self.modeCombo.addItem(m)
mainLayout.addWidget(self.keyLabel, 0, 0, 1, 1)
mainLayout.addWidget(self.keyInput, 0, 1, 1, 1)
mainLayout.addWidget(self.modeLabel, 1, 0, 1, 1)
mainLayout.addWidget(self.modeCombo, 1, 1, 1, 1)
mainLayout.addWidget(self.buttons, 9, 0, 2, 2)
app.translateUI(self)
qutil.saveDialogSize(self, "mode_shift/dialog/size", QSize(80, 60))
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
self.keyInput.textEdited.connect(self.readKeyInput)
self.modeCombo.currentIndexChanged.connect(self.readSettings)
self.loadSettings()
def translateUI(self):
self.setWindowTitle(app.caption(_("Mode Shift")))
self.keyLabel.setText(_("Key:"))
self.modeLabel.setText(_("Mode:"))
self.buttons.button(QDialogButtonBox.Ok).setText(_("shift pitches"))
self.buttons.button(QDialogButtonBox.Ok).setEnabled(False)
def setKeyValidator(self, validate):
"""Set function that validates the key input."""
keyValidator = KeyValidator()
keyValidator.setValidateFunc(validate)
self.keyInput.setValidator(keyValidator)
def readSettings(self):
"""Reads the current settings."""
self._currentKey = self.keyInput.text()
self._currentMode = self.modeCombo.currentText()
def readKeyInput(self):
"""Read the key input and check if it's acceptable."""
if self.keyInput.hasAcceptableInput():
self.readSettings()
self.buttons.button(QDialogButtonBox.Ok).setEnabled(True)
else:
self.buttons.button(QDialogButtonBox.Ok).setEnabled(False)
def METHOD_NAME(self):
"""Returns the chosen mode."""
return self._currentKey, modes[self._currentMode]
def loadSettings(self):
""" get users previous settings """
s = QSettings()
s.beginGroup('mode_shift')
key = s.value('key', "", str)
self.keyInput.setText(key)
index = s.value('mode', 0, int)
self.modeCombo.setCurrentIndex(index)
self.readKeyInput()
def saveSettings(self):
""" save users last settings """
s = QSettings()
s.beginGroup('mode_shift')
s.setValue('key', self._currentKey)
s.setValue('mode', self.modeCombo.currentIndex())
class KeyValidator(QValidator):
def __init__(self, parent=None):
super().__init__(parent)
def setValidateFunc(self, func):
self._func = func
def validate(self, text, pos):
if text:
if self._func(text):
return (QValidator.Acceptable, text, pos)
elif len(text) > 3:
return (QValidator.Invalid, text, pos)
return (QValidator.Intermediate, text, pos)
|
2,965 |
test max speed records no value
|
import datetime
from flask import Flask
from fittrackee.users.models import User
from fittrackee.workouts.models import Record, Sport, Workout
class TestRecordModel:
def test_record_model(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
workout_cycling_user_1: Workout,
) -> None:
record_type = 'LD'
record_ld = Record.query.filter_by(
user_id=workout_cycling_user_1.user_id,
sport_id=workout_cycling_user_1.sport_id,
record_type=record_type,
).first()
assert record_ld.user.username == user_1.username
assert record_ld.sport_id == sport_1_cycling.id
        assert record_ld.workout_id == workout_cycling_user_1.id
assert record_ld.record_type == record_type
assert str(record_ld.workout_date) == str(
workout_cycling_user_1.workout_date
)
assert record_ld.value == workout_cycling_user_1.duration
assert '<Record Cycling - LD - 2018-01-01>' == str(record_ld)
record_serialize = record_ld.serialize()
        assert record_serialize['id'] == record_ld.id
        assert record_serialize['record_type'] == record_ld.record_type
        assert record_serialize['sport_id'] == record_ld.sport_id
        assert record_serialize['user'] == record_ld.user.username
        assert record_serialize['value'] == record_ld.value
        assert record_serialize['workout_id'] == record_ld.workout_id
        assert record_serialize['workout_date'] == record_ld.workout_date
def test_record_model_with_none_value(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
workout_cycling_user_1: Workout,
) -> None:
record_ld = Record.query.filter_by(
user_id=workout_cycling_user_1.user_id,
sport_id=workout_cycling_user_1.sport_id,
record_type='LD',
).first()
record_ld.value = None
assert record_ld.value is None
record_serialize = record_ld.serialize()
assert record_serialize['value'] is None
def test_average_speed_records(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
workout_cycling_user_1: Workout,
) -> None:
record_as = Record.query.filter_by(
user_id=workout_cycling_user_1.user_id,
sport_id=workout_cycling_user_1.sport_id,
record_type='AS',
).first()
assert isinstance(record_as.value, float)
assert record_as.value == 10.0
assert record_as._value == 1000
record_serialize = record_as.serialize()
assert record_serialize.get('value') == 10.0
assert isinstance(record_serialize.get('value'), float)
def test_add_farthest_distance_records(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
workout_cycling_user_1: Workout,
) -> None:
record_fd = Record.query.filter_by(
user_id=workout_cycling_user_1.user_id,
sport_id=workout_cycling_user_1.sport_id,
record_type='FD',
).first()
assert isinstance(record_fd.value, float)
assert record_fd.value == 10.0
assert record_fd._value == 10000
record_serialize = record_fd.serialize()
assert record_serialize.get('value') == 10.0
assert isinstance(record_serialize.get('value'), float)
def test_add_longest_duration_records(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
workout_cycling_user_1: Workout,
) -> None:
record_ld = Record.query.filter_by(
user_id=workout_cycling_user_1.user_id,
sport_id=workout_cycling_user_1.sport_id,
record_type='LD',
).first()
assert isinstance(record_ld.value, datetime.timedelta)
assert str(record_ld.value) == '1:00:00'
assert record_ld._value == 3600
record_serialize = record_ld.serialize()
assert record_serialize.get('value') == '1:00:00'
assert isinstance(record_serialize.get('value'), str)
def test_add_longest_duration_records_with_zero(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
workout_cycling_user_1: Workout,
) -> None:
record_ld = Record.query.filter_by(
user_id=workout_cycling_user_1.user_id,
sport_id=workout_cycling_user_1.sport_id,
record_type='LD',
).first()
record_ld.value = datetime.timedelta(seconds=0)
assert isinstance(record_ld.value, datetime.timedelta)
assert str(record_ld.value) == '0:00:00'
assert record_ld._value == 0
record_serialize = record_ld.serialize()
assert record_serialize.get('value') == '0:00:00'
assert isinstance(record_serialize.get('value'), str)
def METHOD_NAME(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
workout_cycling_user_1: Workout,
) -> None:
record_ms = Record.query.filter_by(
user_id=workout_cycling_user_1.user_id,
sport_id=workout_cycling_user_1.sport_id,
record_type='MS',
).first()
assert isinstance(record_ms.value, float)
assert record_ms.value == 10.0
assert record_ms._value == 1000
record_serialize = record_ms.serialize()
assert record_serialize.get('value') == 10.0
assert isinstance(record_serialize.get('value'), float)
|
2,966 |
id
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
An object that represents a private endpoint connection for a container registry.
"""
def __init__(__self__, METHOD_NAME=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, system_data=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private endpoint.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
METHOD_NAME=self.METHOD_NAME,
name=self.name,
private_endpoint=self.private_endpoint,
private_link_service_connection_state=self.private_link_service_connection_state,
provisioning_state=self.provisioning_state,
system_data=self.system_data,
type=self.type)
def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str] = None,
registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
Get the specified private endpoint connection associated with the container registry.
Azure REST API version: 2022-12-01.
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['registryName'] = registry_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:containerregistry:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
METHOD_NAME=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
Get the specified private endpoint connection associated with the container registry.
Azure REST API version: 2022-12-01.
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
|
2,967 |
get authorization provider output
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetAuthorizationProviderResult',
'AwaitableGetAuthorizationProviderResult',
'get_authorization_provider',
'get_authorization_provider_output',
]
@pulumi.output_type
class GetAuthorizationProviderResult:
"""
Authorization Provider contract.
"""
def __init__(__self__, display_name=None, id=None, identity_provider=None, name=None, oauth2=None, type=None):
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity_provider and not isinstance(identity_provider, str):
raise TypeError("Expected argument 'identity_provider' to be a str")
pulumi.set(__self__, "identity_provider", identity_provider)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if oauth2 and not isinstance(oauth2, dict):
raise TypeError("Expected argument 'oauth2' to be a dict")
pulumi.set(__self__, "oauth2", oauth2)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
Authorization Provider name. Must be 1 to 300 characters long.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="identityProvider")
def identity_provider(self) -> Optional[str]:
"""
Identity provider name. Must be 1 to 300 characters long.
"""
return pulumi.get(self, "identity_provider")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def oauth2(self) -> Optional['outputs.AuthorizationProviderOAuth2SettingsResponse']:
"""
OAuth2 settings
"""
return pulumi.get(self, "oauth2")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetAuthorizationProviderResult(GetAuthorizationProviderResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAuthorizationProviderResult(
display_name=self.display_name,
id=self.id,
identity_provider=self.identity_provider,
name=self.name,
oauth2=self.oauth2,
type=self.type)
def get_authorization_provider(authorization_provider_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAuthorizationProviderResult:
"""
Gets the details of the authorization provider specified by its identifier.
Azure REST API version: 2022-08-01.
:param str authorization_provider_id: Identifier of the authorization provider.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['authorizationProviderId'] = authorization_provider_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement:getAuthorizationProvider', __args__, opts=opts, typ=GetAuthorizationProviderResult).value
return AwaitableGetAuthorizationProviderResult(
display_name=pulumi.get(__ret__, 'display_name'),
id=pulumi.get(__ret__, 'id'),
identity_provider=pulumi.get(__ret__, 'identity_provider'),
name=pulumi.get(__ret__, 'name'),
oauth2=pulumi.get(__ret__, 'oauth2'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_authorization_provider)
def METHOD_NAME(authorization_provider_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAuthorizationProviderResult]:
"""
Gets the details of the authorization provider specified by its identifier.
Azure REST API version: 2022-08-01.
:param str authorization_provider_id: Identifier of the authorization provider.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
...
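
# Illustrative usage sketch (added; not part of the generated module). The resource
# names below are placeholders, and the call (the output-lifted helper defined above,
# exported as get_authorization_provider_output) only resolves inside a running Pulumi
# program:
#
#     provider = get_authorization_provider_output(
#         authorization_provider_id="example-provider",
#         resource_group_name="example-rg",
#         service_name="example-apim",
#     )
#     pulumi.export("authorizationProviderName", provider.name)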
|
2,968 |
get id for path
|
""" copy files to Google Drive using rclone """
import configparser
import json
import logging
import subprocess
from pathlib import Path
from subprocess import Popen, PIPE
from typing import List, Optional, Tuple, Union
from tqdm.notebook import tqdm
from metatlas.tools.notebook import in_papermill
logger = logging.getLogger(__name__)
class RClone:
"""Access to Google Drive"""
def __init__(self, rclone_path: Union[Path, str]) -> None:
self.rclone_path = Path(rclone_path)
def config_file(self) -> Optional[str]:
"""Returns path to config file or None"""
try:
result = subprocess.check_output([self.rclone_path, "config", "file"], text=True)
except (subprocess.CalledProcessError, FileNotFoundError):
return None
return result.split("\n")[1]
def get_name_for_id(self, identifier: str) -> Optional[str]:
"""
Inputs:
            identifier: unique folder identifier from Google Drive URL
if identifier is in the config file, then return the name assigned to the identifier
otherwise return None
"""
ini_file = self.config_file()
if ini_file is None:
return None
config = configparser.ConfigParser()
config.read(ini_file)
for name in config.sections():
props = config[name]
if "type" in props and props["type"] == "drive":
if "root_folder_id" in props and props["root_folder_id"] == identifier:
return name
return None
def copy_to_drive(
self, source: Path, drive: str, dest_path: Optional[Path] = None, progress: bool = False
) -> None:
"""
Inputs:
source: file or directory to copy to drive
drive: name in the RClone configuration for a location in Google Drive
dest_path: location under drive to copy to, will create folders if needed
progress: display a tqdm progress bar
"""
dest = Path(f"{drive}:" if dest_path is None else f"{drive}:{dest_path}")
cmd = [self.rclone_path, "copy", source, dest]
try:
if progress:
cmd.append("--progress")
with tqdm(
total=100, desc="Percentage of total number of files", unit="%", disable=in_papermill()
) as pbar:
with Popen(cmd, stdout=PIPE, bufsize=1, universal_newlines=True) as proc:
for line in proc.stdout or []:
line = line.strip()
if line.startswith("Transferred:") and line.endswith("%"):
percent = float(line.split(",")[1].split("%")[0])
pbar.n = percent
pbar.refresh()
else:
subprocess.check_output(cmd, text=True)
except subprocess.CalledProcessError as err:
logger.exception(err)
raise err
except FileNotFoundError:
logger.info("rclone not found. Skipping transfer to Google Drive")
def METHOD_NAME(self, path_string: str) -> str:
"""
Inputs:
path_string: a string containing drive_name a colon and one or more folders like:
'my_drive:folder1/folder2'
returns an ID string which can be used in a Google Drive URL
"""
drive, folders = parse_path(path_string)
assert isinstance(folders, list)
assert isinstance(folders[:-1], list)
all_but_last = f"{drive}:{'/'.join(folders[:-1])}"
command_list = [self.rclone_path, "lsjson", "--dirs-only", all_but_last]
try:
result = subprocess.check_output(command_list, text=True)
except subprocess.CalledProcessError as err:
logger.exception(err)
raise err
returned_folders = json.loads(result)
for folder in returned_folders:
if folder["Name"] == folders[-1]:
return folder["ID"]
raise FileNotFoundError(f"Could not find a file or folder at {path_string}")
def path_to_url(self, path_string: str) -> str:
"""
Inputs:
path_string: a string containing drive_name a colon and one or more folders like:
'my_drive:folder1/folder2'
returns an URL for opening the object at path_string
"""
drive_id = self.METHOD_NAME(path_string)
return f"https://drive.google.com/drive/folders/{drive_id}"
def parse_path(path_string: str) -> Tuple[str, List[str]]:
"""
Inputs:
path_string: a string containing drive_name a colon and one or more folders like:
'my_drive:folder1/folder2'
returns a tuple of the drive_name, folder_list
"""
drive = path_string.split(":")[0]
remainder = ":".join(path_string.split(":")[1:])
return drive, remainder.split("/")
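
# Illustrative usage sketch (added; not part of the original module). The rclone binary
# path and the drive/folder names below are assumptions for demonstration only.
if __name__ == "__main__":
    rclone = RClone("/usr/bin/rclone")
    print("config file:", rclone.config_file())    # None if rclone is not installed
    print(parse_path("my_drive:folder1/folder2"))  # -> ('my_drive', ['folder1', 'folder2'])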
|
2,969 |
test version bump increase string length
|
from pathlib import Path
from shutil import copyfile
import pytest
from commitizen import bump
from commitizen.exceptions import CurrentVersionNotFoundError
MULTIPLE_VERSIONS_INCREASE_STRING = 'version = "1.2.9"\n' * 30
MULTIPLE_VERSIONS_REDUCE_STRING = 'version = "1.2.10"\n' * 30
TESTING_FILE_PREFIX = "tests/data"
def _copy_sample_file_to_tmpdir(
tmp_path: Path, source_filename: str, dest_filename: str
) -> Path:
tmp_file = tmp_path / dest_filename
copyfile(f"{TESTING_FILE_PREFIX}/{source_filename}", tmp_file)
return tmp_file
@pytest.fixture(scope="function")
def commitizen_config_file(tmpdir):
return _copy_sample_file_to_tmpdir(
tmpdir, "sample_pyproject.toml", "pyproject.toml"
)
@pytest.fixture(scope="function")
def python_version_file(tmpdir, request):
return _copy_sample_file_to_tmpdir(tmpdir, "sample_version.py", "__version__.py")
@pytest.fixture(scope="function")
def inconsistent_python_version_file(tmpdir):
return _copy_sample_file_to_tmpdir(
tmpdir, "inconsistent_version.py", "__version__.py"
)
@pytest.fixture(scope="function")
def random_location_version_file(tmpdir):
return _copy_sample_file_to_tmpdir(tmpdir, "sample_cargo.lock", "Cargo.lock")
@pytest.fixture(scope="function")
def version_repeated_file(tmpdir):
return _copy_sample_file_to_tmpdir(
tmpdir, "repeated_version_number.json", "package.json"
)
@pytest.fixture(scope="function")
def docker_compose_file(tmpdir):
return _copy_sample_file_to_tmpdir(
tmpdir, "sample_docker_compose.yaml", "docker-compose.yaml"
)
@pytest.fixture(
scope="function",
params=(
"multiple_versions_to_update_pyproject.toml",
"multiple_versions_to_update_pyproject_wo_eol.toml",
),
ids=("with_eol", "without_eol"),
)
def multiple_versions_to_update_poetry_lock(tmpdir, request):
return _copy_sample_file_to_tmpdir(tmpdir, request.param, "pyproject.toml")
@pytest.fixture(scope="function")
def multiple_versions_increase_string(tmpdir):
tmp_file = tmpdir.join("anyfile")
tmp_file.write(MULTIPLE_VERSIONS_INCREASE_STRING)
return str(tmp_file)
@pytest.fixture(scope="function")
def multiple_versions_reduce_string(tmpdir):
tmp_file = tmpdir.join("anyfile")
tmp_file.write(MULTIPLE_VERSIONS_REDUCE_STRING)
return str(tmp_file)
@pytest.fixture(scope="function")
def version_files(
commitizen_config_file,
python_version_file,
version_repeated_file,
docker_compose_file,
):
return (
commitizen_config_file,
python_version_file,
version_repeated_file,
docker_compose_file,
)
def test_update_version_in_files(version_files, file_regression):
old_version = "1.2.3"
new_version = "2.0.0"
bump.update_version_in_files(
old_version, new_version, version_files, encoding="utf-8"
)
file_contents = ""
for filepath in version_files:
with open(filepath, "r", encoding="utf-8") as f:
file_contents += f.read()
file_regression.check(file_contents, extension=".txt")
def test_partial_update_of_file(version_repeated_file, file_regression):
old_version = "1.2.3"
new_version = "2.0.0"
regex = "version"
location = f"{version_repeated_file}:{regex}"
bump.update_version_in_files(old_version, new_version, [location], encoding="utf-8")
with open(version_repeated_file, "r", encoding="utf-8") as f:
file_regression.check(f.read(), extension=".json")
def test_random_location(random_location_version_file, file_regression):
old_version = "1.2.3"
new_version = "2.0.0"
location = f"{random_location_version_file}:version.+Commitizen"
bump.update_version_in_files(old_version, new_version, [location], encoding="utf-8")
with open(random_location_version_file, "r", encoding="utf-8") as f:
file_regression.check(f.read(), extension=".lock")
def test_duplicates_are_change_with_no_regex(
random_location_version_file, file_regression
):
old_version = "1.2.3"
new_version = "2.0.0"
location = f"{random_location_version_file}:version"
bump.update_version_in_files(old_version, new_version, [location], encoding="utf-8")
with open(random_location_version_file, "r", encoding="utf-8") as f:
file_regression.check(f.read(), extension=".lock")
def METHOD_NAME(
multiple_versions_increase_string, file_regression
):
old_version = "1.2.9"
new_version = "1.2.10"
location = f"{multiple_versions_increase_string}:version"
bump.update_version_in_files(old_version, new_version, [location], encoding="utf-8")
with open(multiple_versions_increase_string, "r", encoding="utf-8") as f:
file_regression.check(f.read(), extension=".txt")
def test_version_bump_reduce_string_length(
multiple_versions_reduce_string, file_regression
):
old_version = "1.2.10"
new_version = "2.0.0"
location = f"{multiple_versions_reduce_string}:version"
bump.update_version_in_files(old_version, new_version, [location], encoding="utf-8")
with open(multiple_versions_reduce_string, "r", encoding="utf-8") as f:
file_regression.check(f.read(), extension=".txt")
def test_file_version_inconsistent_error(
commitizen_config_file, inconsistent_python_version_file, version_repeated_file
):
version_files = [
commitizen_config_file,
inconsistent_python_version_file,
version_repeated_file,
]
old_version = "1.2.3"
new_version = "2.0.0"
with pytest.raises(CurrentVersionNotFoundError) as excinfo:
bump.update_version_in_files(
old_version,
new_version,
version_files,
check_consistency=True,
encoding="utf-8",
)
expected_msg = (
f"Current version 1.2.3 is not found in {inconsistent_python_version_file}.\n"
"The version defined in commitizen configuration and the ones in "
"version_files are possibly inconsistent."
)
assert expected_msg in str(excinfo.value)
def test_multiple_versions_to_bump(
multiple_versions_to_update_poetry_lock, file_regression
):
old_version = "1.2.9"
new_version = "1.2.10"
location = f"{multiple_versions_to_update_poetry_lock}:version"
bump.update_version_in_files(old_version, new_version, [location], encoding="utf-8")
with open(multiple_versions_to_update_poetry_lock, "r", encoding="utf-8") as f:
file_regression.check(f.read(), extension=".toml")
|
2,970 |
list
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class DomainsOperations(object):
"""DomainsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "1.6".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "1.6"
self.config = config
def METHOD_NAME(
self, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of domains for the current tenant.
:param filter: The filter to apply to the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Domain
:rtype:
~azure.graphrbac.models.DomainPaged[~azure.graphrbac.models.Domain]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.METHOD_NAME.metadata['url']
path_format_arguments = {
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DomainPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DomainPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
METHOD_NAME.metadata = {'url': '/{tenantID}/domains'}
def get(
self, domain_name, custom_headers=None, raw=False, **operation_config):
"""Gets a specific domain in the current tenant.
:param domain_name: name of the domain.
:type domain_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Domain or ClientRawResponse if raw=true
:rtype: ~azure.graphrbac.models.Domain or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'domainName': self._serialize.url("domain_name", domain_name, 'str'),
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Domain', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/{tenantID}/domains/{domainName}'}
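# Rough usage sketch (assumed client wiring: these operations are normally reached
# through a generated GraphRbacManagementClient rather than instantiated directly,
# and the filter string is only an illustration of OData syntax):
#
#   domain = graph_client.domains.get("contoso.onmicrosoft.com")
#   print(domain.name)
#   for d in graph_client.domains.METHOD_NAME(filter="isVerified eq true"):
#       print(d.name)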
|
2,971 |
wait for permission
|
from datetime import datetime, timedelta
from backup.config import Config, Setting
from backup.time import Time
from backup.exceptions import GoogleCredGenerateError, KnownError, LogicError, ensureKey
from aiohttp import ClientSession
from injector import inject
from .driverequests import DriveRequester
from backup.logger import getLogger
from backup.creds import Creds
import asyncio
logger = getLogger(__name__)
SCOPE = 'https://www.googleapis.com/auth/drive.file'
class AuthCodeQuery:
@inject
def __init__(self, config: Config, session: ClientSession, time: Time, drive: DriveRequester):
self.session = session
self.config = config
self.drive = drive
self.time = time
self.client_id: str = None
self.client_secret: str = None
self.device_code: str = None
self.verification_url: str = None
self.user_code: str = None
self.check_interval: timedelta = timedelta(seconds=5)
self.expiration: datetime = time.now()
self.last_check = time.now()
async def requestCredentials(self, client_id: str, client_secret: str):
self.client_id = client_id
self.client_secret = client_secret
request_data = {
'client_id': self.client_id,
'scope': SCOPE
}
resp = await self.session.post(self.config.get(Setting.DRIVE_DEVICE_CODE_URL), data=request_data, timeout=30)
if resp.status != 200:
raise GoogleCredGenerateError(f"Google responded with error status HTTP {resp.status}. Please verify your credentials are set up correctly.")
data = await resp.json()
self.device_code = str(ensureKey("device_code", data, "Google's authorization request"))
self.verification_url = str(ensureKey("verification_url", data, "Google's authorization request"))
self.user_code = str(ensureKey("user_code", data, "Google's authorization request"))
self.expiration = self.time.now() + timedelta(seconds=int(ensureKey("expires_in", data, "Google's authorization request")))
self.check_interval = timedelta(seconds=int(ensureKey("interval", data, "Google's authorization request")))
async def METHOD_NAME(self) -> Creds:
if not self.device_code:
raise LogicError("Please call requestCredentials() first")
error_count = 0
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'device_code': self.device_code,
'grant_type': 'urn:ietf:params:oauth:grant-type:device_code'
}
while self.expiration > self.time.now():
start = self.time.now()
resp = None
try:
resp = await self.session.post(self.config.get(Setting.DRIVE_TOKEN_URL), data=data, timeout=self.check_interval.total_seconds())
try:
reply = await resp.json()
except Exception:
reply = {}
if resp.status == 403:
if reply.get("error", "") == "slow_down":
# google wants us to chill out, so do that
await asyncio.sleep(self.check_interval.total_seconds())
else:
# Google says no
logger.error(f"Getting credentials from Google failed with HTTP 403 and error: {reply.get('error', 'unspecified')}")
raise GoogleCredGenerateError("Google refused the request to connect your account, either because you rejected it or they were set up incorrectly.")
elif resp.status == 428:
# Google says PEBKAC
logger.info(f"Waiting for you to authenticate with Google at {self.verification_url}")
elif resp.status / 100 != 2:
# Mysterious error
logger.error(f"Getting credentials from Google failed with HTTP {resp.status} and error: {reply.get('error', 'unspecified')}")
raise GoogleCredGenerateError("Failed unexpectedly while trying to reach Google. See the add-on logs for details.")
else:
# got the token, return it
return Creds.load(self.time, reply, id=self.client_id, secret=self.client_secret)
except KnownError:
raise
except Exception as e:
logger.error("Error while trying to retrieve credentials from Google")
logger.printException(e)
# Allowing 10 errors is arbitrary, but prevents us from just erroring out forever in the background
error_count += 1
if error_count > 10:
raise GoogleCredGenerateError("Failed unexpectedly too many times while attempting to reach Google. See the logs for details.")
finally:
if resp is not None:
resp.release()
# Make sure we never query more than google says we should
remainder = self.check_interval - (self.time.now() - start)
if remainder > timedelta(seconds=0):
await asyncio.sleep(remainder.total_seconds())
logger.error("Getting credentials from Google expired, please try again")
raise GoogleCredGenerateError("Credentials expired while waiting for you to authorize with Google")
|
2,972 |
files
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""doc
"""
import sys
import datetime
import os
import yaml
import random
import shutil
import six
import warnings
import glob
from utils.util import get_last_dir
class AttrDict(dict):
    def __init__(self, d=None, **kwargs):
        if d is None:
            d = {}
        if kwargs:
            d.update(**kwargs)
for k, v in d.items():
setattr(self, k, v)
# Class attributes
# for k in self.__class__.__dict__.keys():
# if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
# setattr(self, k, getattr(self, k))
def __setattr__(self, name, value):
if isinstance(value, (list, tuple)):
value = [
self.__class__(x) if isinstance(x, dict) else x for x in value
]
elif isinstance(value, dict) and not isinstance(value, self.__class__):
value = self.__class__(value)
super(AttrDict, self).__setattr__(name, value)
super(AttrDict, self).__setitem__(name, value)
__setitem__ = __setattr__
def __getattr__(self, attr):
try:
value = super(AttrDict, self).__getitem__(attr)
except KeyError:
            # log.warn("%s attribute does not exist, returning None" % attr)
            warnings.warn("%s attribute does not exist, returning None" % attr)
value = None
return value
def update(self, e=None, **f):
d = e or dict()
d.update(f)
for k in d:
setattr(self, k, d[k])
def pop(self, k, d=None):
delattr(self, k)
        return super(AttrDict, self).pop(k, d)
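# Illustrative behaviour of AttrDict (a sketch; the keys are arbitrary examples):
#
#   cfg = AttrDict({"model": {"hidden_size": 128}, "lr": 0.01})
#   cfg.model.hidden_size   # -> 128; nested dicts become AttrDicts on assignment
#   cfg.missing_key         # -> None, with a warning instead of an AttributeError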
def make_dir(path):
"""Build directory"""
if not os.path.exists(path):
os.makedirs(path)
def load_config(config_file):
"""Load config file"""
with open(config_file) as f:
if hasattr(yaml, 'FullLoader'):
config = yaml.load(f, Loader=yaml.FullLoader)
else:
config = yaml.load(f)
return config
def create_necessary_dirs(config):
"""Create some necessary directories to save some important files.
"""
config.log_dir = os.path.join(config.log_dir, config.task_name)
config.save_dir = os.path.join(config.save_dir, config.task_name)
config.output_dir = os.path.join(config.output_dir, config.task_name)
make_dir(config.log_dir)
make_dir(config.save_dir)
make_dir(config.output_dir)
def save_files(config):
"""Save config file so that we can know the config when we look back
"""
filelist = config.files2saved
targetpath = config.log_dir
if filelist is not None:
for file_or_dir in filelist:
if os.path.isdir(file_or_dir):
last_name = get_last_dir(file_or_dir)
dst = os.path.join(targetpath, last_name)
try:
shutil.copytree(file_or_dir, dst)
except Exception as e:
print(e)
print("backup %s to %s" % (file_or_dir, targetpath))
else:
for filename in METHOD_NAME(METHOD_NAME=file_or_dir):
if os.path.isfile(filename):
print("backup %s to %s" % (filename, targetpath))
shutil.copy2(filename, targetpath)
else:
print("%s is not existed." % filename)
def METHOD_NAME(curr_dir='./', METHOD_NAME='*.py'):
for i in glob.glob(os.path.join(curr_dir, METHOD_NAME)):
yield i
def prepare_config(config_file, isCreate=False, isSave=False):
if os.path.isfile(config_file):
config = load_config(config_file)
config = AttrDict(config)
else:
print("%s is not a yaml file" % config_file)
        raise FileNotFoundError("%s is not a yaml file" % config_file)
if isCreate:
create_necessary_dirs(config)
if isSave:
save_files(config)
config.model_dir = config.save_dir
return config
|
2,973 |
copy
|
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Classes and functions to manage package tags"""
import collections
import METHOD_NAME
from collections.abc import Mapping
import spack.error
import spack.util.spack_json as sjson
def _get_installed_package_names():
"""Returns names of packages installed in the active environment."""
specs = spack.environment.installed_specs()
return [spec.name for spec in specs]
def packages_with_tags(tags, installed, skip_empty):
"""
Returns a dict, indexed by tag, containing lists of names of packages
containing the tag or, if no tags, for all available tags.
Arguments:
tags (list or None): list of tags of interest or None for all
installed (bool): True if want names of packages that are installed;
otherwise, False if want all packages with the tag
skip_empty (bool): True if exclude tags with no associated packages;
otherwise, False if want entries for all tags even when no such
tagged packages
"""
    tag_pkgs = collections.defaultdict(list)
spec_names = _get_installed_package_names() if installed else []
keys = spack.repo.PATH.tag_index if tags is None else tags
for tag in keys:
packages = [
name for name in spack.repo.PATH.tag_index[tag] if not installed or name in spec_names
]
if packages or not skip_empty:
tag_pkgs[tag] = packages
return tag_pkgs
class TagIndex(Mapping):
"""Maps tags to list of packages."""
def __init__(self, repository):
self._tag_dict = collections.defaultdict(list)
self.repository = repository
@property
def tags(self):
return self._tag_dict
def to_json(self, stream):
sjson.dump({"tags": self._tag_dict}, stream)
@staticmethod
def from_json(stream, repository):
d = sjson.load(stream)
if not isinstance(d, dict):
raise TagIndexError("TagIndex data was not a dict.")
if "tags" not in d:
raise TagIndexError("TagIndex data does not start with 'tags'")
r = TagIndex(repository=repository)
for tag, packages in d["tags"].items():
r[tag].extend(packages)
return r
def __getitem__(self, item):
return self._tag_dict[item]
def __iter__(self):
return iter(self._tag_dict)
def __len__(self):
return len(self._tag_dict)
def METHOD_NAME(self):
"""Return a deep copy of this index."""
clone = TagIndex(repository=self.repository)
clone._tag_dict = METHOD_NAME.deepcopy(self._tag_dict)
return clone
def get_packages(self, tag):
"""Returns all packages associated with the tag."""
return self.tags[tag] if tag in self.tags else []
def merge(self, other):
"""Merge another tag index into this one.
Args:
other (TagIndex): tag index to be merged
"""
other = other.METHOD_NAME() # defensive copy.
for tag in other.tags:
if tag not in self.tags:
self.tags[tag] = other.tags[tag]
continue
spkgs, opkgs = self.tags[tag], other.tags[tag]
self.tags[tag] = sorted(list(set(spkgs + opkgs)))
def update_package(self, pkg_name):
"""Updates a package in the tag index.
Args:
pkg_name (str): name of the package to be removed from the index
"""
pkg_cls = self.repository.get_pkg_class(pkg_name)
# Remove the package from the list of packages, if present
for pkg_list in self._tag_dict.values():
if pkg_name in pkg_list:
pkg_list.remove(pkg_name)
# Add it again under the appropriate tags
for tag in getattr(pkg_cls, "tags", []):
tag = tag.lower()
self._tag_dict[tag].append(pkg_cls.name)
class TagIndexError(spack.error.SpackError):
"""Raised when there is a problem with a TagIndex."""
|
2,974 |
readline complete
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Readline-Based Command-Line Interface of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import readline
import six
from tensorflow.python.debug.cli import base_ui
from tensorflow.python.debug.cli import debugger_cli_common
class ReadlineUI(base_ui.BaseUI):
"""Readline-based Command-line UI."""
def __init__(self, on_ui_exit=None, config=None):
base_ui.BaseUI.__init__(self, on_ui_exit=on_ui_exit, config=config)
self._init_input()
def _init_input(self):
readline.parse_and_bind("set editing-mode emacs")
# Disable default readline delimiter in order to receive the full text
# (not just the last word) in the completer.
readline.set_completer_delims("\n")
readline.set_completer(self.METHOD_NAME)
readline.parse_and_bind("tab: complete")
self._input = six.moves.input
def METHOD_NAME(self, text, state):
context, prefix, except_last_word = self._analyze_tab_complete_input(text)
candidates, _ = self._tab_completion_registry.get_completions(context,
prefix)
candidates = [(except_last_word + candidate) for candidate in candidates]
return candidates[state]
def run_ui(self,
init_command=None,
title=None,
title_color=None,
enable_mouse_on_start=True):
"""Run the CLI: See the doc of base_ui.BaseUI.run_ui for more details."""
print(title)
if init_command is not None:
self._dispatch_command(init_command)
exit_token = self._ui_loop()
if self._on_ui_exit:
self._on_ui_exit()
return exit_token
def _ui_loop(self):
while True:
command = self._get_user_command()
exit_token = self._dispatch_command(command)
if exit_token is not None:
return exit_token
def _get_user_command(self):
print("")
return self._input(self.CLI_PROMPT).strip()
def _dispatch_command(self, command):
"""Dispatch user command.
Args:
command: (str) Command to dispatch.
Returns:
An exit token object. None value means that the UI loop should not exit.
A non-None value means the UI loop should exit.
"""
if command in self.CLI_EXIT_COMMANDS:
# Explicit user command-triggered exit: EXPLICIT_USER_EXIT as the exit
# token.
return debugger_cli_common.EXPLICIT_USER_EXIT
try:
prefix, args, output_file_path = self._parse_command(command)
except SyntaxError as e:
print(str(e))
return
if self._command_handler_registry.is_registered(prefix):
try:
screen_output = self._command_handler_registry.dispatch_command(
prefix, args, screen_info=None)
except debugger_cli_common.CommandLineExit as e:
return e.exit_token
else:
screen_output = debugger_cli_common.RichTextLines([
self.ERROR_MESSAGE_PREFIX + "Invalid command prefix \"%s\"" % prefix
])
self._display_output(screen_output)
if output_file_path:
try:
screen_output.write_to_file(output_file_path)
print("Wrote output to %s" % output_file_path)
except Exception: # pylint: disable=broad-except
print("Failed to write output to %s" % output_file_path)
def _display_output(self, screen_output):
for line in screen_output.lines:
print(line)
|
2,975 |
get subjects
|
# -*- coding: utf-8 -*-
#
# utils.py
#
# This file is part of MUESLI.
#
# Copyright (C) 2011, Matthias Kuemmerer <matthias (at) matthias-k.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import pyramid.security
from collections import defaultdict
import yaml
import jwt
from muesli.types import Term
import muesli
preferences = [\
{'penalty': 1, 'name': 'Gut'},
{'penalty': 3, 'name': 'Mittel'},
{'penalty': 10,'name': 'Schlecht'},
{'penalty': 100, 'name': 'Gar nicht'}]
penalty_names = dict([[pref['penalty'], pref['name']] for pref in preferences])
ghostpenalty = 20000
ghostcapacity = 10000
lpsolve = '/usr/bin/lp_solve'
students_unhappiness = 50
modes = [['off', 'Keine Anmeldung'],
['direct', 'Direkte Anmeldung'],
['prefs', 'Praeferenzen'],
['static', 'Weder An- noch Abmeldung']]
categories = [{'id': 'assignment', 'name': 'Übungszettel'},
{'id': 'exam', 'name': 'Klausur'},
{'id': 'practical_assignment', 'name': 'Praktische Übung'},
{'id': 'presence_assignment', 'name': 'Präsenzübung'},
{'id': 'mock_exam', 'name': 'Probeklausur'}]
class Configuration:
def __init__(self, filename):
with open(filename, 'r', encoding='utf-8') as config_file:
self.data = yaml.safe_load(config_file.read())
def __getitem__(self, key):
return self.data[key]
def get(self, key, default):
return self.data.get(key, default)
#TutorRights:
editAllTutorials = 'editAllTutorials'
editOwnTutorials = 'editOwnTutorial'
editNoTutorials = 'editNoTutorial'
tutorRights = [[editAllTutorials, 'Punkte zu allen Tutorien eintragen'],
[editOwnTutorials, 'Punkte zu eigenen Tutorien eintragen'],
[editNoTutorials, 'Keine Punkte eintragen']]
def METHOD_NAME(user=None):
hisSubjects = list(muesli.config['subjects'])
    if user and user.subject not in hisSubjects:
hisSubjects.append(user.subject)
hisSubjects = list(zip(hisSubjects,hisSubjects))
return hisSubjects
def getSemesterLimit():
now = datetime.datetime.now()
semesterlimit = now.year
if now.month < 4:
semesterlimit -= 1
term = '1' if now.month>=4 and now.month <=9 else '2'
semesterlimit = '%4i%s' % (semesterlimit, term)
return semesterlimit
def getTerms():
first_term = muesli.config['terms']['first_term']
terms_per_year = muesli.config['terms']['terms_per_year']
now = datetime.datetime.now()
year = now.year
last_term = year * 10 + 11
terms = []
term = first_term
while term < last_term:
terms.append([Term(str(term)),Term(str(term))])
if term % 10 >= terms_per_year:
term = term + 11 - (term % 10)
else:
term += 1
return terms
class PermissionInfo:
def __init__(self, request):
self.request = request
def has_permission(self, permission):
return self.request.has_permission(permission, self.request.context)
class UserInfo:
def __init__(self, user):
self.user = user
def is_loggedin(self):
return self.user != None
def is_admin(self):
if self.is_loggedin():
return self.user.is_admin
else: return False
def is_assistant(self):
if self.is_loggedin():
return self.user.is_assistant
else: return False
def is_tutor(self, lecture):
if self.is_loggedin():
return self.user in lecture.tutors
else: return False
def is_tutor_of_tutorials(self, tutorials):
if self.is_loggedin():
return all(self.user == tutorial.tutor for tutorial in tutorials)
else:
return False
def listStrings(strings):
if len(strings)==0:
return ''
elif len(strings) == 1:
return strings[0]
else:
part1 = strings[:-1]
part2 = strings[-1]
return ', '.join(part1)+' und '+part2
class DictOfObjects:
def __init__(self, createFunction):
self.d = {}
self.createFunction=createFunction
def __getitem__(self, key):
if not key in self.d:
self.d[key] = self.createFunction()
return self.d[key]
def __setitem__(self, key, value):
self.d[key] = value
def __iter__(self):
return self.d.__iter__()
def __str__(self):
return "%r" % self.d
class AutoVivification(dict):
"""Implementation of perl's autovivification feature.
from: http://stackoverflow.com/q/635483"""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
def update(self, other):
for key in other:
if isinstance(other[key], dict):
self[key].update(other[key])
else: self[key] = other[key]
def update_available(self, other):
for key in other:
if isinstance(other[key], dict):
if key in self:
if isinstance(other[key], AutoVivification):
self[key].update_available(other[key])
else:
self[key].update(other[key])
else: self[key] = other[key]
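# Illustrative behaviour (a sketch): missing keys vivify into nested instances, so
# deep assignments work without pre-creating the intermediate dicts.
#
#   a = AutoVivification()
#   a['lecture']['tutorial']['points'] = 3
#   a['lecture']['tutorial']['points']   # -> 3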
#From http://blogs.fluidinfo.com/terry/2012/05/26/autovivification-in-python-nested-defaultdicts-with-a-specific-final-type/
def autovivify(levels=1, final=dict):
return (defaultdict(final) if levels < 2 else
defaultdict(lambda: autovivify(levels - 1, final)))
def create_jwt_token(userid, expiration=None, **claims):
payload = claims
payload['sub'] = userid
payload['iat'] = iat = datetime.datetime.utcnow()
if expiration is None:
expiration = datetime.timedelta(days=90)
payload['exp'] = iat + expiration
token = jwt.encode(payload, muesli.config["api"]["JWT_SECRET_TOKEN"], algorithm='HS512')
return token
|
2,976 |
main
|
#!/usr/bin/env python
################################################################################
#
# MODULE: g.compare.md5.py
#
# AUTHOR(S): Luca Delucchi <[email protected]>
#
# PURPOSE: Check if two GRASS maps are the same
#
# COPYRIGHT: (c) 2012-2020 by Luca Delucchi and the GRASS Development Team
#
# This program is free software under the GNU General
# Public License (>=v2). Read the file COPYING that
# comes with GRASS for details.
#
################################################################################
# %module
# % description: Checks if two GRASS GIS maps are identical.
# % keyword: general
# % keyword: map management
# % keyword: list
# %end
# %flag
# % key: g
# % description: Return output in shell script style (0 fail, 1 success)
# %end
# %flag
# % key: c
# % description: Does not consider the color table for raster
# %end
# %flag
# % key: t
# % description: Does not consider the topology for vector
# %end
# %option
# % key: ainput
# % type: string
# % gisprompt: old,file,file
# % description: Name of first map to check
# % key_desc: name
# % required : yes
# %end
# %option
# % key: binput
# % type: string
# % gisprompt: old,file,file
# % description: Name of second map to check
# % required : yes
# %end
# %option G_OPT_M_DATATYPE
# % key: type
# % options: raster,vector
# % answer: raster
# %end
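# Example invocation (illustrative only; the map names are placeholders):
#   g.compare.md5 ainput=elevation_2019 binput=elevation_2020 type=raster -g
# With -g the module prints 1 when the maps are identical and 0 otherwise.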
import os
import sys
import hashlib
import grass.script as grass
def md5(fileName, excludeLine="", includeLine=""):
"""Compute md5 hash of the specified file"""
m = hashlib.md5()
try:
fd = open(fileName, "rb")
except IOError:
print("Unable to open the file in read mode")
return
content = fd.readlines()
fd.close()
for eachLine in content:
if excludeLine and eachLine.startswith(excludeLine):
continue
m.update(eachLine)
m.update(includeLine.encode("utf-8"))
return m.hexdigest()
def checkfile(name, formatt, shell):
"""Check if the input file exists"""
if formatt == "raster":
typ = "Raster"
inp = grass.find_file(name)
elif formatt == "vector":
typ = "Vector"
inp = grass.find_file(name, formatt)
if inp["name"] == "":
if shell:
grass.message(0)
return
else:
grass.fatal(_("%s %s does not exists" % (typ, name)))
else:
return inp
def checkmd5(a, b, shell):
"""Check if md5 is the same for both files"""
# check if the files exist and if the user have permission to read them
if os.path.exists(a) and os.path.exists(b):
if not os.access(a, os.R_OK):
if shell:
grass.message(0)
return
else:
grass.fatal(_("You have no permission to read %s file" % a))
if not os.access(b, os.R_OK):
if shell:
grass.message(0)
return
else:
grass.fatal(_("You have no permission to read %s file" % b))
# calculate the md5
amd5 = md5(a)
bmd5 = md5(b)
# check if md5 is the same
if amd5 == bmd5:
return 1
else:
return 0
    # if both files don't exist, that is fine
elif not os.path.exists(a) and not os.path.exists(b):
return 1
    # one file exists and the other does not; that is a mismatch
else:
        # add further elif branches above if other missing-file cases need handling
return 0
def METHOD_NAME():
# check if we are in grass
gisbase = os.getenv("GISBASE")
if not gisbase:
grass.fatal(_("$GISBASE not defined"))
return 0
# check if shell script output is required
if flags["g"]:
shell = True
err = 0
good = 1
else:
shell = False
err = _("The two maps are different")
good = _("The two maps are identical")
# options
typ = options["type"]
ainp = checkfile(options["ainput"], typ, shell)
binp = checkfile(options["binput"], typ, shell)
variables = grass.core.gisenv()
# files to investigate to check identity
# for now color2 is ignored
raster_folder = ["cats", "cell", "cellhd", "cell_misc", "fcell", "colr", "hist"]
if flags["c"]:
raster_folder.remove("colr")
vector_folder = ["coor", "head", "topo"]
if flags["t"]:
vector_folder.remove("topo")
# path to the mapsets
aloc = os.path.join(
variables["GISDBASE"], variables["LOCATION_NAME"], ainp["mapset"]
)
bloc = os.path.join(
variables["GISDBASE"], variables["LOCATION_NAME"], binp["mapset"]
)
# variable for color table
md5color = 1
# start analysis for raster
if typ == "raster":
# for each folder
for fold in raster_folder:
# create the path to folder
apath = os.path.join(aloc, fold, ainp["name"])
bpath = os.path.join(bloc, fold, binp["name"])
            # if the folder is cell_misc, check the files inside it individually
if fold == "cell_misc":
adirlist = os.listdir(apath)
bdirlist = os.listdir(bpath)
# if the files are the same check md5sum for each file
if adirlist == bdirlist:
for i in adirlist:
                        afile = os.path.join(apath, i)
                        bfile = os.path.join(bpath, i)
                        if not checkmd5(afile, bfile, shell):
grass.message(err)
return
# if the files are different return false
else:
grass.message(err)
return
# check md5sum for each file
else:
if not checkmd5(apath, bpath, shell):
grass.message(err)
return
grass.message(good)
return
# start analysis for vector
elif typ == "vector":
for fold in vector_folder:
apath = os.path.join(aloc, "vector", ainp["name"], fold)
bpath = os.path.join(bloc, "vector", binp["name"], fold)
if not checkmd5(apath, bpath, shell):
grass.message(err)
return
grass.message(good)
return
if __name__ == "__main__":
options, flags = grass.parser()
sys.exit(METHOD_NAME())
|
2,977 |
test narrow jsonpath node silce filter
|
from typing import Optional
import pytest
from jsonpath_ng import (
Child,
Fields,
Index,
Slice,
This,
)
from jsonpath_ng.ext import parse
from jsonpath_ng.ext.filter import (
Expression,
Filter,
)
from reconcile.utils.jsonpath import (
apply_constraint_to_path,
jsonpath_parts,
narrow_jsonpath_node,
parse_jsonpath,
remove_prefix_from_path,
sortable_jsonpath_string_repr,
)
#
# test jsonpath splitting
#
def test_jsonpath_parts():
path = parse("$.a.b.c")
assert jsonpath_parts(path) == [parse("$"), parse("a"), parse("b"), parse("c")]
def test_jsonpath_parts_root_only():
path = parse("$")
assert jsonpath_parts(path) == [parse("$")]
def test_jsonpath_parts_with_index():
path = parse("a[0]")
assert jsonpath_parts(path) == [parse("a"), Index(0)]
def test_jsonpath_parts_with_slice_all():
path = parse("a[*]")
assert jsonpath_parts(path) == [parse("a"), Slice(None, None, None)]
def test_jsonpath_parts_with_filter():
path = parse("a.b[?(@.c=='c')].d")
assert jsonpath_parts(path) == [
parse("a"),
parse("b"),
Filter(expressions=[Expression(Child(This(), Fields("c")), "==", "c")]),
parse("d"),
]
def test_jsonpath_parts_with_filter_ignore():
path = parse("a.b[?(@.c=='c')].d")
assert jsonpath_parts(path, ignore_filter=True) == [
parse("a"),
parse("b"),
parse("d"),
]
def test_jsonpath_parts_ignore_root():
path = parse("$.a.b")
assert jsonpath_parts(path, ignore_root=True) == [
parse("a"),
parse("b"),
]
def test_jsonpath_parts_without_ignore_root():
path = parse("$.a.b")
assert jsonpath_parts(path) == [
parse("$"),
parse("a"),
parse("b"),
]
#
# test narrow jsonpath node
#
def test_narrow_jsonpath_node_field_equal():
assert narrow_jsonpath_node(parse("a"), parse("a")) == parse("a")
def test_narrow_jsonpath_node_field_not_equal():
assert not narrow_jsonpath_node(parse("a"), parse("b"))
def test_narrow_jsonpath_node_index_equal():
assert narrow_jsonpath_node(Index(0), Index(0)) == Index(0)
def test_narrow_jsonpath_node_index_slice():
assert narrow_jsonpath_node(Index(0), Slice(None, None, None)) == Index(0)
def test_narrow_jsonpath_node_slice_index():
assert narrow_jsonpath_node(Slice(None, None, None), Index(0)) == Index(0)
def test_narrow_jsonpath_node_slice_slice():
assert narrow_jsonpath_node(
Slice(None, None, None), Slice(None, None, None)
) == Slice(None, None, None)
def test_narrow_jsonpath_node_filter_equal():
assert narrow_jsonpath_node(
Filter(expressions=[Expression(Child(This(), Fields("c")), "==", "c")]),
Filter(expressions=[Expression(Child(This(), Fields("c")), "==", "c")]),
) == Filter(expressions=[Expression(Child(This(), Fields("c")), "==", "c")])
def test_narrow_jsonpath_node_filter_not_equal():
assert (
narrow_jsonpath_node(
Filter(expressions=[Expression(Child(This(), Fields("c")), "==", "c")]),
Filter(expressions=[Expression(Child(This(), Fields("d")), "==", "d")]),
)
is None
)
def test_narrow_jsonpath_node_filter_slice():
filter = Filter(expressions=[Expression(Child(This(), Fields("c")), "==", "c")])
assert (
narrow_jsonpath_node(
filter,
Slice(None, None, None),
)
== filter
)
def METHOD_NAME():
filter = Filter(expressions=[Expression(Child(This(), Fields("c")), "==", "c")])
assert (
narrow_jsonpath_node(
Slice(None, None, None),
filter,
)
== filter
)
def test_narrow_jsonpath_node_index_filter():
assert narrow_jsonpath_node(
Index(0),
Filter(expressions=[Expression(Child(This(), Fields("c")), "==", "c")]),
) == Index(0)
def test_narrow_jsonpath_node_filter_index():
assert narrow_jsonpath_node(
Filter(expressions=[Expression(Child(This(), Fields("c")), "==", "c")]),
Index(0),
) == Index(0)
def test_narrow_jsonpath_node_field_wildcard():
assert narrow_jsonpath_node(parse("a"), parse("*")) == parse("a")
def test_narrow_jsonpath_node_wildcard_field():
assert narrow_jsonpath_node(parse("*"), parse("a")) == parse("a")
def test_narrow_jsonpath_node_wildcard_wildcard():
assert narrow_jsonpath_node(parse("*"), parse("*")) == parse("*")
#
# narrow jsonpath expression
#
def test_apply_constraint_to_path_equal():
assert apply_constraint_to_path(parse("a.b.c"), parse("a.b.c")) == parse("a.b.c")
def test_apply_constraint_to_longer_path():
assert apply_constraint_to_path(parse("a.b[*].c.f"), parse("a.b[0].c")) == parse(
"a.b[0].c.f"
)
def test_apply_constraint_to_shorter_path():
assert apply_constraint_to_path(parse("a.b[*]"), parse("a.b[0].c")) == parse(
"a.b[0]"
)
def test_apply_constraint_to_unrelated_path():
assert not apply_constraint_to_path(parse("a.b[*]"), parse("d.e[0].f"))
def test_apply_incompatible_constraint_to_path():
assert apply_constraint_to_path(parse("a.b[0].f"), parse("a.b[1].c")) == parse(
"a.b[0].f"
)
def test_apply_partially_incompatible_constraint_to_path():
assert apply_constraint_to_path(
parse("a.b[*].c[0].d"), parse("a.b[1].c[1]")
) == parse("a.b[1].c[0].d")
def test_apply_field_constraint_to_wildcard_path():
assert apply_constraint_to_path(parse("a.*.c"), parse("a.b.c.d")) == parse("a.b.c")
#
# test sortable jsonpath representation
#
@pytest.mark.parametrize(
"jsonpath, sortable_jsonpath",
[
("a.b[0].c", "a.b.[00000].c"),
("a.b[10].c", "a.b.[00010].c"),
("[10]", "[00010]"),
("a.[10][*]", "a.[00010].*"),
],
)
def test_sortable_jsonpath_string_repr(jsonpath: str, sortable_jsonpath: str):
assert sortable_jsonpath_string_repr(parse(jsonpath), 5) == sortable_jsonpath
#
# test remove prefix from path
#
@pytest.mark.parametrize(
"path, prefix, expected",
[
# simple prefix
("a.b.c.d.e", "a.b.c", "d.e"),
        # the path does not have the defined prefix
("a.b.c.d.e", "f.g", None),
# path and prefix are the same
("a.b.c.d.e", "a.b.c.d.e", None),
# path with index
("a.b[1].c", "a", "b[1].c"),
# path with index after the prefix
("a.b[1].c", "a.b", "[1].c"),
# prefix is root
("a.b", "$", "a.b"),
# prefix and path are root
("$", "$", None),
],
)
def test_remove_prefix_from_path(path: str, prefix: str, expected: Optional[str]):
expected_path = parse(expected) if expected else None
assert remove_prefix_from_path(parse(path), parse(prefix)) == expected_path
#
# P A R S I N G
#
@pytest.mark.parametrize(
"path, rendered",
[
# simple path for the regular parser
("a.b.c", "a.b.c"),
# this one requires the extended parser
("a[?(@.b=='b')]", "a.[?[Expression(Child(This(), Fields('b')) == 'b')]]"),
],
)
def test_parse_jsonpath(path: str, rendered: str):
assert str(parse_jsonpath(path)) == rendered
|
2,978 |
xrecover
|
# ed25519.py - Optimized version of the reference implementation of Ed25519
# downloaded from https://github.com/pyca/ed25519
#
# Written in 2011? by Daniel J. Bernstein <[email protected]>
# 2013 by Donald Stufft <[email protected]>
# 2013 by Alex Gaynor <[email protected]>
# 2013 by Greg Price <[email protected]>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""
NB: This code is not safe for use with secret keys or secret data.
The only safe use of this code is for verifying signatures on public messages.
Functions for computing the public key of a secret key and for signing
a message are included, namely publickey_unsafe and signature_unsafe,
for testing purposes only.
The root of the problem is that Python's long-integer arithmetic is
not designed for use in cryptography. Specifically, it may take more
or less time to execute an operation depending on the values of the
inputs, and its memory access patterns may also depend on the inputs.
This opens it to timing and cache side-channel attacks which can
disclose data to an attacker. We rely on Python's long-integer
arithmetic, so we cannot handle secrets without risking their disclosure.
"""
import hashlib
from typing import List, NewType, Tuple
Point = NewType("Point", Tuple[int, int, int, int])
__version__ = "1.0.dev1"
b = 256
q: int = 2**255 - 19
l: int = 2**252 + 27742317777372353535851937790883648493
COORD_MASK = ~(1 + 2 + 4 + (1 << b - 1))
COORD_HIGH_BIT = 1 << b - 2
def H(m: bytes) -> bytes:
return hashlib.sha512(m).digest()
def pow2(x: int, p: int) -> int:
"""== pow(x, 2**p, q)"""
while p > 0:
x = x * x % q
p -= 1
return x
def inv(z: int) -> int:
"""$= z^{-1} mod q$, for z != 0"""
# Adapted from curve25519_athlon.c in djb's Curve25519.
z2 = z * z % q # 2
z9 = pow2(z2, 2) * z % q # 9
z11 = z9 * z2 % q # 11
z2_5_0 = (z11 * z11) % q * z9 % q # 31 == 2^5 - 2^0
z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q # 2^10 - 2^0
z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q # ...
z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q
z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q
z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q
z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q
z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q # 2^250 - 2^0
return pow2(z2_250_0, 5) * z11 % q # 2^255 - 2^5 + 11 = q - 2
d = -121665 * inv(121666) % q
I = pow(2, (q - 1) // 4, q)
def METHOD_NAME(y: int) -> int:
xx = (y * y - 1) * inv(d * y * y + 1)
x = pow(xx, (q + 3) // 8, q)
if (x * x - xx) % q != 0:
x = (x * I) % q
if x % 2 != 0:
x = q - x
return x
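# Recovery sketch: curve points satisfy -x^2 + y^2 = 1 + d*x^2*y^2 (mod q), so
# x^2 = (y^2 - 1) / (d*y^2 + 1). Because q = 5 (mod 8), xx**((q + 3) // 8) is a
# square root of either xx or -xx; multiplying by I = sqrt(-1) fixes the latter
# case, and the final negation selects the root with even parity.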
By = 4 * inv(5)
Bx = METHOD_NAME(By)
B = Point((Bx % q, By % q, 1, (Bx * By) % q))
ident = Point((0, 1, 1, 0))
def edwards_add(P: Point, Q: Point) -> Point:
# This is formula sequence 'addition-add-2008-hwcd-3' from
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
(x1, y1, z1, t1) = P
(x2, y2, z2, t2) = Q
a = (y1 - x1) * (y2 - x2) % q
b = (y1 + x1) * (y2 + x2) % q
c = t1 * 2 * d * t2 % q
dd = z1 * 2 * z2 % q
e = b - a
f = dd - c
g = dd + c
h = b + a
x3 = e * f
y3 = g * h
t3 = e * h
z3 = f * g
return Point((x3 % q, y3 % q, z3 % q, t3 % q))
def edwards_double(P: Point) -> Point:
# This is formula sequence 'dbl-2008-hwcd' from
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
(x1, y1, z1, _) = P
a = x1 * x1 % q
b = y1 * y1 % q
c = 2 * z1 * z1 % q
# dd = -a
e = ((x1 + y1) * (x1 + y1) - a - b) % q
g = -a + b # dd + b
f = g - c
h = -a - b # dd - b
x3 = e * f
y3 = g * h
t3 = e * h
z3 = f * g
return Point((x3 % q, y3 % q, z3 % q, t3 % q))
def scalarmult(P: Point, e: int) -> Point:
if e == 0:
return ident
Q = scalarmult(P, e // 2)
Q = edwards_double(Q)
if e & 1:
Q = edwards_add(Q, P)
return Q
# Bpow[i] == scalarmult(B, 2**i)
Bpow: List[Point] = []
def make_Bpow() -> None:
P = B
for _ in range(253):
Bpow.append(P)
P = edwards_double(P)
make_Bpow()
def scalarmult_B(e: int) -> Point:
"""
Implements scalarmult(B, e) more efficiently.
"""
# scalarmult(B, l) is the identity
e = e % l
P = ident
for i in range(253):
if e & 1:
P = edwards_add(P, Bpow[i])
e = e // 2
assert e == 0, e
return P
def encodeint(y: int) -> bytes:
return y.to_bytes(b // 8, "little")
def encodepoint(P: Point) -> bytes:
(x, y, z, _) = P
zi = inv(z)
x = (x * zi) % q
y = (y * zi) % q
xbit = (x & 1) << (b - 1)
y_result = y & ~xbit # clear x bit
    y_result |= xbit  # set correct x bit value
return encodeint(y_result)
def decodeint(s: bytes) -> int:
return int.from_bytes(s, "little")
def decodepoint(s: bytes) -> Point:
y = decodeint(s) & ~(1 << b - 1) # y without the highest bit
x = METHOD_NAME(y)
if x & 1 != bit(s, b - 1):
x = q - x
P = Point((x, y, 1, (x * y) % q))
if not isoncurve(P):
raise ValueError("decoding point that is not on curve")
return P
def decodecoord(s: bytes) -> int:
a = decodeint(s[: b // 8])
# clear mask bits
a &= COORD_MASK
# set high bit
a |= COORD_HIGH_BIT
return a
def bit(h: bytes, i: int) -> int:
return (h[i // 8] >> (i % 8)) & 1
def publickey_unsafe(sk: bytes) -> bytes:
"""
Not safe to use with secret keys or secret data.
See module docstring. This function should be used for testing only.
"""
h = H(sk)
a = decodecoord(h)
A = scalarmult_B(a)
return encodepoint(A)
def Hint(m: bytes) -> int:
return decodeint(H(m))
def signature_unsafe(m: bytes, sk: bytes, pk: bytes) -> bytes:
"""
Not safe to use with secret keys or secret data.
See module docstring. This function should be used for testing only.
"""
h = H(sk)
a = decodecoord(h)
r = Hint(h[b // 8 : b // 4] + m)
R = scalarmult_B(r)
S = (r + Hint(encodepoint(R) + pk + m) * a) % l
return encodepoint(R) + encodeint(S)
def isoncurve(P: Point) -> bool:
(x, y, z, t) = P
return (
z % q != 0
and x * y % q == z * t % q
and (y * y - x * x - z * z - d * t * t) % q == 0
)
class SignatureMismatch(Exception):
pass
def checkvalid(s: bytes, m: bytes, pk: bytes) -> None:
"""
Not safe to use when any argument is secret.
See module docstring. This function should be used only for
verifying public signatures of public messages.
"""
if len(s) != b // 4:
raise ValueError("signature length is wrong")
if len(pk) != b // 8:
raise ValueError("public-key length is wrong")
R = decodepoint(s[: b // 8])
A = decodepoint(pk)
S = decodeint(s[b // 8 : b // 4])
h = Hint(encodepoint(R) + pk + m)
(x1, y1, z1, _) = P = scalarmult_B(S)
(x2, y2, z2, _) = Q = edwards_add(R, scalarmult(A, h))
if (
not isoncurve(P)
or not isoncurve(Q)
or (x1 * z2 - x2 * z1) % q != 0
or (y1 * z2 - y2 * z1) % q != 0
):
raise SignatureMismatch("signature does not pass verification")
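# Minimal round-trip sketch (for tests only, per the module docstring; the secret
# key below is an arbitrary illustrative value, never a real key):
#
#   sk = bytes(range(32))
#   pk = publickey_unsafe(sk)
#   sig = signature_unsafe(b"message", sk, pk)
#   checkvalid(sig, b"message", pk)   # raises SignatureMismatch if invalid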
|
2,979 |
gen keywords bucket init
|
#!/usr/bin/python3
#
# Copyright (C) 2021 FMSoft <https://www.fmsoft.cn>
#
# This file is a part of Purring Cat 2, a HVML parser and interpreter.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Author: Vincent Wei <https://github.com/VincentWei>
"""
Make HVML keywords table:
1. Read 'data/keywords.txt' file.
2. Generate the keywords.h and keywords.inc
"""
import argparse
def read_cfgs_fin(fin):
cfgs = {}
line_no = 1
line = fin.readline()
while line:
s = line.strip()
if s == "" or s[0] == '#':
line_no = line_no + 1
line = fin.readline()
continue
cfg = s.split()
prefix = cfg[0]
for i in range(len(cfg)):
if i > 0:
if not prefix in cfgs:
cfgs[prefix] = []
cfgs[prefix].append(cfg[i])
line = fin.readline()
return cfgs
def read_cfgs(fn):
fin = open(fn, "r")
cfgs = read_cfgs_fin(fin)
for cfg in cfgs:
cfgs[cfg] = sorted(cfgs[cfg])
fin.close()
return cfgs
def gen_PURC_KEYWORD(idx, nr, first, prefix, kw):
# generate enums: PCHVML_KEYWORD_<PREFIX>_<KEYWORD>
if idx == first:
s = "/*=*/"
else:
s = " "
return " /* %*d */ %s PCHVML_KEYWORD_%s_%s" % (len(str(nr)), idx, s, prefix.upper(), kw.upper().replace('-', '_'))
def gen_pchvml_keyword(idx, nr, first, prefix, sz, kw):
# generate cfgs: { 0, "<KEYWORD>" }
if idx == first:
s = "/*=*/"
else:
s = " "
return " /* ATOM_BUCKET_%-*s */ %s { 0, \"%s\" }" % (len(str(sz)), prefix.upper(), s, kw)
def METHOD_NAME(prefix, start, end):
# generate func_calls:
# keywords_bucket_init(keywords, start, end, ATOM_BUCKET_<PREFIX>)
return " keywords_bucket_init(keywords, %s, %s, ATOM_BUCKET_%s)" % (start, end, prefix.upper())
def process_header_fn(fout, fin, cfgs):
line_no = 1
line = fin.readline()
while line:
s = line.strip()
if s == "%%keywords%%":
idx = 0
nr = 0
for prefix in cfgs:
kws = cfgs[prefix]
nr += len(kws)
for prefix in cfgs:
kws = cfgs[prefix]
first = idx
for kw in kws:
s = gen_PURC_KEYWORD(idx, nr, first, prefix, kw)
fout.write("%s,\n" % s)
idx += 1
else:
fout.write(line)
line_no = line_no + 1
line = fin.readline()
def process_header(dst, src, cfgs):
fout = open(dst, "w")
fin = open(src, "r")
process_header_fn(fout, fin, cfgs)
fout.close()
fin.close()
def process_source_fn(fout, fin, cfgs):
line_no = 1
line = fin.readline()
while line:
s = line.strip()
if s == "%%keywords%%":
sz = 0;
for prefix in cfgs:
if sz < len(prefix):
sz = len(prefix)
idx = 0
nr = 0
for prefix in cfgs:
kws = cfgs[prefix]
nr += len(kws)
for prefix in cfgs:
kws = cfgs[prefix]
first = idx
for kw in kws:
s = gen_pchvml_keyword(idx, nr, first, prefix, sz, kw)
fout.write("%s,\n" % s)
idx += 1
elif s == "%%keywords_bucket_init%%":
start = 0
for prefix in cfgs:
kws = cfgs[prefix]
nr = len(kws)
end = start + nr;
s = METHOD_NAME(prefix, start, end)
fout.write("%s;\n" % s)
start = end
else:
fout.write(line)
line_no = line_no + 1
line = fin.readline()
def process_source(dst, src, cfgs):
fout = open(dst, "w")
fin = open(src, "r")
process_source_fn(fout, fin, cfgs)
fout.close()
fin.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generating keywords table')
parser.add_argument('--dest')
parser.add_argument('--kw_h')
parser.add_argument('--kw_inc')
parser.add_argument('--kw_foo')
parser.add_argument('--kw_txt')
parser.add_argument('--kw_h_in')
parser.add_argument('--kw_inc_in')
parser.add_argument('--without-print', action='store_true')
args = parser.parse_args()
# parser.print_help()
# print(args)
cfgs = read_cfgs(args.kw_txt)
process_header(args.kw_h, args.kw_h_in, cfgs)
process_source(args.kw_inc, args.kw_inc_in, cfgs)
|
2,980 |
map
|
import os
from functools import wraps
import bottle
import numpy as np
from yt.fields.derived_field import ValidateSpatial
from yt.utilities.lib.misc_utilities import get_color_bounds
from yt.utilities.png_writer import write_png_to_string
from yt.visualization.fixed_resolution import FixedResolutionBuffer
from yt.visualization.image_writer import apply_colormap
local_dir = os.path.dirname(__file__)
def exc_writeout(f):
import traceback
@wraps(f)
def func(*args, **kwargs):
try:
rv = f(*args, **kwargs)
return rv
except Exception:
traceback.print_exc(None, open("temp.exc", "w"))
raise
return func
class PannableMapServer:
_widget_name = "pannable_map"
def __init__(self, data, field, takelog, cmap, route_prefix=""):
self.data = data
self.ds = data.ds
self.field = field
self.cmap = cmap
bottle.route(f"{route_prefix}/map/:field/:L/:x/:y.png")(self.METHOD_NAME)
bottle.route(f"{route_prefix}/map/:field/:L/:x/:y.png")(self.METHOD_NAME)
bottle.route(f"{route_prefix}/")(self.index)
bottle.route(f"{route_prefix}/:field")(self.index)
bottle.route(f"{route_prefix}/index.html")(self.index)
bottle.route(f"{route_prefix}/list", "GET")(self.list_fields)
# This is a double-check, since we do not always mandate this for
# slices:
self.data[self.field] = self.data[self.field].astype("float64")
bottle.route(f"{route_prefix}/static/:path", "GET")(self.static)
self.takelog = takelog
self._lock = False
for unit in ["Gpc", "Mpc", "kpc", "pc"]:
v = self.ds.domain_width[0].in_units(unit).value
if v > 1:
break
self.unit = unit
self.px2unit = self.ds.domain_width[0].in_units(unit).value / 256
def lock(self):
import time
while self._lock:
time.sleep(0.01)
self._lock = True
def unlock(self):
self._lock = False
def METHOD_NAME(self, field, L, x, y):
if "," in field:
field = tuple(field.split(","))
cmap = self.cmap
dd = 1.0 / (2.0 ** (int(L)))
relx = int(x) * dd
rely = int(y) * dd
DW = self.ds.domain_right_edge - self.ds.domain_left_edge
xl = self.ds.domain_left_edge[0] + relx * DW[0]
yl = self.ds.domain_left_edge[1] + rely * DW[1]
xr = xl + dd * DW[0]
yr = yl + dd * DW[1]
try:
self.lock()
w = 256 # pixels
data = self.data[field]
frb = FixedResolutionBuffer(self.data, (xl, xr, yl, yr), (w, w))
cmi, cma = get_color_bounds(
self.data["px"],
self.data["py"],
self.data["pdx"],
self.data["pdy"],
data,
self.ds.domain_left_edge[0],
self.ds.domain_right_edge[0],
self.ds.domain_left_edge[1],
self.ds.domain_right_edge[1],
dd * DW[0] / (64 * 256),
dd * DW[0],
)
finally:
self.unlock()
if self.takelog:
cmi = np.log10(cmi)
cma = np.log10(cma)
to_plot = apply_colormap(
np.log10(frb[field]), color_bounds=(cmi, cma), cmap_name=cmap
)
else:
to_plot = apply_colormap(
frb[field], color_bounds=(cmi, cma), cmap_name=cmap
)
rv = write_png_to_string(to_plot)
return rv
def index(self, field=None):
if field is not None:
self.field = field
return bottle.static_file(
"map_index.html", root=os.path.join(local_dir, "html")
)
def static(self, path):
if path[-4:].lower() in (".png", ".gif", ".jpg"):
bottle.response.headers["Content-Type"] = f"image/{path[-3:].lower()}"
elif path[-4:].lower() == ".css":
bottle.response.headers["Content-Type"] = "text/css"
elif path[-3:].lower() == ".js":
bottle.response.headers["Content-Type"] = "text/javascript"
full_path = os.path.join(os.path.join(local_dir, "html"), path)
return open(full_path).read()
def list_fields(self):
d = {}
# Add fluid fields (only gas for now)
for ftype in self.ds.fluid_types:
d[ftype] = []
for f in self.ds.derived_field_list:
if f[0] != ftype:
continue
# Discard fields which need ghost zones for now
df = self.ds.field_info[f]
if any(isinstance(v, ValidateSpatial) for v in df.validators):
continue
# Discard cutting plane fields
if "cutting" in f[1]:
continue
active = f[1] == self.field
d[ftype].append((f, active))
print(self.px2unit, self.unit)
return {
"data": d,
"px2unit": self.px2unit,
"unit": self.unit,
"active": self.field,
}
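# Rough serving sketch (assumes a loaded yt dataset `ds`; the field, colormap and
# port are illustrative choices, and yt normally wires this class up through its
# own map-server command):
#
#   import bottle
#   source = ds.slice(2, 0.5)
#   PannableMapServer(source, ("gas", "density"), True, "viridis")
#   bottle.run(host="localhost", port=8080)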
|
2,981 |
test import metadata languages
|
from django.db import connection
from django.db.models import Q
from django.urls import reverse
from le_utils.constants import content_kinds
from rest_framework.test import APITestCase
from kolibri.core.content import base_models
from kolibri.core.content import models as content
from kolibri.core.content.constants.schema_versions import CONTENT_SCHEMA_VERSION
from kolibri.core.content.test.test_channel_upgrade import ChannelBuilder
class ImportMetadataTestCase(APITestCase):
@classmethod
def setUpTestData(cls):
cls.builder = ChannelBuilder()
cls.builder.insert_into_default_db()
content.ContentNode.objects.all().update(available=True)
cls.root = content.ContentNode.objects.get(id=cls.builder.root_node["id"])
cls.node = cls.root.get_descendants().exclude(kind=content_kinds.TOPIC).first()
cls.all_nodes = cls.node.get_ancestors(include_self=True)
cls.files = content.File.objects.filter(contentnode__in=cls.all_nodes)
cls.assessmentmetadata = content.AssessmentMetaData.objects.filter(
contentnode__in=cls.all_nodes
)
cls.localfiles = content.LocalFile.objects.filter(
files__in=cls.files
).distinct()
cls.languages = content.Language.objects.filter(
Q(id__in=cls.files.values_list("lang_id", flat=True))
| Q(id__in=cls.all_nodes.values_list("lang_id", flat=True))
)
cls.through_tags = content.ContentNode.tags.through.objects.filter(
contentnode__in=cls.all_nodes
)
cls.tags = content.ContentTag.objects.filter(
id__in=cls.through_tags.values_list("contenttag_id", flat=True)
).distinct()
def _assert_data(self, Model, queryset):
response = self.client.get(
reverse("kolibri:core:importmetadata-detail", kwargs={"pk": self.node.id})
)
fields = Model._meta.fields
BaseModel = getattr(base_models, Model.__name__, Model)
field_names = {field.column for field in BaseModel._meta.fields}
if hasattr(BaseModel, "_mptt_meta"):
field_names.add(BaseModel._mptt_meta.parent_attr)
field_names.add(BaseModel._mptt_meta.tree_id_attr)
field_names.add(BaseModel._mptt_meta.left_attr)
field_names.add(BaseModel._mptt_meta.right_attr)
field_names.add(BaseModel._mptt_meta.level_attr)
for response_data, obj in zip(response.data[Model._meta.db_table], queryset):
# Ensure that we are not returning any empty objects
self.assertNotEqual(response_data, {})
for field in fields:
if field.column in field_names:
value = response_data[field.column]
if hasattr(field, "from_db_value"):
value = field.from_db_value(value, None, connection, None)
self.assertEqual(value, getattr(obj, field.column))
def test_import_metadata_nodes(self):
self._assert_data(content.ContentNode, self.all_nodes)
def test_import_metadata_files(self):
self._assert_data(content.File, self.files)
def test_import_metadata_assessmentmetadata(self):
self._assert_data(content.AssessmentMetaData, self.assessmentmetadata)
def test_import_metadata_localfiles(self):
self._assert_data(content.LocalFile, self.localfiles)
def METHOD_NAME(self):
self._assert_data(content.Language, self.languages)
def test_import_metadata_through_tags(self):
self._assert_data(content.ContentNode.tags.through, self.through_tags)
def test_import_metadata_tags(self):
self._assert_data(content.ContentTag, self.tags)
def test_schema_version_too_low(self):
response = self.client.get(
reverse("kolibri:core:importmetadata-detail", kwargs={"pk": self.node.id})
+ "?schema_version=1"
)
self.assertEqual(response.status_code, 400)
def test_schema_version_too_high(self):
response = self.client.get(
reverse("kolibri:core:importmetadata-detail", kwargs={"pk": self.node.id})
+ "?schema_version={}".format(int(CONTENT_SCHEMA_VERSION) + 1)
)
self.assertEqual(response.status_code, 400)
def test_schema_version_just_right(self):
response = self.client.get(
reverse("kolibri:core:importmetadata-detail", kwargs={"pk": self.node.id})
+ "?schema_version={}".format(CONTENT_SCHEMA_VERSION)
)
self.assertEqual(response.status_code, 200)
|
2,982 |
list connected cluster user credential output
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'ListConnectedClusterUserCredentialResult',
'AwaitableListConnectedClusterUserCredentialResult',
'list_connected_cluster_user_credential',
'list_connected_cluster_user_credential_output',
]
@pulumi.output_type
class ListConnectedClusterUserCredentialResult:
"""
The list of credential result response.
"""
def __init__(__self__, hybrid_connection_config=None, kubeconfigs=None):
if hybrid_connection_config and not isinstance(hybrid_connection_config, dict):
raise TypeError("Expected argument 'hybrid_connection_config' to be a dict")
pulumi.set(__self__, "hybrid_connection_config", hybrid_connection_config)
if kubeconfigs and not isinstance(kubeconfigs, list):
raise TypeError("Expected argument 'kubeconfigs' to be a list")
pulumi.set(__self__, "kubeconfigs", kubeconfigs)
@property
@pulumi.getter(name="hybridConnectionConfig")
def hybrid_connection_config(self) -> 'outputs.HybridConnectionConfigResponse':
"""
Contains the REP (rendezvous endpoint) and “Sender” access token.
"""
return pulumi.get(self, "hybrid_connection_config")
@property
@pulumi.getter
def kubeconfigs(self) -> Sequence['outputs.CredentialResultResponse']:
"""
Base64-encoded Kubernetes configuration file.
"""
return pulumi.get(self, "kubeconfigs")
class AwaitableListConnectedClusterUserCredentialResult(ListConnectedClusterUserCredentialResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListConnectedClusterUserCredentialResult(
hybrid_connection_config=self.hybrid_connection_config,
kubeconfigs=self.kubeconfigs)
def list_connected_cluster_user_credential(authentication_method: Optional[Union[str, 'AuthenticationMethod']] = None,
client_proxy: Optional[bool] = None,
cluster_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListConnectedClusterUserCredentialResult:
"""
Gets cluster user credentials of the connected cluster with a specified resource group and name.
:param Union[str, 'AuthenticationMethod'] authentication_method: The mode of client authentication.
:param bool client_proxy: Boolean value to indicate whether the request is for client side proxy or not
:param str cluster_name: The name of the Kubernetes cluster on which get is called.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['authenticationMethod'] = authentication_method
__args__['clientProxy'] = client_proxy
__args__['clusterName'] = cluster_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:kubernetes/v20220501preview:listConnectedClusterUserCredential', __args__, opts=opts, typ=ListConnectedClusterUserCredentialResult).value
return AwaitableListConnectedClusterUserCredentialResult(
hybrid_connection_config=pulumi.get(__ret__, 'hybrid_connection_config'),
kubeconfigs=pulumi.get(__ret__, 'kubeconfigs'))
@_utilities.lift_output_func(list_connected_cluster_user_credential)
def METHOD_NAME(authentication_method: Optional[pulumi.Input[Union[str, 'AuthenticationMethod']]] = None,
client_proxy: Optional[pulumi.Input[bool]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListConnectedClusterUserCredentialResult]:
"""
Gets cluster user credentials of the connected cluster with a specified resource group and name.
:param Union[str, 'AuthenticationMethod'] authentication_method: The mode of client authentication.
:param bool client_proxy: Boolean value to indicate whether the request is for client side proxy or not
:param str cluster_name: The name of the Kubernetes cluster on which get is called.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
|
2,983 |
test dataset not present
|
from http import HTTPStatus
import io
from pathlib import Path
from typing import Callable, Optional
from flask import Response
import pytest
import requests
from pbench.server import JSONOBJECT
from pbench.server.cache_manager import CacheManager, CacheType, Inventory
from pbench.server.database.models.datasets import Dataset, DatasetNotFound
class TestDatasetsAccess:
@pytest.fixture()
def query_get_as(self, client, server_config, more_datasets, pbench_drb_token):
"""
Helper fixture to perform the API query and validate an expected
return status.
Args:
client: Flask test API client fixture
server_config: Pbench config fixture
more_datasets: Dataset construction fixture
pbench_drb_token: Authenticated user token fixture
"""
def query_api(
dataset: str, target: str, expected_status: HTTPStatus
) -> requests.Response:
try:
dataset_id = Dataset.query(name=dataset).resource_id
except DatasetNotFound:
dataset_id = dataset # Allow passing deliberately bad value
headers = {"authorization": f"bearer {pbench_drb_token}"}
response = client.get(
f"{server_config.rest_uri}/datasets/{dataset_id}/inventory/{target}",
headers=headers,
)
assert response.status_code == expected_status
return response
return query_api
def test_get_no_dataset(self, query_get_as):
response = query_get_as(
"nonexistent-dataset", "metadata.log", HTTPStatus.NOT_FOUND
)
assert response.json == {"message": "Dataset 'nonexistent-dataset' not found"}
def METHOD_NAME(self, query_get_as):
response = query_get_as("fio_2", "metadata.log", HTTPStatus.NOT_FOUND)
assert response.json == {
"message": "The dataset tarball named 'random_md5_string4' is not found"
}
def test_unauthorized_access(self, query_get_as):
response = query_get_as("test", "metadata.log", HTTPStatus.FORBIDDEN)
assert response.json == {
"message": "User drb is not authorized to READ a resource owned by test with private access"
}
@pytest.mark.parametrize("key", (None, "", "subdir1"))
def test_path_is_directory(self, query_get_as, monkeypatch, key):
def mock_get_inventory(_s, _d: str, _t: str):
return {"type": CacheType.DIRECTORY, "stream": None}
monkeypatch.setattr(CacheManager, "get_inventory", mock_get_inventory)
monkeypatch.setattr(Path, "is_file", lambda self: False)
monkeypatch.setattr(Path, "exists", lambda self: True)
response = query_get_as("fio_2", key, HTTPStatus.BAD_REQUEST)
assert response.json == {
"message": "The specified path does not refer to a regular file"
}
def test_not_a_file(self, query_get_as, monkeypatch):
def mock_get_inventory(_s, _d: str, _t: str):
return {"type": CacheType.SYMLINK, "stream": None}
monkeypatch.setattr(CacheManager, "get_inventory", mock_get_inventory)
monkeypatch.setattr(Path, "is_file", lambda self: False)
monkeypatch.setattr(Path, "exists", lambda self: False)
response = query_get_as("fio_2", "subdir1/f1_sym", HTTPStatus.BAD_REQUEST)
assert response.json == {
"message": "The specified path does not refer to a regular file"
}
def test_dataset_in_given_path(self, query_get_as, monkeypatch):
mock_close = False
def call_on_close(_c: Callable):
nonlocal mock_close
mock_close = True
mock_args: Optional[tuple[Path, JSONOBJECT]] = None
exp_stream = io.BytesIO(b"file_as_a_byte_stream")
def mock_get_inventory(_s, _d: str, _t: str):
return {
"name": "f1.json",
"type": CacheType.FILE,
"stream": Inventory(exp_stream, None),
}
response = Response()
monkeypatch.setattr(response, "call_on_close", call_on_close)
def mock_send_file(path_or_file, *args, **kwargs):
nonlocal mock_args
mock_args = (path_or_file, kwargs)
return response
monkeypatch.setattr(CacheManager, "get_inventory", mock_get_inventory)
monkeypatch.setattr(
"pbench.server.api.resources.datasets_inventory.send_file", mock_send_file
)
response = query_get_as("fio_2", "f1.json", HTTPStatus.OK)
assert response.status_code == HTTPStatus.OK
file_content, args = mock_args
assert isinstance(file_content, Inventory)
assert file_content.stream == exp_stream
assert args["as_attachment"] is False
assert args["download_name"] == "f1.json"
assert mock_close
def test_send_fail(self, query_get_as, monkeypatch):
closed = False
def mock_close(_s):
nonlocal closed
closed = True
exp_stream = io.BytesIO(b"file_as_a_byte_stream")
def mock_get_inventory(_s, _d: str, _t: str):
return {
"name": "f1.json",
"type": CacheType.FILE,
"stream": Inventory(exp_stream, None),
}
def mock_send_file(path_or_file, *args, **kwargs):
raise Exception("I'm failing to succeed")
monkeypatch.setattr(exp_stream, "close", mock_close)
monkeypatch.setattr(CacheManager, "get_inventory", mock_get_inventory)
monkeypatch.setattr(
"pbench.server.api.resources.datasets_inventory.send_file", mock_send_file
)
query_get_as("fio_2", "f1.json", HTTPStatus.INTERNAL_SERVER_ERROR)
assert closed
def test_get_inventory(self, query_get_as, monkeypatch):
exp_stream = io.BytesIO(b"file_as_a_byte_stream")
def mock_get_inventory(_s, _d: str, _t: str):
return {
"name": "f1.json",
"type": CacheType.FILE,
"stream": Inventory(exp_stream, None),
}
monkeypatch.setattr(CacheManager, "get_inventory", mock_get_inventory)
response = query_get_as("fio_2", "f1.json", HTTPStatus.OK)
assert response.status_code == HTTPStatus.OK
assert response.text == "file_as_a_byte_stream"
assert response.headers["content-type"] == "application/json"
assert response.headers["content-disposition"] == "inline; filename=f1.json"
|
2,984 |
test python
|
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test foreach-dataset command"""
import os.path as op
import sys
from pathlib import Path
import pytest
from datalad.api import create
from datalad.distribution.dataset import Dataset
from datalad.tests.utils_pytest import (
assert_false,
assert_greater,
assert_in,
assert_not_in,
assert_status,
eq_,
get_deeply_nested_structure,
ok_clean_git,
swallow_outputs,
with_tempfile,
)
def _without_command(results):
"""A helper to tune up results so that they lack 'command'
which is guaranteed to differ between different cmd types
"""
out = []
for r in results:
r = r.copy()
r.pop('command')
out.append(r)
return out
@with_tempfile(mkdir=True)
def check_basic_resilience(populator, path=None):
ds = populator(path)
ds.save()
kwargs = dict(recursive=True)
res_external = ds.foreach_dataset(
[sys.executable, '-c', 'from datalad.distribution.dataset import Dataset; ds=Dataset("."); print(ds.path)'],
**kwargs)
res_python = ds.foreach_dataset("ds.path", cmd_type='eval', **kwargs)
# a sample python function to pass to foreach
def get_path(ds, **kwargs):
return ds.path
res_python_func = ds.foreach_dataset(get_path, **kwargs)
assert_status('ok', res_external)
assert_status('ok', res_python)
# consistency checks
eq_(len(res_external), len(res_python))
eq_(len(res_external), len(res_python_func))
eq_(_without_command(res_python), _without_command(res_python_func))
# Test correct order for bottom-up vs top-down
topdown_dss = [ds.path] + ds.subdatasets(result_xfm='paths', bottomup=False, **kwargs)
eq_(topdown_dss, [_['result'] for _ in res_python])
bottomup_dss = ds.subdatasets(result_xfm='paths', recursive=True, bottomup=True) + [ds.path]
eq_(bottomup_dss, [_['result'] for _ in ds.foreach_dataset("ds.path", bottomup=True, cmd_type='eval', **kwargs)])
# more radical example - cleanup
# Make all datasets dirty
for d in bottomup_dss:
(Path(d) / "dirt").write_text("")
res_clean = ds.foreach_dataset(['git', 'clean', '-f'], jobs=10, **kwargs)
assert_status('ok', res_clean)
# no dirt should be left
for d in bottomup_dss:
assert_false((Path(d) / "dirt").exists())
if populator is get_deeply_nested_structure:
ok_clean_git(ds.path, index_modified=[ds.pathobj / 'subds_modified'])
else:
ok_clean_git(ds.path)
@pytest.mark.parametrize("populator", [
# empty dataset
create,
# very much not empty dataset
get_deeply_nested_structure,
])
def test_basic_resilience(populator):
check_basic_resilience(populator)
@with_tempfile(mkdir=True)
def check_python_eval(cmd, path):
ds = Dataset(path).create()
res = ds.foreach_dataset(cmd, cmd_type='eval')
eq_(len(res), 1)
expected_variables = {'ds', 'pwd', 'refds'}
eq_(expected_variables.intersection(res[0]['result']), expected_variables)
# besides expected, there could be a few more ATM; +5 arbitrarily just to test
# that we are not leaking too much
assert_greater(len(expected_variables) + 5, len(res[0]['result']))
@with_tempfile(mkdir=True)
def check_python_exec(cmd, path):
ds = Dataset(path).create()
sub = ds.create('sub') # create subdataset for better coverage etc
# but exec has no result
res = ds.foreach_dataset(cmd, cmd_type='exec')
assert_not_in('result', res[0])
# but allows for more complete/interesting setups in which we could import modules etc
cmd2 = 'import os, sys; print(f"DIR: {os.linesep.join(dir())}")'
with swallow_outputs() as cmo:
res1 = ds.foreach_dataset(cmd2, output_streams='capture', cmd_type='exec')
assert_in('ds', res1[0]['stdout'])
assert_in('sys', res1[0]['stdout'])
eq_(res1[0]['stderr'], '')
# default renderer for each dataset
assert cmo.out.startswith(f'foreach-dataset(ok): {path}')
assert f'foreach-dataset(ok): {sub.path}' in cmo.out
with swallow_outputs() as cmo:
res2 = ds.foreach_dataset(cmd2, output_streams='relpath', cmd_type='exec')
# still have the same res
assert res1 == res2
# but we have "fancier" output
assert cmo.out.startswith('DIR: ')
# 2nd half should be identical to 1st half but with lines prefixed with sub/ path
lines = cmo.out.splitlines()
half = len(lines) // 2
assert [op.join('sub', l) for l in lines[:half]] == lines[half:]
assert 'foreach-dataset(ok)' not in cmo.out
def METHOD_NAME():
check_python_eval("dir()")
check_python_exec("dir()")
def dummy_dir(*args, **kwargs):
"""Ensure that we pass all placeholders as kwargs"""
assert not args
return kwargs
check_python_eval(dummy_dir) # direct function invocation
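# Summary comments derived from the checks above (illustrative only): with
# cmd_type='eval' every per-dataset record carries a 'result' entry (e.g. the
# evaluated value of "ds.path"), whereas cmd_type='exec' records no 'result'
# and instead exposes captured 'stdout'/'stderr' when output_streams='capture'
# is requested.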
|
2,985 |
supportpartners
|
# -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from django.apps import apps
from django.contrib.auth import get_user_model
from django.db import models
class OrganisationManager(models.Manager):
def get_queryset(self):
return super(OrganisationManager, self).get_queryset().extra(
select={
'lower_name': 'lower(rsr_organisation.name)'
}
).order_by('lower_name')
class OrganisationQuerySet(models.QuerySet):
def has_location(self):
return self.filter(primary_location__isnull=False)
def partners(self, role):
"return the organisations in the queryset that are partners of type role"
return self.filter(partnerships__iati_organisation_role__exact=role).distinct()
def allpartners(self):
return self.distinct()
def fieldpartners(self):
Partnership = apps.get_model('rsr.partnership')
return self.partners(Partnership.IATI_IMPLEMENTING_PARTNER)
def fundingpartners(self):
Partnership = apps.get_model('rsr.partnership')
return self.partners(Partnership.IATI_FUNDING_PARTNER)
def reportingpartners(self):
Partnership = apps.get_model('rsr.partnership')
return self.partners(Partnership.IATI_REPORTING_ORGANISATION)
def sponsorpartners(self):
Partnership = apps.get_model('rsr.partnership')
return self.partners(Partnership.AKVO_SPONSOR_PARTNER)
def METHOD_NAME(self):
Partnership = apps.get_model('rsr.partnership')
return self.partners(Partnership.IATI_ACCOUNTABLE_PARTNER)
def extendingpartners(self):
Partnership = apps.get_model('rsr.partnership')
return self.partners(Partnership.IATI_EXTENDING_PARTNER)
def supportpartners_with_projects(self):
"""return the organisations in the queryset that are support partners with published
projects, not counting archived projects"""
Partnership = apps.get_model('rsr.partnership')
Project = apps.get_model('rsr.project')
PublishingStatus = apps.get_model('rsr.publishingstatus')
return self.filter(
partnerships__iati_organisation_role=Partnership.IATI_ACCOUNTABLE_PARTNER,
partnerships__project__publishingstatus__status=PublishingStatus.STATUS_PUBLISHED,
partnerships__project__iati_status__in=Project.NOT_SUSPENDED
).distinct()
def ngos(self):
from ..organisation import ORG_TYPE_NGO
return self.filter(organisation_type__exact=ORG_TYPE_NGO)
def governmental(self):
from ..organisation import ORG_TYPE_GOV
return self.filter(organisation_type__exact=ORG_TYPE_GOV)
def commercial(self):
from ..organisation import ORG_TYPE_COM
return self.filter(organisation_type__exact=ORG_TYPE_COM)
def knowledge(self):
from ..organisation import ORG_TYPE_KNO
return self.filter(organisation_type__exact=ORG_TYPE_KNO)
def all_projects(self):
"returns a queryset with all projects that has self as any kind of partner"
Project = apps.get_model('rsr.project')
return Project.objects.of_partners(self).distinct()
def users(self):
return get_user_model().objects.filter(employers__organisation__in=self).distinct()
def all_updates(self):
ProjectUpdate = apps.get_model('rsr.projectupdate')
return ProjectUpdate.objects.filter(user__organisations__in=self).distinct()
def employments(self):
Employment = apps.get_model('rsr.employment')
return Employment.objects.filter(organisation__in=self).distinct()
def content_owned_organisations(self, exclude_orgs=None):
"""Returns a list of Organisations of which these organisations are the content owner.
Includes self, is recursive.
The exclude_orgs parameter is used to avoid recursive calls that
can happen in case there are organisations that set each other as
content owned organisations.
"""
Organisation = apps.get_model('rsr.organisation')
result = set()
for org in self:
result = result | set(
org.content_owned_organisations(exclude_orgs=exclude_orgs).values_list('pk', flat=True)
)
return Organisation.objects.filter(pk__in=result).distinct()
OrgManager = OrganisationManager.from_queryset(OrganisationQuerySet)
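# Illustrative usage sketch (hedged): assuming the Organisation model attaches
# this manager as ``objects = OrgManager()``, which is not shown in this
# module, the chainable helpers defined above compose naturally, e.g.
#     Organisation.objects.fieldpartners()        # implementing partners
#     Organisation.objects.ngos().has_location()  # NGOs with a primary location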
|
2,986 |
get context data
|
"""
This module contains form views for our models that don't need custom handling.
"""
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.exceptions import FieldDoesNotExist
from django.urls import reverse
from django.utils.translation import gettext as _
from django.views.generic.edit import BaseCreateView, BaseUpdateView, ModelFormMixin
from .media import MediaContextMixin
from .mixins import ModelConfirmationContextMixin, ModelTemplateResponseMixin
# pylint: disable=too-many-ancestors
class CustomModelFormMixin(
PermissionRequiredMixin,
ModelTemplateResponseMixin,
ModelFormMixin,
ModelConfirmationContextMixin,
MediaContextMixin,
):
"""
This mixin handles error messages in form views of subclasses of
:class:`~integreat_cms.cms.forms.custom_model_form.CustomModelForm`
"""
#: The suffix to append to the auto-generated candidate template name.
template_name_suffix = "_form"
def get_permission_required(self):
"""
Override this method to override the permission_required attribute.
:return: The permissions that are required for views inheriting from this Mixin
:rtype: ~collections.abc.Iterable
"""
# If the form is submitted via POST, require the change permission
if self.request.method == "POST":
return (f"cms.change_{self.model._meta.model_name}",)
# If the form is just retrieved, require the view permission
return (f"cms.view_{self.model._meta.model_name}",)
@property
def model(self):
"""
Return the model class of this form mixin
:return: The corresponding Django model
:rtype: ~django.db.models.Model
"""
return self.form_class._meta.model
def METHOD_NAME(self, **kwargs):
r"""
Returns a dictionary representing the template context
(see :meth:`~django.views.generic.base.ContextMixin.get_context_data`).
:param \**kwargs: The given keyword arguments
:type \**kwargs: dict
:return: The template context
:rtype: dict
"""
context = super().METHOD_NAME(**kwargs)
context.update({"current_menu_item": f"{self.model._meta.model_name}s"})
return context
def get_form_kwargs(self):
"""
Return the keyword arguments for instantiating the form
:return: The form kwargs
:rtype: dict
"""
kwargs = super().get_form_kwargs()
if self.request.region:
kwargs["additional_instance_attributes"] = {"region": self.request.region}
return kwargs
def get_success_url(self):
"""
Determine the URL to redirect to when the form is successfully validated
:return: The url to redirect on success
:rtype: str
"""
kwargs = {}
try:
# Check whether the model has a slug field
self.model._meta.get_field(self.slug_url_kwarg)
kwargs[self.slug_url_kwarg] = self.object.slug
except FieldDoesNotExist:
# If not, use the primary key field as fallback
kwargs[self.pk_url_kwarg] = self.object.pk
if self.request.region:
kwargs["region_slug"] = self.request.region.slug
return reverse(f"edit_{self.object._meta.model_name}", kwargs=kwargs)
def form_valid(self, form):
"""
Saves the form instance, sets the current object for the view, and redirects to :meth:`get_success_url`.
:param form: The valid form instance
:type form: ~django.forms.ModelForm
:return: A redirection to the success url
:rtype: ~django.http.HttpResponseRedirect
"""
if not form.has_changed():
# Add "no changes" messages
messages.info(self.request, _("No changes made"))
elif self.object:
messages.success(
self.request,
_('{} "{}" was successfully saved').format(
self.object._meta.verbose_name, self.object
),
)
else:
messages.success(
self.request,
_('{} "{}" was successfully created').format(
form.instance._meta.verbose_name, form.instance
),
)
return super().form_valid(form)
def form_invalid(self, form):
"""
Renders a response, providing the invalid form as context.
:param form: The invalid form instance
:type form: ~django.forms.ModelForm
:return: The rendered invalid form
:rtype: ~django.http.HttpResponse
"""
form.add_error_messages(self.request)
return super().form_invalid(form)
class CustomCreateView(CustomModelFormMixin, BaseCreateView):
"""
A view that displays a form for creating a region object, redisplaying the form with validation errors (if
there are any) and saving the object.
"""
class CustomUpdateView(CustomModelFormMixin, BaseUpdateView):
"""
A view that displays a form for editing an existing region object, redisplaying the form with validation errors
(if there are any) and saving changes to the object. This uses a form automatically generated from the object's
model class (unless a form class is manually specified).
"""
|
2,987 |
lkas tx msgs
|
#!/usr/bin/env python3
import enum
import unittest
from panda import Panda
from panda.tests.libpanda import libpanda_py
import panda.tests.safety.common as common
from panda.tests.safety.common import CANPackerPanda, MeasurementSafetyTest
from functools import partial
class SubaruMsg(enum.IntEnum):
Brake_Status = 0x13c
CruiseControl = 0x240
Throttle = 0x40
Steering_Torque = 0x119
Wheel_Speeds = 0x13a
ES_LKAS = 0x122
ES_LKAS_ANGLE = 0x124
ES_Brake = 0x220
ES_Distance = 0x221
ES_Status = 0x222
ES_DashStatus = 0x321
ES_LKAS_State = 0x322
ES_Infotainment = 0x323
ES_UDS_Request = 0x787
ES_HighBeamAssist = 0x121
ES_STATIC_1 = 0x22a
ES_STATIC_2 = 0x325
SUBARU_MAIN_BUS = 0
SUBARU_ALT_BUS = 1
SUBARU_CAM_BUS = 2
def METHOD_NAME(alt_bus, lkas_msg=SubaruMsg.ES_LKAS):
return [[lkas_msg, SUBARU_MAIN_BUS],
[SubaruMsg.ES_Distance, alt_bus],
[SubaruMsg.ES_DashStatus, SUBARU_MAIN_BUS],
[SubaruMsg.ES_LKAS_State, SUBARU_MAIN_BUS],
[SubaruMsg.ES_Infotainment, SUBARU_MAIN_BUS]]
def long_tx_msgs(alt_bus):
return [[SubaruMsg.ES_Brake, alt_bus],
[SubaruMsg.ES_Status, alt_bus]]
def gen2_long_additional_tx_msgs():
return [[SubaruMsg.ES_UDS_Request, SUBARU_CAM_BUS],
[SubaruMsg.ES_HighBeamAssist, SUBARU_MAIN_BUS],
[SubaruMsg.ES_STATIC_1, SUBARU_MAIN_BUS],
[SubaruMsg.ES_STATIC_2, SUBARU_MAIN_BUS]]
def fwd_blacklisted_addr(lkas_msg=SubaruMsg.ES_LKAS):
return {SUBARU_CAM_BUS: [lkas_msg, SubaruMsg.ES_DashStatus, SubaruMsg.ES_LKAS_State, SubaruMsg.ES_Infotainment]}
class TestSubaruSafetyBase(common.PandaSafetyTest, MeasurementSafetyTest):
FLAGS = 0
STANDSTILL_THRESHOLD = 0 # kph
RELAY_MALFUNCTION_ADDR = SubaruMsg.ES_LKAS
RELAY_MALFUNCTION_BUS = SUBARU_MAIN_BUS
FWD_BUS_LOOKUP = {SUBARU_MAIN_BUS: SUBARU_CAM_BUS, SUBARU_CAM_BUS: SUBARU_MAIN_BUS}
FWD_BLACKLISTED_ADDRS = fwd_blacklisted_addr()
MAX_RT_DELTA = 940
RT_INTERVAL = 250000
DRIVER_TORQUE_ALLOWANCE = 60
DRIVER_TORQUE_FACTOR = 50
ALT_MAIN_BUS = SUBARU_MAIN_BUS
ALT_CAM_BUS = SUBARU_CAM_BUS
DEG_TO_CAN = -100
INACTIVE_GAS = 1818
def setUp(self):
self.packer = CANPackerPanda("subaru_global_2017_generated")
self.safety = libpanda_py.libpanda
self.safety.set_safety_hooks(Panda.SAFETY_SUBARU, self.FLAGS)
self.safety.init_tests()
def _set_prev_torque(self, t):
self.safety.set_desired_torque_last(t)
self.safety.set_rt_torque_last(t)
# TODO: this is unused
def _torque_driver_msg(self, torque):
values = {"Steer_Torque_Sensor": torque}
return self.packer.make_can_msg_panda("Steering_Torque", 0, values)
def _speed_msg(self, speed):
values = {s: speed for s in ["FR", "FL", "RR", "RL"]}
return self.packer.make_can_msg_panda("Wheel_Speeds", self.ALT_MAIN_BUS, values)
def _angle_meas_msg(self, angle):
values = {"Steering_Angle": angle}
return self.packer.make_can_msg_panda("Steering_Torque", 0, values)
def _user_brake_msg(self, brake):
values = {"Brake": brake}
return self.packer.make_can_msg_panda("Brake_Status", self.ALT_MAIN_BUS, values)
def _user_gas_msg(self, gas):
values = {"Throttle_Pedal": gas}
return self.packer.make_can_msg_panda("Throttle", 0, values)
def _pcm_status_msg(self, enable):
values = {"Cruise_Activated": enable}
return self.packer.make_can_msg_panda("CruiseControl", self.ALT_MAIN_BUS, values)
class TestSubaruStockLongitudinalSafetyBase(TestSubaruSafetyBase):
def _cancel_msg(self, cancel, cruise_throttle=0):
values = {"Cruise_Cancel": cancel, "Cruise_Throttle": cruise_throttle}
return self.packer.make_can_msg_panda("ES_Distance", self.ALT_MAIN_BUS, values)
def test_cancel_message(self):
# test that we can only send the cancel message (ES_Distance) with inactive throttle (1818) and Cruise_Cancel=1
for cancel in [True, False]:
self._generic_limit_safety_check(partial(self._cancel_msg, cancel), self.INACTIVE_GAS, self.INACTIVE_GAS, 0, 2**12, 1, self.INACTIVE_GAS, cancel)
class TestSubaruLongitudinalSafetyBase(TestSubaruSafetyBase, common.LongitudinalGasBrakeSafetyTest):
MIN_GAS = 808
MAX_GAS = 3400
INACTIVE_GAS = 1818
MAX_POSSIBLE_GAS = 2**12
MIN_BRAKE = 0
MAX_BRAKE = 600
MAX_POSSIBLE_BRAKE = 2**16
MIN_RPM = 0
MAX_RPM = 2400
MAX_POSSIBLE_RPM = 2**12
FWD_BLACKLISTED_ADDRS = {2: [SubaruMsg.ES_LKAS, SubaruMsg.ES_Brake, SubaruMsg.ES_Distance,
SubaruMsg.ES_Status, SubaruMsg.ES_DashStatus,
SubaruMsg.ES_LKAS_State, SubaruMsg.ES_Infotainment]}
def test_rpm_safety_check(self):
self._generic_limit_safety_check(self._send_rpm_msg, self.MIN_RPM, self.MAX_RPM, 0, self.MAX_POSSIBLE_RPM, 1)
def _send_brake_msg(self, brake):
values = {"Brake_Pressure": brake}
return self.packer.make_can_msg_panda("ES_Brake", self.ALT_MAIN_BUS, values)
def _send_gas_msg(self, gas):
values = {"Cruise_Throttle": gas}
return self.packer.make_can_msg_panda("ES_Distance", self.ALT_MAIN_BUS, values)
def _send_rpm_msg(self, rpm):
values = {"Cruise_RPM": rpm}
return self.packer.make_can_msg_panda("ES_Status", self.ALT_MAIN_BUS, values)
class TestSubaruTorqueSafetyBase(TestSubaruSafetyBase, common.DriverTorqueSteeringSafetyTest):
MAX_RATE_UP = 50
MAX_RATE_DOWN = 70
MAX_TORQUE = 2047
def _torque_cmd_msg(self, torque, steer_req=1):
values = {"LKAS_Output": torque, "LKAS_Request": steer_req}
return self.packer.make_can_msg_panda("ES_LKAS", SUBARU_MAIN_BUS, values)
class TestSubaruGen1TorqueStockLongitudinalSafety(TestSubaruStockLongitudinalSafetyBase, TestSubaruTorqueSafetyBase):
FLAGS = 0
TX_MSGS = METHOD_NAME(SUBARU_MAIN_BUS)
class TestSubaruGen2TorqueSafetyBase(TestSubaruTorqueSafetyBase):
ALT_MAIN_BUS = SUBARU_ALT_BUS
ALT_CAM_BUS = SUBARU_ALT_BUS
MAX_RATE_UP = 40
MAX_RATE_DOWN = 40
MAX_TORQUE = 1000
class TestSubaruGen2TorqueStockLongitudinalSafety(TestSubaruStockLongitudinalSafetyBase, TestSubaruGen2TorqueSafetyBase):
FLAGS = Panda.FLAG_SUBARU_GEN2
TX_MSGS = METHOD_NAME(SUBARU_ALT_BUS)
class TestSubaruGen1LongitudinalSafety(TestSubaruLongitudinalSafetyBase, TestSubaruTorqueSafetyBase):
FLAGS = Panda.FLAG_SUBARU_LONG
TX_MSGS = METHOD_NAME(SUBARU_MAIN_BUS) + long_tx_msgs(SUBARU_MAIN_BUS)
class TestSubaruGen2LongitudinalSafety(TestSubaruLongitudinalSafetyBase, TestSubaruGen2TorqueSafetyBase):
FLAGS = Panda.FLAG_SUBARU_LONG | Panda.FLAG_SUBARU_GEN2
TX_MSGS = METHOD_NAME(SUBARU_ALT_BUS) + long_tx_msgs(SUBARU_ALT_BUS) + gen2_long_additional_tx_msgs()
def _rdbi_msg(self, did: int):
return b'\x03\x22' + did.to_bytes(2) + b'\x00\x00\x00\x00'
def _es_uds_msg(self, msg: bytes):
return libpanda_py.make_CANPacket(SubaruMsg.ES_UDS_Request, 2, msg)
def test_es_uds_message(self):
tester_present = b'\x02\x3E\x80\x00\x00\x00\x00\x00'
not_tester_present = b"\x03\xAA\xAA\x00\x00\x00\x00\x00"
button_did = 0x1130
# Tester present is allowed for gen2 long to keep eyesight disabled
self.assertTrue(self._tx(self._es_uds_msg(tester_present)))
# Non-Tester present is not allowed
self.assertFalse(self._tx(self._es_uds_msg(not_tester_present)))
# Only button_did is allowed to be read via UDS
for did in range(0xFFFF):
should_tx = (did == button_did)
self.assertEqual(self._tx(self._es_uds_msg(self._rdbi_msg(did))), should_tx)
# any other msg is not allowed
for sid in range(0xFF):
msg = b'\x03' + sid.to_bytes(1) + b'\x00' * 6
self.assertFalse(self._tx(self._es_uds_msg(msg)))
if __name__ == "__main__":
unittest.main()
|
2,988 |
is manageable
|
# -*- coding: utf-8 -*-
#
# This file is part of SKALE Admin
#
# Copyright (C) 2021 SKALE Labs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import importlib
import ipaddress
import multiprocessing
from functools import wraps
from typing import Callable, Dict, Iterable
from core.schains.firewall.types import IHostFirewallController, SChainRule
logger = logging.getLogger(__name__)
TABLE = 'filter'
CHAIN = 'INPUT'
plock = multiprocessing.Lock()
def refreshed(func: Callable) -> Callable:
@wraps(func)
def wrapper(self, *args, **kwargs):
self.refresh()
return func(self, *args, **kwargs)
return wrapper
def is_like_number(value):
if value is None:
return False
try:
int(value)
except ValueError:
return False
return True
class IptablesController(IHostFirewallController):
def __init__(self, table: str = TABLE, chain: str = CHAIN):
self.table = table
self.chain = chain
self.iptc = importlib.import_module('iptc')
self.iptc = importlib.reload(self.iptc)
def refresh(self):
with plock:
self.iptc.Table(self.table).refresh()
def add_rule(self, rule: SChainRule) -> None:
if not self.has_rule(rule):
rule_d = self.schain_rule_to_rule_d(rule)
with plock:
self.iptc.easy.insert_rule(self.table, self.chain, rule_d) # type: ignore # noqa
def remove_rule(self, rule: SChainRule) -> None:
if self.has_rule(rule):
rule_d = self.schain_rule_to_rule_d(rule)
with plock:
self.iptc.easy.delete_rule(self.table, self.chain, rule_d) # type: ignore # noqa
@classmethod
def METHOD_NAME(cls, rule_d: Dict) -> bool:
return all((
rule_d.get('protocol') == 'tcp',
rule_d.get('target') == 'ACCEPT',
is_like_number(rule_d.get('tcp', {}).get('dport'))
))
@property # type: ignore
@refreshed
def rules(self) -> Iterable[SChainRule]:
with plock:
ichain = self.iptc.Chain(self.iptc.Table(self.table), self.chain) # type: ignore # noqa
for irule in ichain.rules:
rule_d = self.iptc.easy.decode_iptc_rule(irule) # type: ignore # noqa
if self.METHOD_NAME(rule_d):
yield self.rule_d_to_schain_rule(rule_d)
@refreshed
def has_rule(self, rule: SChainRule) -> bool:
with plock:
rule_d = self.schain_rule_to_rule_d(rule)
return self.iptc.easy.has_rule(self.table, self.chain, rule_d) # type: ignore # noqa
@classmethod
def schain_rule_to_rule_d(cls, srule: SChainRule) -> Dict:
rule = {
'protocol': 'tcp',
'tcp': {'dport': str(srule.port)},
'target': 'ACCEPT'
}
if srule.first_ip is not None:
if srule.first_ip == srule.last_ip or srule.last_ip is None:
rule.update({'src': cls.to_ip_network(srule.first_ip)})
else:
rule.update({
'iprange': {
'src-range': f'{srule.first_ip}-{srule.last_ip}'
}
})
return rule
@classmethod
def rule_d_to_schain_rule(cls, rule_d: Dict) -> SChainRule:
first_ip, last_ip = None, None
iprange = rule_d.get('iprange')
src = rule_d.get('src')
if iprange:
first_ip, last_ip = iprange['src-range'].split('-')
elif src:
first_ip = cls.from_ip_network(rule_d['src'])
port = int(rule_d['tcp']['dport'])
return SChainRule(port, first_ip, last_ip)
@classmethod
def from_ip_network(cls, ip: str) -> str:
return str(ipaddress.ip_network(ip).hosts()[0])
@classmethod
def to_ip_network(cls, ip: str) -> str:
return str(ipaddress.ip_network(ip))
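# Illustrative conversion example (assumed values, derived from the two
# classmethods above): the single-address rule
#     SChainRule(10000, '10.0.0.3', '10.0.0.3')
# maps to the iptc dict
#     {'protocol': 'tcp', 'tcp': {'dport': '10000'}, 'target': 'ACCEPT',
#      'src': '10.0.0.3/32'}
# while a range rule such as SChainRule(10000, '10.0.0.3', '10.0.0.10') carries
#     {'iprange': {'src-range': '10.0.0.3-10.0.0.10'}} in place of 'src'.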
|
2,989 |
create cmd
|
"""Tests the GCE NFS service."""
import json
import unittest
from unittest import mock
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.gcp import gce_network
from perfkitbenchmarker.providers.gcp import gce_nfs_service
from tests import pkb_common_test_case
import six
FLAGS = flags.FLAGS
_RUN_URI = 'fb810a9b'
_PROJECT = 'bionic-baton-343'
_ZONE = 'us-west1-a'
_NET_NAME = 'gce-network'
_NFS_NAME = 'nfs-%s' % _RUN_URI
_CREATE_RES = []
_ERROR = 'error'
def METHOD_NAME(tier='STANDARD'):
return [
'create',
_NFS_NAME,
'--file-share',
'name=vol0,capacity=1024',
'--network',
'name=%s' % _NET_NAME,
'--labels',
'',
'--tier',
tier,
]
def _DescribeResult(tier='STANDARD'):
return {
'createTime': '2018-05-04T21:38:49.862374Z',
'name': 'projects/foo/locations/asia-east1-a/instances/nfs-xxxxxxxx',
'networks': [{
'ipAddresses': ['10.198.13.2'],
'network': 'default2',
'reservedIpRange': '10.198.13.0/29'
}],
'state': 'READY',
'tier': tier,
'volumes': [{
'capacityGb': '1024',
'name': 'vol0'
}]
}
def _FullGcloud(args, location):
prefix = [
'gcloud', '--quiet', '--format', 'json', '--project', _PROJECT,
'filestore', 'instances'
]
postfix = ['--location', location]
return prefix + list(args) + postfix
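# Illustrative expansion (derived from the helpers above): wrapping the
# create-command helper (default STANDARD tier) with _FullGcloud and _ZONE
# produces the argument list
#     ['gcloud', '--quiet', '--format', 'json', '--project', 'bionic-baton-343',
#      'filestore', 'instances', 'create', 'nfs-fb810a9b',
#      '--file-share', 'name=vol0,capacity=1024', '--network', 'name=gce-network',
#      '--labels', '', '--tier', 'STANDARD', '--location', 'us-west1-a']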
class GceNfsServiceTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(GceNfsServiceTest, self).setUp()
self.issue_cmd = self._CreatePatched(vm_util, 'IssueCommand')
self._SetNetwork()
FLAGS['gce_network_name'].parse(_NET_NAME)
FLAGS['project'].parse(_PROJECT)
FLAGS['run_uri'].parse(_RUN_URI)
FLAGS['gcloud_path'].parse('gcloud')
def _SetNetwork(self):
network_spec = self._CreatePatched(gce_network, 'GceNetwork')
mock_network = mock.Mock()
mock_network.network_resource.name = _NET_NAME
network_spec.GetNetworkFromNetworkSpec.return_value = mock_network
def _CreatePatched(self, module, method_name):
patcher = mock.patch.object(module, method_name)
mock_method = patcher.start()
self.addCleanup(patcher.stop)
return mock_method
def _NfsService(self, disk_size=1024, **kwargs):
for key, value in six.iteritems(kwargs):
FLAGS[key].parse(value)
spec = disk.BaseDiskSpec('test_component', disk_size=disk_size)
return gce_nfs_service.GceNfsService(spec, _ZONE)
def _SetResponses(self, *responses):
responses_as_tuples = []
for response in responses:
if response == _ERROR:
responses_as_tuples.append(('', response, 1))
else:
responses_as_tuples.append((json.dumps(response), '', 0))
self.issue_cmd.side_effect = responses_as_tuples
def assertCommandCalled(self, *args, location=_ZONE):
self.issue_cmd.assert_called_with(
_FullGcloud(args, location), raise_on_failure=False, timeout=1800)
def assertMultipleCommands(self, *cmds, location=_ZONE):
expected_calls = []
for cmd in cmds:
expected_calls.append(
mock.call(
_FullGcloud(cmd, location=location),
raise_on_failure=False,
timeout=1800))
self.assertEqual(expected_calls, self.issue_cmd.call_args_list)
def testCreate(self):
nfs = self._NfsService()
self._SetResponses(_CREATE_RES)
nfs._Create()
self.assertCommandCalled(*METHOD_NAME())
def testCreateWithErrors(self):
self._SetResponses(_ERROR, _ERROR)
with self.assertRaises(errors.Resource.RetryableCreationError):
nfs = self._NfsService()
nfs._Create()
describe_cmd = ['describe', 'nfs-fb810a9b']
self.assertMultipleCommands(METHOD_NAME(), describe_cmd)
def testCreate2TBDisk(self):
self._SetResponses(_CREATE_RES)
nfs = self._NfsService(disk_size=2048)
nfs._Create()
cmd = self.issue_cmd.call_args_list[0][0][0]
self.assertRegex(' '.join(cmd), 'capacity=2048')
def testGetRemoteAddress(self):
self._SetResponses(_DescribeResult())
nfs = self._NfsService(disk_size=2048)
self.assertEqual('10.198.13.2', nfs.GetRemoteAddress())
def testDelete(self):
self._SetResponses({})
nfs = self._NfsService()
nfs._Delete()
self.assertCommandCalled('delete', _NFS_NAME, '--async')
def testDeleteWithErrors(self):
self._SetResponses(_ERROR, _DescribeResult())
with self.assertRaises(errors.Resource.RetryableDeletionError):
nfs = self._NfsService()
nfs._Delete()
delete_cmd = ['delete', _NFS_NAME, '--async']
describe_cmd = ['describe', _NFS_NAME]
self.assertMultipleCommands(delete_cmd, describe_cmd)
def testIsReady(self):
self._SetResponses(_DescribeResult())
nfs = self._NfsService()
self.assertTrue(nfs._IsReady())
def testIsNotReady(self):
self._SetResponses({}) # missing "state"
nfs = self._NfsService()
self.assertFalse(nfs._IsReady())
if __name__ == '__main__':
unittest.main()
|
2,990 |
cmd path
|
import contextlib
import json
import os
import shutil
import sys
import tempfile
__all__ = ['MockCommand', 'assert_calls']
pkgdir = os.path.dirname(__file__)
recording_dir = None
def _make_recording_file(prefix):
"""Make a temp file for recording calls to a mocked command"""
global recording_dir
if recording_dir is None:
recording_dir = tempfile.mkdtemp()
fd, p = tempfile.mkstemp(dir=recording_dir, prefix=prefix, suffix='.json')
os.close(fd)
return p
def prepend_to_path(dir):
os.environ['PATH'] = dir + os.pathsep + os.environ.get('PATH', os.defpath)
def remove_from_path(dir):
path_dirs = os.environ['PATH'].split(os.pathsep)
path_dirs.remove(dir)
os.environ['PATH'] = os.pathsep.join(path_dirs)
_record_run = """#!{python}
import os, sys
import json
with open({recording_file!r}, 'a') as f:
json.dump({{'env': dict(os.environ),
'argv': sys.argv,
'cwd': os.getcwd()}},
f)
f.write('\\x1e') # ASCII record separator
{extra_code}
"""
_output_template = """\
sys.stdout.write({!r})
sys.stderr.write({!r})
sys.exit({!r})
"""
# TODO: Overlapping calls to the same command may interleave writes.
class MockCommand(object):
"""Context manager to mock a system command.
The mock command will be written to a directory at the front of $PATH,
taking precedence over any existing command with the same name.
The *python* parameter accepts a string of code for the command to run,
in addition to the default behaviour of recording calls to the command.
This will run with the same Python interpreter as the calling code, but in
a new process.
The *content* parameter gives extra control, by providing a script which
will run with no additions. On Unix, it should start with a shebang (e.g.
``#!/usr/bin/env python``) specifying the interpreter. On Windows, it will
always be run by the same Python interpreter as the calling code.
Calls to the command will not be recorded when content is specified.
"""
def __init__(self, name, content=None, python=''):
self.name = name
self.recording_file = _make_recording_file(prefix=name)
self.command_dir = tempfile.mkdtemp()
if content is None:
content = _record_run.format(
python=sys.executable, recording_file=self.recording_file,
extra_code=python,
)
elif python:
raise ValueError(
"Specify script content or extra code (python='...'), not both"
)
self.content = content
@classmethod
def fixed_output(cls, name, stdout='', stderr='', exit_status=0):
"""Make a mock command, producing fixed output when it is run::
t = 'Sat 24 Apr 17:11:58 BST 2021\\n'
with MockCommand.fixed_output('date', t) as mock_date:
...
The stdout & stderr strings will be written to the respective streams,
and the process will exit with the specified numeric status (the default
of 0 indicates success).
This works with the recording mechanism, so you can check what arguments
this command was called with.
"""
return cls(
name, python=_output_template.format(stdout, stderr, exit_status)
)
def _copy_exe(self):
bitness = '64' if (sys.maxsize > 2**32) else '32'
src = os.path.join(pkgdir, 'cli-%s.exe' % bitness)
dst = os.path.join(self.command_dir, self.name+'.exe')
shutil.copy(src, dst)
@property
def METHOD_NAME(self):
# Can only be used once commands_dir has been set
p = os.path.join(self.command_dir, self.name)
if os.name == 'nt':
p += '-script.py'
return p
def __enter__(self):
if os.path.isfile(self.METHOD_NAME):
raise EnvironmentError("Command %r already exists at %s" %
(self.name, self.METHOD_NAME))
with open(self.METHOD_NAME, 'w') as f:
f.write(self.content)
if os.name == 'nt':
self._copy_exe()
else:
os.chmod(self.METHOD_NAME, 0o755) # Set executable bit
prepend_to_path(self.command_dir)
return self
def __exit__(self, etype, evalue, tb):
remove_from_path(self.command_dir)
shutil.rmtree(self.command_dir, ignore_errors=True)
def get_calls(self):
"""Get a list of calls made to this mocked command.
For each time the command was run, the list will contain a dictionary
with keys argv, env and cwd.
This won't work if you used the *content* parameter to alter what
the mocked command does.
"""
if recording_dir is None:
return []
if not os.path.isfile(self.recording_file):
return []
with open(self.recording_file, 'r') as f:
# 1E is ASCII record separator, last chunk is empty
chunks = f.read().split('\x1e')[:-1]
return [json.loads(c) for c in chunks]
def assert_called(self, args=None):
"""Assert that the mock command has been called at least once.
If args is passed, also check that it was called at least once with the
given arguments (not including the command name), e.g.::
with MockCommand('rsync') as mock_rsync:
function_to_test()
mock_rsync.assert_called(['/var/log', 'backup-server:logs'])
This won't work if you used the *content* parameter to alter what
the mocked command does.
"""
calls = self.get_calls()
assert calls != [], "Command %r was not called" % self.name
if args is not None:
if not any(args == c['argv'][1:] for c in calls):
msg = ["Command %r was not called with specified args (%r)" %
(self.name, args),
"It was called with these arguments: "]
for c in calls:
msg.append(' %r' % c['argv'][1:])
raise AssertionError('\n'.join(msg))
@contextlib.contextmanager
def assert_calls(cmd, args=None):
"""Assert that a block of code runs the given command.
If args is passed, also check that it was called at least once with the
given arguments (not including the command name).
Use as a context manager, e.g.::
with assert_calls('git'):
some_function_wrapping_git()
with assert_calls('git', ['add', myfile]):
some_other_function()
"""
with MockCommand(cmd) as mc:
yield
mc.assert_called(args=args)
|
2,991 |
create conf folders
|
# --------------------------------------------------------------------
# Software: InVesalius - Software for 3D reconstruction of medical images
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: [email protected]
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
# --------------------------------------------------------------------
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, as published by
# the Free Software Foundation; under version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# --------------------------------------------------------------------
import os
import pathlib
import shutil
import sys
import tempfile
HOME_DIR = pathlib.Path().home()
CONF_DIR = pathlib.Path(os.environ.get("XDG_CONFIG_HOME", HOME_DIR.joinpath(".config")))
USER_INV_DIR = CONF_DIR.joinpath("invesalius")
USER_PRESET_DIR = USER_INV_DIR.joinpath("presets")
USER_LOG_DIR = USER_INV_DIR.joinpath("logs")
USER_DL_WEIGHTS = USER_INV_DIR.joinpath("deep_learning/weights/")
USER_RAYCASTING_PRESETS_DIRECTORY = USER_PRESET_DIR.joinpath("raycasting")
TEMP_DIR = tempfile.gettempdir()
USER_PLUGINS_DIRECTORY = USER_INV_DIR.joinpath("plugins")
OLD_USER_INV_DIR = HOME_DIR.joinpath(".invesalius")
OLD_USER_PRESET_DIR = OLD_USER_INV_DIR.joinpath("presets")
OLD_USER_LOG_DIR = OLD_USER_INV_DIR.joinpath("logs")
INV_TOP_DIR = pathlib.Path(__file__).parent.parent.resolve()
PLUGIN_DIRECTORY = INV_TOP_DIR.joinpath("plugins")
ICON_DIR = INV_TOP_DIR.joinpath("icons")
SAMPLE_DIR = INV_TOP_DIR.joinpath("samples")
DOC_DIR = INV_TOP_DIR.joinpath("docs")
RAYCASTING_PRESETS_DIRECTORY = INV_TOP_DIR.joinpath("presets", "raycasting")
RAYCASTING_PRESETS_COLOR_DIRECTORY = INV_TOP_DIR.joinpath(
"presets", "raycasting", "color_list"
)
MODELS_DIR = INV_TOP_DIR.joinpath("ai")
# Inside the windows executable
if hasattr(sys, "frozen") and (
sys.frozen == "windows_exe" or sys.frozen == "console_exe"
):
abs_path = INV_TOP_DIR.parent.resolve()
ICON_DIR = abs_path.joinpath("icons")
SAMPLE_DIR = INV_TOP_DIR.joinpath("samples")
DOC_DIR = INV_TOP_DIR.joinpath("docs")
RAYCASTING_PRESETS_DIRECTORY = abs_path.joinpath("presets", "raycasting")
RAYCASTING_PRESETS_COLOR_DIRECTORY = abs_path.joinpath(
"presets", "raycasting", "color_list"
)
else:
ICON_DIR = pathlib.Path(os.environ.get("INV_ICON_DIR", ICON_DIR))
SAMPLE_DIR = pathlib.Path(os.environ.get("INV_SAMPLE_DIR", SAMPLE_DIR))
DOC_DIR = pathlib.Path(os.environ.get("INV_DOC_DIR", DOC_DIR))
RAYCASTING_PRESETS_DIRECTORY = pathlib.Path(
os.environ.get("INV_RAYCASTING_PRESETS_DIR", RAYCASTING_PRESETS_DIRECTORY)
)
RAYCASTING_PRESETS_COLOR_DIRECTORY = pathlib.Path(
os.environ.get("INV_RAYCASTING_COLOR_DIR", RAYCASTING_PRESETS_COLOR_DIRECTORY)
)
# Navigation paths
OBJ_DIR = str(INV_TOP_DIR.joinpath("navigation", "objects"))
MTC_CAL_DIR = str(INV_TOP_DIR.joinpath("navigation", "mtc_files", "CalibrationFiles"))
MTC_MAR_DIR = str(INV_TOP_DIR.joinpath("navigation", "mtc_files", "Markers"))
NDI_MAR_DIR_PROBE = str(INV_TOP_DIR.joinpath("navigation", "ndi_files", "Markers", "8700340.rom"))
NDI_MAR_DIR_REF = str(INV_TOP_DIR.joinpath("navigation", "ndi_files", "Markers", "8700339.rom"))
NDI_MAR_DIR_OBJ = str(INV_TOP_DIR.joinpath("navigation", "ndi_files", "Markers", "8700338.rom"))
OPTITRACK_CAL_DIR = str(INV_TOP_DIR.joinpath("navigation", "optitrack_files", "Calibration.cal"))
OPTITRACK_USERPROFILE_DIR = str(INV_TOP_DIR.joinpath("navigation", "optitrack_files", "UserProfile.motive"))
# MAC App
if not os.path.exists(ICON_DIR):
ICON_DIR = INV_TOP_DIR.parent.parent.joinpath("icons").resolve()
SAMPLE_DIR = INV_TOP_DIR.parent.parent.joinpath("samples").resolve()
DOC_DIR = INV_TOP_DIR.parent.parent.joinpath("docs").resolve()
def METHOD_NAME():
USER_INV_DIR.mkdir(parents=True, exist_ok=True)
USER_PRESET_DIR.mkdir(parents=True, exist_ok=True)
USER_LOG_DIR.mkdir(parents=True, exist_ok=True)
USER_DL_WEIGHTS.mkdir(parents=True, exist_ok=True)
USER_PLUGINS_DIRECTORY.mkdir(parents=True, exist_ok=True)
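# Illustrative resulting layout (derived from the constants above, assuming
# XDG_CONFIG_HOME is unset so CONF_DIR falls back to ~/.config): the helper
# above creates
#   ~/.config/invesalius/
#       presets/
#       logs/
#       deep_learning/weights/
#       plugins/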
def copy_old_files():
for f in OLD_USER_INV_DIR.glob("*"):
if f.is_file():
print(
shutil.copy(
f,
USER_INV_DIR.joinpath(
str(f).replace(str(OLD_USER_INV_DIR) + "/", "")
),
)
)
|
2,992 |
prepare for inference async
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import queue
from typing import Any, List, Optional
import numpy as np
import openvino.runtime as ov
from nncf.common.graph import NNCFGraph
from nncf.common.graph import NNCFNode
from nncf.openvino.graph.layer_attributes import OVLayerAttributes
from nncf.openvino.graph.metatypes.groups import CONSTANT_OPERATIONS
from nncf.openvino.graph.metatypes.groups import FAKE_QUANTIZE_OPERATIONS
from nncf.openvino.graph.metatypes.groups import INPUTS_QUANTIZABLE_OPERATIONS
from nncf.openvino.graph.metatypes.groups import OPERATIONS_WITH_WEIGHTS
from nncf.openvino.graph.metatypes.groups import QUANTIZE_AGNOSTIC_OPERATIONS
from nncf.openvino.graph.metatypes.groups import SHAPEOF_OPERATIONS
from nncf.openvino.graph.metatypes.openvino_metatypes import OVConcatMetatype
from nncf.openvino.graph.metatypes.openvino_metatypes import OVOpMetatype
from nncf.openvino.graph.node_utils import get_bias_value
from nncf.openvino.graph.node_utils import get_weight_value
from nncf.openvino.graph.node_utils import is_node_with_bias
from nncf.quantization.algorithms.accuracy_control.backend import AccuracyControlAlgoBackend
from nncf.quantization.algorithms.accuracy_control.backend import AsyncPreparedModel
def compile_model(model: ov.Model, done_queue: multiprocessing.Queue) -> None:
compiled_model = ov.Core().compile_model(model, "CPU")
model_stream = compiled_model.export_model()
done_queue.put(model_stream)
class OVAsyncPreparedModel(AsyncPreparedModel):
def __init__(self, proc: multiprocessing.Process, done_queue: multiprocessing.Queue):
self.proc = proc
self.done_queue = done_queue
def get(self, timeout=None) -> ov.CompiledModel:
try:
model_stream = self.done_queue.get(timeout=timeout)
except queue.Empty as ex:  # Queue.get signals a timeout with queue.Empty
raise TimeoutError() from ex
return ov.Core().import_model(model_stream, "CPU")
class OVAccuracyControlAlgoBackend(AccuracyControlAlgoBackend):
"""
Implementation of the `AccuracyControlAlgoBackend` for OpenVINO backend.
"""
# Metatypes
@staticmethod
def get_quantizer_metatypes() -> List[OVOpMetatype]:
return FAKE_QUANTIZE_OPERATIONS
@staticmethod
def get_const_metatypes() -> List[OVOpMetatype]:
return CONSTANT_OPERATIONS
@staticmethod
def get_quantizable_metatypes() -> List[OVOpMetatype]:
return INPUTS_QUANTIZABLE_OPERATIONS
@staticmethod
def get_quantize_agnostic_metatypes() -> List[OVOpMetatype]:
return QUANTIZE_AGNOSTIC_OPERATIONS + [OVConcatMetatype]
@staticmethod
def get_shapeof_metatypes() -> List[OVOpMetatype]:
return SHAPEOF_OPERATIONS
# Manipulations with bias value and weights
@staticmethod
def is_node_with_bias(node: NNCFNode, nncf_graph: NNCFGraph) -> bool:
return is_node_with_bias(node, nncf_graph)
@staticmethod
def is_node_with_weight(node: NNCFNode) -> bool:
return node.metatype in OPERATIONS_WITH_WEIGHTS and isinstance(node.layer_attributes, OVLayerAttributes)
@staticmethod
def get_bias_value(node_with_bias: NNCFNode, nncf_graph: NNCFGraph, model: ov.Model) -> np.ndarray:
return get_bias_value(node_with_bias, nncf_graph, model)
@staticmethod
def get_weight_value(node_with_weight: NNCFNode, model: ov.Model, port_id: int) -> np.ndarray:
return get_weight_value(node_with_weight, model, port_id)
@staticmethod
def get_weight_tensor_port_ids(node: NNCFNode) -> List[Optional[int]]:
return node.layer_attributes.get_const_port_ids()
@staticmethod
def get_model_size(model: ov.Model) -> int:
model_size = 0
for op in model.get_ops():
if op.get_type_name() == "Constant":
model_size += op.data.nbytes
return model_size
# Preparation of model
@staticmethod
def prepare_for_inference(model: ov.Model) -> Any:
return ov.compile_model(model)
@staticmethod
def METHOD_NAME(model: ov.Model) -> Any:
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=compile_model, args=(model, queue))
p.start()
return OVAsyncPreparedModel(p, queue)
|
2,993 |
donation
|
# -*- coding: utf-8 -*-
import httpcore
httpcore.SyncHTTPTransport = httpcore.AsyncHTTPProxy
import sys
import os
import platform
#redirect the original stdout and stderr
stdout=sys.stdout
stderr=sys.stderr
sys.stdout = open(os.path.join(os.getenv("temp"), "stdout.log"), "w")
sys.stderr = open(os.path.join(os.getenv("temp"), "stderr.log"), "w")
import languageHandler
import paths
#check if TWBlue is installed
# ToDo: Remove this soon as this is done already when importing the paths module.
if os.path.exists(os.path.join(paths.app_path(), "Uninstall.exe")):
paths.mode="installed"
import psutil
import commandline
import config
import output
import logging
import application
from mysc.thread_utils import call_threaded
import fixes
import widgetUtils
import webbrowser
from wxUI import commonMessageDialogs
from logger import logger
from update import updater
stdout_temp=sys.stdout
stderr_temp=sys.stderr
#if it's a binary version
if hasattr(sys, 'frozen'):
sys.stderr = open(os.path.join(paths.logs_path(), "stderr.log"), 'w')
sys.stdout = open(os.path.join(paths.logs_path(), "stdout.log"), 'w')
else:
sys.stdout=stdout
sys.stderr=stderr
# We are running from source, let's prepare vlc module for that situation
arch="x86"
if platform.architecture()[0][:2] == "64":
arch="x64"
os.environ['PYTHON_VLC_MODULE_PATH']=os.path.abspath(os.path.join(paths.app_path(), "..", "windows-dependencies", arch))
os.environ['PYTHON_VLC_LIB_PATH']=os.path.abspath(os.path.join(paths.app_path(), "..", "windows-dependencies", arch, "libvlc.dll"))
#the final log files have been opened successfully, let's close the temporary files
stdout_temp.close()
stderr_temp.close()
#finally, remove the temporary files. TWBlue doesn't need them anymore, and we will get more free space on the hard drive
os.remove(stdout_temp.name)
os.remove(stderr_temp.name)
import sound
log = logging.getLogger("main")
def setup():
log.debug("Starting " + application.name + " %s" % (application.version,))
config.setup()
proxy_setup()
log.debug("Using %s %s" % (platform.system(), platform.architecture()[0]))
log.debug("Application path is %s" % (paths.app_path(),))
log.debug("config path is %s" % (paths.config_path(),))
sound.setup()
languageHandler.setLanguage(config.app["app-settings"]["language"])
fixes.setup()
output.setup()
from controller import settings
from controller import mainController
from sessionmanager import sessionManager
app = widgetUtils.mainLoopObject()
check_pid()
if config.app["app-settings"]["donation_dialog_displayed"] == False:
METHOD_NAME()
if config.app['app-settings']['check_for_updates']:
updater.do_update()
sm = sessionManager.sessionManagerController()
sm.fill_list()
if len(sm.sessions) == 0:
sm.show()
else:
sm.do_ok()
if hasattr(sm.view, "destroy"):
sm.view.destroy()
del sm
r = mainController.Controller()
r.view.show()
r.do_work()
r.check_invisible_at_startup()
call_threaded(r.start)
app.run()
def proxy_setup():
if config.app["proxy"]["server"] != "" and config.app["proxy"]["type"] > 0:
log.debug("Loading proxy settings")
proxy_url = config.app["proxy"]["server"] + ":" + str(config.app["proxy"]["port"])
if config.app["proxy"]["user"] != "" and config.app["proxy"]["password"] != "":
proxy_url = config.app["proxy"]["user"] + ":" + config.app["proxy"]["password"] + "@" + proxy_url
elif config.app["proxy"]["user"] != "" and config.proxyTypes[config.app["proxy"]["type"]] in ["socks4", "socks4a"]:
proxy_url = config.app["proxy"]["user"] + "@" + proxy_url
proxy_url = config.proxyTypes[config.app["proxy"]["type"]] + "://" + proxy_url
os.environ["HTTP_PROXY"] = proxy_url
os.environ["HTTPS_PROXY"] = proxy_url
def METHOD_NAME():
dlg = commonMessageDialogs.METHOD_NAME()
if dlg == widgetUtils.YES:
webbrowser.open_new_tab(_("https://twblue.es/donate"))
config.app["app-settings"]["donation_dialog_displayed"] = True
def check_pid():
"Ensures that only one copy of the application is running at a time."
pidpath = os.path.join(os.getenv("temp"), "{}.pid".format(application.name))
if os.path.exists(pidpath):
with open(pidpath) as fin:
pid = int(fin.read())
try:
p = psutil.Process(pid=pid)
if p.is_running():
# Display warning dialog
commonMessageDialogs.common_error(_("{0} is already running. Close the other instance before starting this one. If you're sure that {0} isn't running, try deleting the file at {1}. If you're unsure of how to do this, contact the {0} developers.").format(application.name, pidpath))
sys.exit(1)
except psutil.NoSuchProcess:
commonMessageDialogs.dead_pid()
# Write the new PID
with open(pidpath,"w") as cam:
cam.write(str(os.getpid()))
setup()
|
2,994 |
get date time
|
# -*- coding: utf-8 -*-
"""Parser for Windows NT shell items."""
import pyfwsi
from dfdatetime import fat_date_time as dfdatetime_fat_date_time
from plaso.containers import windows_events
from plaso.helpers.windows import shell_folders
from plaso.lib import definitions
class ShellItemsParser(object):
"""Parses for Windows NT shell items."""
NAME = 'shell_items'
_PATH_ESCAPE_CHARACTERS = {'\\': '\\\\'}
_PATH_ESCAPE_CHARACTERS.update(definitions.NON_PRINTABLE_CHARACTERS)
def __init__(self, origin):
"""Initializes the parser.
Args:
origin (str): origin of the event.
"""
super(ShellItemsParser, self).__init__()
self._origin = origin
self._path_escape_characters = str.maketrans(self._PATH_ESCAPE_CHARACTERS)
self._path_segments = []
def METHOD_NAME(self, fat_date_time):
"""Retrieves the date and time from a FAT date time.
Args:
fat_date_time (int): FAT date time.
Returns:
dfdatetime.DateTimeValues: date and time or None if not set.
"""
if not fat_date_time:
return None
return dfdatetime_fat_date_time.FATDateTime(fat_date_time=fat_date_time)
def _GetSanitizedPathString(self, path):
"""Retrieves a sanitize path string.
Args:
path (str): path.
Returns:
str: sanitized path string.
"""
if not path:
return None
return path.translate(self._path_escape_characters)
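  # Editor's note: illustrative behaviour of the sanitizer above. Because the escape map
  # doubles every backslash (and escapes the characters in
  # definitions.NON_PRINTABLE_CHARACTERS), a raw path such as C:\Users\test is returned
  # as C:\\Users\\test.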
def _ParseShellItem(self, parser_mediator, shell_item):
"""Parses a shell item.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
shell_item (pyfwsi.item): shell item.
"""
path_segment = self._ParseShellItemPathSegment(shell_item)
self._path_segments.append(path_segment)
# TODO: generate event_data for non file_entry shell items.
if isinstance(shell_item, pyfwsi.file_entry):
event_data = windows_events.WindowsShellItemFileEntryEventData()
event_data.modification_time = self.METHOD_NAME(
shell_item.get_modification_time_as_integer())
event_data.name = self._GetSanitizedPathString(shell_item.name)
event_data.origin = self._origin
event_data.shell_item_path = self.CopyToPath()
number_of_event_data = 0
for extension_block in shell_item.extension_blocks:
if isinstance(extension_block, pyfwsi.file_entry_extension):
file_reference = extension_block.file_reference
if file_reference:
file_reference = '{0:d}-{1:d}'.format(
file_reference & 0xffffffffffff, file_reference >> 48)
event_data.access_time = self.METHOD_NAME(
extension_block.get_access_time_as_integer())
event_data.creation_time = self.METHOD_NAME(
extension_block.get_creation_time_as_integer())
event_data.file_reference = file_reference
event_data.localized_name = extension_block.localized_name
event_data.long_name = self._GetSanitizedPathString(
extension_block.long_name)
# TODO: change to generate an event_data for each extension block.
if (event_data.access_time or event_data.creation_time or
event_data.modification_time):
parser_mediator.ProduceEventData(event_data)
number_of_event_data += 1
# TODO: change to generate an event_data for each shell item.
if not number_of_event_data and event_data.modification_time:
parser_mediator.ProduceEventData(event_data)
def _ParseShellItemPathSegment(self, shell_item):
"""Parses a shell item path segment.
Args:
shell_item (pyfwsi.item): shell item.
Returns:
str: shell item path segment.
"""
path_segment = None
if isinstance(shell_item, pyfwsi.root_folder):
description = shell_folders.WindowsShellFoldersHelper.GetDescription(
shell_item.shell_folder_identifier)
if description:
path_segment = description
else:
path_segment = '{{{0:s}}}'.format(shell_item.shell_folder_identifier)
path_segment = '<{0:s}>'.format(path_segment)
elif isinstance(shell_item, pyfwsi.volume):
if shell_item.name:
path_segment = self._GetSanitizedPathString(shell_item.name)
elif shell_item.identifier:
path_segment = '{{{0:s}}}'.format(shell_item.identifier)
elif isinstance(shell_item, pyfwsi.file_entry):
long_name = ''
for extension_block in shell_item.extension_blocks:
if isinstance(extension_block, pyfwsi.file_entry_extension):
long_name = self._GetSanitizedPathString(extension_block.long_name)
if long_name:
path_segment = long_name
elif shell_item.name:
path_segment = self._GetSanitizedPathString(shell_item.name)
elif isinstance(shell_item, pyfwsi.network_location):
if shell_item.location:
path_segment = shell_item.location
if path_segment is None and shell_item.class_type == 0x00:
# TODO: check for signature 0x23febbee
pass
if path_segment is None:
path_segment = '<UNKNOWN: 0x{0:02x}>'.format(shell_item.class_type)
return path_segment
def CopyToPath(self):
"""Copies the shell items to a path.
Returns:
str: converted shell item list path or None.
"""
number_of_path_segments = len(self._path_segments)
if number_of_path_segments == 0:
return None
strings = [self._path_segments[0]]
number_of_path_segments -= 1
for path_segment in self._path_segments[1:]:
# Remove a trailing \ except for the last path segment.
if path_segment.endswith('\\\\') and number_of_path_segments > 1:
path_segment = path_segment[:-2]
if ((path_segment.startswith('<') and path_segment.endswith('>')) or
len(strings) == 1):
strings.append(' {0:s}'.format(path_segment))
elif path_segment.startswith('\\'):
strings.append('{0:s}'.format(path_segment))
else:
strings.append('\\\\{0:s}'.format(path_segment))
number_of_path_segments -= 1
return ''.join(strings)
def GetUpperPathSegment(self):
"""Retrieves the upper shell item path segment.
Returns:
str: shell item path segment or "N/A".
"""
if not self._path_segments:
return 'N/A'
return self._path_segments[-1]
def ParseByteStream(
self, parser_mediator, byte_stream, parent_path_segments=None,
codepage='cp1252'):
"""Parses the shell items from the byte stream.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
byte_stream (bytes): shell items data.
parent_path_segments (Optional[list[str]]): parent shell item path
segments.
codepage (Optional[str]): byte stream codepage.
"""
if parent_path_segments and isinstance(parent_path_segments, list):
self._path_segments = list(parent_path_segments)
else:
self._path_segments = []
shell_item_list = pyfwsi.item_list()
parser_mediator.AppendToParserChain(self.NAME)
try:
shell_item_list.copy_from_byte_stream(
byte_stream, ascii_codepage=codepage)
for shell_item in iter(shell_item_list.items):
self._ParseShellItem(parser_mediator, shell_item)
finally:
parser_mediator.PopFromParserChain()
|
2,995 |
canonicalize url
|
from __future__ import annotations
import random
import urllib.parse
from typing import TYPE_CHECKING, Any, Optional
from sqlalchemy.orm import Session
if TYPE_CHECKING:
from files.classes import Comment, Submission, User
Submittable = Comment | Submission
else:
Submittable = Any
def _replace_urls(url:str) -> str:
def _replace_extensions(url:str, exts:list[str]) -> str:
for ext in exts:
url = url.replace(f'.{ext}', '.webp')
return url
for rd in ("://reddit.com", "://new.reddit.com", "://www.reddit.com", "://redd.it", "://libredd.it", "://teddit.net"):
url = url.replace(rd, "://old.reddit.com")
url = url.replace("nitter.net", "twitter.com") \
.replace("old.reddit.com/gallery", "reddit.com/gallery") \
.replace("https://youtu.be/", "https://youtube.com/watch?v=") \
.replace("https://music.youtube.com/watch?v=", "https://youtube.com/watch?v=") \
.replace("https://streamable.com/", "https://streamable.com/e/") \
.replace("https://youtube.com/shorts/", "https://youtube.com/watch?v=") \
.replace("https://mobile.twitter", "https://twitter") \
.replace("https://m.facebook", "https://facebook") \
.replace("m.wikipedia.org", "wikipedia.org") \
.replace("https://m.youtube", "https://youtube") \
.replace("https://www.youtube", "https://youtube") \
.replace("https://www.twitter", "https://twitter") \
.replace("https://www.instagram", "https://instagram") \
.replace("https://www.tiktok", "https://tiktok")
if "/i.imgur.com/" in url:
url = _replace_extensions(url, ['png', 'jpg', 'jpeg'])
elif "/media.giphy.com/" in url or "/c.tenor.com/" in url:
url = _replace_extensions(url, ['gif'])
elif "/i.ibb.com/" in url:
url = _replace_extensions(url, ['png', 'jpg', 'jpeg', 'gif'])
if url.startswith("https://streamable.com/") and not url.startswith("https://streamable.com/e/"):
url = url.replace("https://streamable.com/", "https://streamable.com/e/")
return url
def _httpsify_and_remove_tracking_urls(url:str) -> urllib.parse.ParseResult:
parsed_url = urllib.parse.urlparse(url)
domain = parsed_url.netloc
is_reddit_twitter_instagram_tiktok:bool = domain in \
('old.reddit.com','twitter.com','instagram.com','tiktok.com')
if is_reddit_twitter_instagram_tiktok:
query = ""
else:
qd = urllib.parse.parse_qs(parsed_url.query)
filtered = {k: val for k, val in qd.items() if not k.startswith('utm_') and not k.startswith('ref_')}
query = urllib.parse.urlencode(filtered, doseq=True)
new_url = urllib.parse.ParseResult(
scheme="https",
netloc=parsed_url.netloc,
path=parsed_url.path,
params=parsed_url.params,
query=query,
fragment=parsed_url.fragment,
)
return new_url
def METHOD_NAME(url:str) -> str:
return _replace_urls(url)
def canonicalize_url2(url:str, *, httpsify:bool=False) -> urllib.parse.ParseResult:
	url_canonical = _replace_urls(url)
	if httpsify:
		url_parsed = _httpsify_and_remove_tracking_urls(url_canonical)
	else:
		url_parsed = urllib.parse.urlparse(url_canonical)
	return url_parsed
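# Editor's note: illustrative behaviour (not exhaustive). The replacement helper above maps,
# for example, "https://youtu.be/dQw4w9WgXcQ" to "https://youtube.com/watch?v=dQw4w9WgXcQ"
# and the various reddit mirrors to old.reddit.com. canonicalize_url2(..., httpsify=True)
# additionally forces the https scheme and strips utm_*/ref_* query parameters (the query
# is dropped entirely for reddit/twitter/instagram/tiktok URLs).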
def body_displayed(target:Submittable, v:Optional[User], is_html:bool) -> str:
moderated:Optional[str] = target.visibility_state.moderated_body(
v=v,
is_blocking=getattr(target, 'is_blocking', False)
)
if moderated: return moderated
body = target.body_html if is_html else target.body
if not body: return ""
if not v: return body
body = body.replace("old.reddit.com", v.reddit)
if v.nitter and '/i/' not in body and '/retweets' not in body:
body = body.replace("www.twitter.com", "nitter.net").replace("twitter.com", "nitter.net")
return body
def execute_shadowbanned_fake_votes(db:Session, target:Submittable, v:Optional[User]):
if not target or not v: return
if not v.shadowbanned: return
if v.id != target.author_id: return
if not (86400 > target.age_seconds > 20): return
ti = max(target.age_seconds // 60, 1)
maxupvotes = min(ti, 11)
rand = random.randint(0, maxupvotes)
if target.upvotes >= rand: return
amount = random.randint(0, 3)
if amount != 1: return
if hasattr(target, 'views'):
target.views += amount*random.randint(3, 5)
target.upvotes += amount
db.add(target)
db.commit()
|
2,996 |
etag
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetNetworkGroupResult',
'AwaitableGetNetworkGroupResult',
'get_network_group',
'get_network_group_output',
]
@pulumi.output_type
class GetNetworkGroupResult:
"""
The network group resource
"""
def __init__(__self__, description=None, METHOD_NAME=None, id=None, name=None, provisioning_state=None, resource_guid=None, system_data=None, type=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", METHOD_NAME)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description of the network group.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the scope assignment resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
Unique identifier for this resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata related to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetNetworkGroupResult(GetNetworkGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkGroupResult(
description=self.description,
METHOD_NAME=self.METHOD_NAME,
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
system_data=self.system_data,
type=self.type)
def get_network_group(network_group_name: Optional[str] = None,
network_manager_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkGroupResult:
"""
Gets the specified network group.
:param str network_group_name: The name of the network group.
:param str network_manager_name: The name of the network manager.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['networkGroupName'] = network_group_name
__args__['networkManagerName'] = network_manager_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network/v20230201:getNetworkGroup', __args__, opts=opts, typ=GetNetworkGroupResult).value
return AwaitableGetNetworkGroupResult(
description=pulumi.get(__ret__, 'description'),
METHOD_NAME=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
resource_guid=pulumi.get(__ret__, 'resource_guid'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_network_group)
def get_network_group_output(network_group_name: Optional[pulumi.Input[str]] = None,
network_manager_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkGroupResult]:
"""
Gets the specified network group.
:param str network_group_name: The name of the network group.
:param str network_manager_name: The name of the network manager.
:param str resource_group_name: The name of the resource group.
"""
...
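# Editor's note: illustrative invocation (resource names are hypothetical):
#
#   group = get_network_group(
#       network_group_name="my-group",
#       network_manager_name="my-network-manager",
#       resource_group_name="my-resource-group")
#   pulumi.export("networkGroupId", group.id)
#
# get_network_group_output() takes the same arguments as pulumi.Input values and returns
# a pulumi.Output[GetNetworkGroupResult] for use inside a Pulumi program.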
|
2,997 |
run
|
"""Autograder runner for R assignments"""
import copy
import nbformat as nbf
import os
import re
import tempfile
import yaml
from glob import glob
from nbconvert.exporters import ScriptExporter
from rpy2.robjects.packages import importr
from .abstract_runner import AbstractLanguageRunner
from ..utils import OtterRuntimeError
from ....export import export_notebook
from ....generate.token import APIClient
from ....test_files import GradingResults
from ....utils import chdir, get_source, knit_rmd_file, NBFORMAT_VERSION
R_PACKAGES = {
"knitr": importr("knitr"),
"ottr": importr("ottr"),
}
RMD_YAML_REGEX = r"^\n*---\n([\s\S]+?)\n---"
class RRunner(AbstractLanguageRunner):
subm_path_deletion_required = False
"""whether the submission path needs to be deleted (because it was created with tempfile)"""
def validate_submission(self, submission_path):
assignment_name = False
ext = os.path.splitext(submission_path)[1].lower()
if ext == ".ipynb":
nb = nbf.read(submission_path, as_version=nbf.NO_CONVERT)
assignment_name = self.get_notebook_assignment_name(nb)
elif ext == ".rmd":
assignment_name = None
with open(submission_path) as f:
rmd = f.read()
config = re.match(RMD_YAML_REGEX, rmd)
if config:
config = config.group(1)
assignment_name = yaml.full_load(config).get("assignment_name", None)
if assignment_name is not False:
self.validate_assignment_name(assignment_name)
def filter_cells_with_syntax_errors(self, nb):
"""
Filter out cells in an R notebook with syntax errors.
"""
new_cells = []
for cell in nb["cells"]:
if cell["cell_type"] == "code":
source = "\n".join(get_source(cell))
valid_syntax = R_PACKAGES["ottr"].valid_syntax(source)[0]
if valid_syntax:
new_cells.append(cell)
nb = copy.deepcopy(nb)
nb["cells"] = new_cells
return nb
def add_seeds_to_rmd_file(self, rmd_path):
"""
Add intercell seeding to an Rmd file.
"""
with open(rmd_path) as f:
rmd = f.read()
lines = rmd.split("\n")
insertions = []
for i, line in enumerate(lines):
if line.startswith("```{r"):
insertions.append(i)
seed = f"set.seed({self.ag_config.seed})"
if self.ag_config.seed_variable:
seed = f"{self.ag_config.seed_variable} = {self.ag_config.seed}"
for i in insertions[::-1]:
lines.insert(i + 1, seed)
with open(rmd_path, "w") as f:
f.write("\n".join(lines))
def add_seed_to_script(self, script_path):
"""
Add a line calling ``set.seed`` to the top of the R script at the specified path.
"""
with open(script_path) as f:
script = f.read()
script = f"set.seed({self.ag_config.seed})\n" + script
with open(script_path, "w") as f:
f.write(script)
def resolve_submission_path(self):
# create a temporary file at which to write a script if necessary
_, script_path = tempfile.mkstemp(suffix=".R")
# convert IPYNB files to Rmd files
nbs = glob("*.ipynb")
if len(nbs) > 1:
raise OtterRuntimeError("More than one IPYNB file found in submission")
elif len(nbs) == 1:
nb_path = nbs[0]
self.validate_submission(nb_path)
nb = nbf.read(nb_path, as_version=NBFORMAT_VERSION)
nb = self.filter_cells_with_syntax_errors(nb)
# create the R script
script, _ = ScriptExporter().from_notebook_node(nb)
with open(script_path, "w") as f:
f.write(script)
self.subm_path_deletion_required = True
return script_path
# convert Rmd files to R files
rmds = glob("*.Rmd")
if len(rmds) > 1:
raise OtterRuntimeError("More than one Rmd file found in submission")
elif len(rmds) == 1:
rmd_path = rmds[0]
self.validate_submission(rmd_path)
# add seeds
if self.ag_config.seed is not None:
self.add_seeds_to_rmd_file(rmd_path)
# create the R script
rmd_path = os.path.abspath(rmd_path)
R_PACKAGES["knitr"].purl(rmd_path, script_path)
self.subm_path_deletion_required = True
return script_path
os.remove(script_path)
# get the R script
scripts = glob("*.[Rr]")
if len(scripts) > 1:
raise OtterRuntimeError("More than one R script found in submission")
elif len(scripts) == 0:
raise OtterRuntimeError("No gradable files found in submission")
if self.ag_config.seed is not None:
self.add_seed_to_script(scripts[0])
return scripts[0]
def write_pdf(self, _):
# NOTE: this method ignores the submission_path argument, and instead resolves it again
# manually
# TODO: de-deduplicate this path resolution logic with resolve_submission_path
nbs = glob("*.ipynb")
if nbs:
subm_path = nbs[0]
ipynb = True
else:
rmds = glob("*.Rmd")
if rmds:
subm_path = rmds[0]
ipynb = False
else:
raise OtterRuntimeError("Could not find a file that can be converted to a PDF")
pdf_path = os.path.splitext(subm_path)[0] + ".pdf"
if ipynb:
export_notebook(
subm_path, dest=pdf_path, filtering=self.ag_config.filtering,
pagebreaks=self.ag_config.pagebreaks, exporter_type="latex")
else:
knit_rmd_file(subm_path, pdf_path)
return pdf_path
def METHOD_NAME(self):
os.environ["PATH"] = f"{self.ag_config.miniconda_path}/bin:" + os.environ.get("PATH")
with chdir("./submission"):
if self.ag_config.token is not None:
client = APIClient(token=self.ag_config.token)
generate_pdf = True
has_token = True
else:
generate_pdf = self.ag_config.pdf
has_token = False
client = None
subm_path = self.resolve_submission_path()
output = R_PACKAGES["ottr"].run_autograder(
subm_path, ignore_errors = not self.ag_config.debug, test_dir = "./tests")[0]
scores = GradingResults.from_ottr_json(output)
if generate_pdf:
self.write_and_maybe_submit_pdf(client, None, has_token, scores)
# delete the script if necessary
if self.subm_path_deletion_required:
os.remove(subm_path)
self.subm_path_deletion_required = False
return scores
|
2,998 |
update gui
|
import json
import rospy
import cv2
import sys
import base64
import threading
import time
import numpy as np
from datetime import datetime
from websocket_server import WebsocketServer
# Graphical User Interface Class
class GUI:
# Initialization function
# The actual initialization
def __init__(self, host, hal):
t = threading.Thread(target=self.run_server)
self.payload = {'imageL': '', 'imageC': '','imageR': '', 'v': '', 'w': ''}
self.server = None
self.client = None
self.host = host
# Images variable
self.imageL_to_be_shown = None
self.imageC_to_be_shown = None
self.imageR_to_be_shown = None
self.image_to_be_shown_updated = False
self.image_show_lock = threading.Lock()
# Get HAL variables
self.v = None
self.w = None
self.pose3d = None
self.acknowledge = False
self.acknowledge_lock = threading.Lock()
# Get HAL Object
self.hal = hal
t.start()
# Function to prepare image payload
# Encodes the image as a JSON string and sends through the WS
def payloadImage(self):
self.image_show_lock.acquire()
image_to_be_shown_updated = self.image_to_be_shown_updated
imageL_to_be_shown = self.imageL_to_be_shown
imageC_to_be_shown = self.imageC_to_be_shown
imageR_to_be_shown = self.imageR_to_be_shown
self.image_show_lock.release()
imageL = imageL_to_be_shown
payloadL = {'img': ''}
imageC = imageC_to_be_shown
payloadC = {'img': ''}
imageR = imageR_to_be_shown
payloadR = {'img': ''}
if(image_to_be_shown_updated == False):
return payloadL, payloadC, payloadR
imageL = cv2.resize(imageL, (0, 0), fx=0.50, fy=0.50)
frameL = cv2.imencode('.JPEG', imageL)[1]
encoded_imageL = base64.b64encode(frameL)
payloadL['img'] = encoded_imageL.decode('utf-8')
imageC = cv2.resize(imageC, (0, 0), fx=0.50, fy=0.50)
frameC = cv2.imencode('.JPEG', imageC)[1]
encoded_imageC = base64.b64encode(frameC)
payloadC['img'] = encoded_imageC.decode('utf-8')
imageR = cv2.resize(imageR, (0, 0), fx=0.50, fy=0.50)
frameR = cv2.imencode('.JPEG', imageR)[1]
encoded_imageR = base64.b64encode(frameR)
payloadR['img'] = encoded_imageR.decode('utf-8')
self.image_show_lock.acquire()
self.image_to_be_shown_updated = False
self.image_show_lock.release()
return payloadL, payloadC, payloadR
# Function for student to call
def showImages(self, imageL, imageC, imageR):
if (np.all(self.imageL_to_be_shown == imageL) == False or np.all(self.imageC_to_be_shown == imageC) == False or np.all(self.imageR_to_be_shown == imageR) == False):
self.image_show_lock.acquire()
self.imageL_to_be_shown = imageL
self.imageC_to_be_shown = imageC
self.imageR_to_be_shown = imageR
self.image_to_be_shown_updated = True
self.image_show_lock.release()
# Function to get the client
# Called when a new client is received
def get_client(self, client, server):
self.client = client
# Function to get value of Acknowledge
def get_acknowledge(self):
self.acknowledge_lock.acquire()
acknowledge = self.acknowledge
self.acknowledge_lock.release()
return acknowledge
# Function to get value of Acknowledge
def set_acknowledge(self, value):
self.acknowledge_lock.acquire()
self.acknowledge = value
self.acknowledge_lock.release()
# Update the gui
def METHOD_NAME(self):
payloadL, payloadC, payloadR = self.payloadImage()
self.payload["imageL"] = json.dumps(payloadL)
self.payload["imageC"] = json.dumps(payloadC)
self.payload["imageR"] = json.dumps(payloadR)
self.payload["v"] = json.dumps(self.v)
self.payload["w"] = json.dumps(self.w)
# Payload Point Message
message = "#gui" + json.dumps(self.payload)
self.server.send_message(self.client, message)
# Function to read the message from websocket
# Gets called when there is an incoming message from the client
def get_message(self, client, server, message):
# Acknowledge Message for GUI Thread
if (message[:4] == "#ack"):
self.set_acknowledge(True)
# Activate the server
def run_server(self):
self.server = WebsocketServer(port=2303, host=self.host)
self.server.set_fn_new_client(self.get_client)
self.server.set_fn_message_received(self.get_message)
logged = False
while not logged:
try:
f = open("/ws_gui.log", "w")
f.write("websocket_gui=ready")
f.close()
logged = True
except:
time.sleep(0.1)
self.server.run_forever()
def reset_gui(self):
# Reset Gui
print("reset")
# This class decouples the user thread
# and the GUI update thread
class ThreadGUI:
def __init__(self, gui):
self.gui = gui
# Time variables
self.ideal_cycle = 80
self.measured_cycle = 80
self.iteration_counter = 0
# Function to start the execution of threads
def start(self):
self.measure_thread = threading.Thread(target=self.measure_thread)
self.thread = threading.Thread(target=self.run)
self.measure_thread.start()
self.thread.start()
print("GUI Thread Started!")
# The measuring thread to measure frequency
def measure_thread(self):
while (self.gui.client == None):
pass
previous_time = datetime.now()
while (True):
# Sleep for 2 seconds
time.sleep(2)
# Measure the current time and subtract from previous time to get real time interval
current_time = datetime.now()
dt = current_time - previous_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
previous_time = current_time
# Get the time period
try:
# Division by zero
self.measured_cycle = ms / self.iteration_counter
except:
self.measured_cycle = 0
# Reset the counter
self.iteration_counter = 0
def run(self):
while (self.gui.client == None):
pass
while (True):
start_time = datetime.now()
self.gui.METHOD_NAME()
acknowledge_message = self.gui.get_acknowledge()
while (acknowledge_message == False):
acknowledge_message = self.gui.get_acknowledge()
self.gui.set_acknowledge(False)
finish_time = datetime.now()
self.iteration_counter = self.iteration_counter + 1
dt = finish_time - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
if (ms < self.ideal_cycle):
time.sleep((self.ideal_cycle - ms) / 1000.0)
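# Editor's note: worked example of the throttling above (numbers are illustrative). With
# ideal_cycle = 80 ms, if one update/acknowledge round trip takes 30 ms the thread sleeps
# for the remaining 50 ms, so the client receives at most roughly 12.5 GUI updates per second.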
|
2,999 |
predict proba
|
from numpy import inf, nan
from sklearn.semi_supervised import LabelPropagation as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _LabelPropagationImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def METHOD_NAME(self, X):
return self._wrapped_model.METHOD_NAME(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LabelPropagation Label Propagation classifier",
"allOf": [
{
"type": "object",
"required": [
"kernel",
"gamma",
"n_neighbors",
"max_iter",
"tol",
"n_jobs",
],
"relevantToOptimizer": [
"kernel",
"gamma",
"n_neighbors",
"max_iter",
"tol",
],
"additionalProperties": False,
"properties": {
"kernel": {
"anyOf": [
{"enum": ["knn", "rbf"]},
{"laleType": "callable", "forOptimizer": False},
],
"default": "rbf",
"description": "String identifier for kernel function to use or the kernel function itself",
},
"gamma": {
"type": "number",
"minimumForOptimizer": 0,
"maximumForOptimizer": 20,
"distribution": "uniform",
"default": 20,
"description": "Parameter for rbf kernel",
},
"n_neighbors": {
"XXX TODO XXX": "integer > 0",
"description": "Parameter for knn kernel",
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 20,
"distribution": "uniform",
"default": 7,
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "Change maximum number of iterations allowed",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"default": 0.001,
"description": "Convergence tolerance: threshold to consider the system at steady state",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of parallel jobs to run",
},
},
},
{
"XXX TODO XXX": "Parameter: kernel > only 'rbf' and 'knn' strings are valid inputs"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit a semi-supervised label propagation model based",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "A {n_samples by n_samples} size matrix will be created from this",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "n_labeled_samples (unlabeled points are marked as -1) All unlabeled samples will be transductively assigned labels",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Performs inductive inference across the model.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predictions for input data",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict probability for each possible outcome.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Normalized probability distributions across class labels",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.semi_supervised.LabelPropagation#sklearn-semi_supervised-labelpropagation",
"import_from": "sklearn.semi_supervised",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
LabelPropagation = make_operator(_LabelPropagationImpl, _combined_schemas)
set_docstrings(LabelPropagation)
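# Editor's note: minimal usage sketch (assumes scikit-learn is installed; the data below is
# synthetic and only for illustration):
#
#   from sklearn.datasets import make_classification
#
#   X, y = make_classification(n_samples=100, n_features=4, random_state=0)
#   y[::2] = -1                     # unlabeled points are marked with -1
#   trained = LabelPropagation(kernel="knn", n_neighbors=5).fit(X, y)
#   predictions = trained.predict(X)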
|