id (int64, 0-6k) | code (string, lengths 4k-8k) | code_compressed (null)

id: 1800
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class CreateIPv6TranslatorEntryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateIPv6TranslatorEntry','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BackendIpv4Port(self): # Integer
return self.get_query_params().get('BackendIpv4Port')
def set_BackendIpv4Port(self, BackendIpv4Port): # Integer
self.add_query_param('BackendIpv4Port', BackendIpv4Port)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_EntryName(self): # String
return self.get_query_params().get('EntryName')
def set_EntryName(self, EntryName): # String
self.add_query_param('EntryName', EntryName)
def get_AclStatus(self): # String
return self.get_query_params().get('AclStatus')
def set_AclStatus(self, AclStatus): # String
self.add_query_param('AclStatus', AclStatus)
def get_EntryBandwidth(self): # Integer
return self.get_query_params().get('EntryBandwidth')
def set_EntryBandwidth(self, EntryBandwidth): # Integer
self.add_query_param('EntryBandwidth', EntryBandwidth)
def get_AclType(self): # String
return self.get_query_params().get('AclType')
	def set_AclType(self, AclType): # String
self.add_query_param('AclType', AclType)
def get_AllocateIpv6Port(self): # Integer
return self.get_query_params().get('AllocateIpv6Port')
def set_AllocateIpv6Port(self, AllocateIpv6Port): # Integer
self.add_query_param('AllocateIpv6Port', AllocateIpv6Port)
def get_EntryDescription(self): # String
return self.get_query_params().get('EntryDescription')
def set_EntryDescription(self, EntryDescription): # String
self.add_query_param('EntryDescription', EntryDescription)
def get_BackendIpv4Addr(self): # String
return self.get_query_params().get('BackendIpv4Addr')
def set_BackendIpv4Addr(self, BackendIpv4Addr): # String
self.add_query_param('BackendIpv4Addr', BackendIpv4Addr)
def get_AclId(self): # String
return self.get_query_params().get('AclId')
def set_AclId(self, AclId): # String
self.add_query_param('AclId', AclId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_TransProtocol(self): # String
return self.get_query_params().get('TransProtocol')
def set_TransProtocol(self, TransProtocol): # String
self.add_query_param('TransProtocol', TransProtocol)
def get_Ipv6TranslatorId(self): # String
return self.get_query_params().get('Ipv6TranslatorId')
def set_Ipv6TranslatorId(self, Ipv6TranslatorId): # String
self.add_query_param('Ipv6TranslatorId', Ipv6TranslatorId)
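

# A minimal usage sketch for the generated request class above. AcsClient and
# do_action_with_exception are the standard aliyunsdkcore entry points; the
# credential, region, and resource-ID values are placeholder assumptions.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient

    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = CreateIPv6TranslatorEntryRequest()
    request.set_Ipv6TranslatorId('<ipv6-translator-id>')
    request.set_EntryName('example-entry')
    request.set_TransProtocol('tcp')
    request.set_BackendIpv4Addr('192.0.2.10')
    request.set_BackendIpv4Port(80)
    # Returns the raw response body, or raises ClientException/ServerException.
    print(client.do_action_with_exception(request))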

id: 1801

# -*- coding: utf-8 -*-
import os
import warnings
import logging
logger = logging.getLogger('RMD_READER')
from pelican import readers
from pelican import signals
from pelican import settings
KNITR = None
RMD = False
FIG_PATH = None
R_STARTED = False
def startr():
global KNITR, R_OBJECTS, R_STARTED
if R_STARTED:
return
logger.debug('STARTING R')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
import rpy2.rinterface
rpy2.rinterface.set_initoptions((b'rpy2', b'--no-save', b'--vanilla', b'--quiet'))
except AttributeError:
from rpy2.rinterface_lib import embedded
embedded.set_initoptions(("rpy2", "--no-save", "--vanilla", "--quiet"))
import rpy2.robjects as R_OBJECTS
from rpy2.robjects.packages import importr
KNITR = importr('knitr')
logger.debug('R STARTED')
R_STARTED = True
def initsignal(pelicanobj):
global RMD, FIG_PATH
try:
startr()
idx = KNITR.opts_knit.names.index('set')
path = pelicanobj.settings.get('PATH','%s/content' % settings.DEFAULT_CONFIG.get('PATH'))
logger.debug("RMD_READER PATH = %s", path)
KNITR.opts_knit[idx](**{'base.dir': path})
knitroptsknit = pelicanobj.settings.get('RMD_READER_KNITR_OPTS_KNIT', None)
if knitroptsknit:
KNITR.opts_knit[idx](**{str(k): v for k,v in knitroptsknit.items()})
idx = KNITR.opts_chunk.names.index('set')
knitroptschunk = pelicanobj.settings.get('RMD_READER_KNITR_OPTS_CHUNK', None)
if knitroptschunk:
FIG_PATH = knitroptschunk['fig.path'] if 'fig.path' in knitroptschunk else 'figure/'
KNITR.opts_chunk[idx](**{str(k): v for k,v in knitroptschunk.items()})
RMD = True
except ImportError as ex:
RMD = False
class RmdReader(readers.BaseReader):
file_extensions = ['Rmd', 'rmd']
@property
    def enabled(self):
return RMD
# You need to have a read method, which takes a filename and returns
# some content and the associated metadata.
def read(self, filename):
"""Parse content and metadata of markdown files"""
QUIET = self.settings.get('RMD_READER_KNITR_QUIET', True)
ENCODING = self.settings.get('RMD_READER_KNITR_ENCODING', 'UTF-8')
CLEANUP = self.settings.get('RMD_READER_CLEANUP', True)
RENAME_PLOT = self.settings.get('RMD_READER_RENAME_PLOT', 'chunklabel')
if type(RENAME_PLOT) is bool:
logger.error("RMD_READER_RENAME_PLOT takes a string value (either chunklabel or directory), please see the readme.")
if RENAME_PLOT:
RENAME_PLOT = 'chunklabel'
logger.error("Defaulting to chunklabel")
else:
RENAME_PLOT = 'disabled'
logger.error("Disabling plot renaming")
logger.debug("RMD_READER_KNITR_QUIET = %s", QUIET)
logger.debug("RMD_READER_KNITR_ENCODING = %s", ENCODING)
logger.debug("RMD_READER_CLEANUP = %s", CLEANUP)
logger.debug("RMD_READER_RENAME_PLOT = %s", RENAME_PLOT)
# replace single backslashes with double backslashes
filename = filename.replace('\\', '\\\\')
# parse Rmd file - generate md file
md_filename = filename.replace('.Rmd', '.aux').replace('.rmd', '.aux')
if RENAME_PLOT == 'chunklabel' or RENAME_PLOT == 'directory':
if RENAME_PLOT == 'chunklabel':
chunk_label = os.path.splitext(os.path.basename(filename))[0]
logger.debug('Chunk label: %s', chunk_label)
elif RENAME_PLOT == 'directory':
chunk_label = 'unnamed-chunk'
PATH = self.settings.get('PATH','%s/content' % settings.DEFAULT_CONFIG.get('PATH'))
src_name = os.path.splitext(os.path.relpath(filename, PATH))[0]
idx = KNITR.opts_chunk.names.index('set')
knitroptschunk = { 'fig.path': '%s-' % os.path.join(FIG_PATH, src_name) }
KNITR.opts_chunk[idx](**{str(k): v for k,v in knitroptschunk.items()})
logger.debug('Figures path: %s, chunk label: %s', knitroptschunk['fig.path'], chunk_label)
R_OBJECTS.r('''
opts_knit$set(unnamed.chunk.label="{unnamed_chunk_label}")
render_markdown()
hook_plot <- knit_hooks$get('plot')
knit_hooks$set(plot=function(x, options) hook_plot(paste0("{{static}}/", x), options))
'''.format(unnamed_chunk_label=chunk_label))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
KNITR.knit(filename, md_filename, quiet=QUIET, encoding=ENCODING)
# read md file - create a MarkdownReader
md_reader = readers.MarkdownReader(self.settings)
content, metadata = md_reader.read(md_filename)
# remove md file
if CLEANUP:
os.remove(md_filename)
return content, metadata
def add_reader(readers):
readers.reader_classes['rmd'] = RmdReader
def register():
signals.readers_init.connect(add_reader)
signals.initialized.connect(initsignal)
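

# The settings this reader consumes, as a pelicanconf.py sketch. The keys are
# exactly those read via self.settings.get() above; the values shown are the
# code's own defaults, except the fig.path and opts_knit values, which are
# illustrative.
#
#     RMD_READER_KNITR_QUIET = True
#     RMD_READER_KNITR_ENCODING = 'UTF-8'
#     RMD_READER_CLEANUP = True
#     RMD_READER_RENAME_PLOT = 'chunklabel'   # or 'directory'
#     RMD_READER_KNITR_OPTS_CHUNK = {'fig.path': 'figure/'}
#     RMD_READER_KNITR_OPTS_KNIT = {'progress': False}  # optional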

id: 1802

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudauth.endpoint import endpoint_data
class InitFaceVerifyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cloudauth', '2019-03-07', 'InitFaceVerify')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProductCode(self): # String
return self.get_query_params().get('ProductCode')
def set_ProductCode(self, ProductCode): # String
self.add_query_param('ProductCode', ProductCode)
def get_FaceContrastPicture(self): # String
return self.get_body_params().get('FaceContrastPicture')
def set_FaceContrastPicture(self, FaceContrastPicture): # String
self.add_body_params('FaceContrastPicture', FaceContrastPicture)
def get_UserId(self): # String
return self.get_query_params().get('UserId')
def set_UserId(self, UserId): # String
self.add_query_param('UserId', UserId)
def get_CertifyId(self): # String
return self.get_query_params().get('CertifyId')
def set_CertifyId(self, CertifyId): # String
self.add_query_param('CertifyId', CertifyId)
def get_EncryptType(self): # String
return self.get_query_params().get('EncryptType')
def set_EncryptType(self, EncryptType): # String
self.add_query_param('EncryptType', EncryptType)
def get_CertNo(self): # String
return self.get_query_params().get('CertNo')
def set_CertNo(self, CertNo): # String
self.add_query_param('CertNo', CertNo)
def get_OuterOrderNo(self): # String
return self.get_query_params().get('OuterOrderNo')
	def set_OuterOrderNo(self, OuterOrderNo): # String
self.add_query_param('OuterOrderNo', OuterOrderNo)
def get_CertType(self): # String
return self.get_query_params().get('CertType')
def set_CertType(self, CertType): # String
self.add_query_param('CertType', CertType)
def get_FaceContrastPictureUrl(self): # String
return self.get_query_params().get('FaceContrastPictureUrl')
def set_FaceContrastPictureUrl(self, FaceContrastPictureUrl): # String
self.add_query_param('FaceContrastPictureUrl', FaceContrastPictureUrl)
def get_Model(self): # String
return self.get_body_params().get('Model')
def set_Model(self, Model): # String
self.add_body_params('Model', Model)
def get_MetaInfo(self): # String
return self.get_query_params().get('MetaInfo')
def set_MetaInfo(self, MetaInfo): # String
self.add_query_param('MetaInfo', MetaInfo)
def get_OssObjectName(self): # String
return self.get_query_params().get('OssObjectName')
def set_OssObjectName(self, OssObjectName): # String
self.add_query_param('OssObjectName', OssObjectName)
def get_CertName(self): # String
return self.get_query_params().get('CertName')
def set_CertName(self, CertName): # String
self.add_query_param('CertName', CertName)
def get_Ip(self): # String
return self.get_query_params().get('Ip')
def set_Ip(self, Ip): # String
self.add_query_param('Ip', Ip)
def get_Mobile(self): # String
return self.get_query_params().get('Mobile')
def set_Mobile(self, Mobile): # String
self.add_query_param('Mobile', Mobile)
def get_AuthId(self): # String
return self.get_body_params().get('AuthId')
def set_AuthId(self, AuthId): # String
self.add_body_params('AuthId', AuthId)
def get_SceneId(self): # Long
return self.get_query_params().get('SceneId')
def set_SceneId(self, SceneId): # Long
self.add_query_param('SceneId', SceneId)
def get_OssBucketName(self): # String
return self.get_query_params().get('OssBucketName')
def set_OssBucketName(self, OssBucketName): # String
self.add_query_param('OssBucketName', OssBucketName)
def get_CallbackToken(self): # String
return self.get_query_params().get('CallbackToken')
def set_CallbackToken(self, CallbackToken): # String
self.add_query_param('CallbackToken', CallbackToken)
def get_ReturnUrl(self): # String
return self.get_query_params().get('ReturnUrl')
def set_ReturnUrl(self, ReturnUrl): # String
self.add_query_param('ReturnUrl', ReturnUrl)
def get_CallbackUrl(self): # String
return self.get_query_params().get('CallbackUrl')
def set_CallbackUrl(self, CallbackUrl): # String
self.add_query_param('CallbackUrl', CallbackUrl)
def get_Crop(self): # String
return self.get_body_params().get('Crop')
def set_Crop(self, Crop): # String
self.add_body_params('Crop', Crop)
def get_CertifyUrlType(self): # String
return self.get_query_params().get('CertifyUrlType')
def set_CertifyUrlType(self, CertifyUrlType): # String
self.add_query_param('CertifyUrlType', CertifyUrlType)

id: 1803

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkconfig.endpoint import endpoint_data
import json
class CreateAggregateCompliancePackRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Config', '2020-09-07', 'CreateAggregateCompliancePack')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_TagKeyScope(self): # String
return self.get_body_params().get('TagKeyScope')
def set_TagKeyScope(self, TagKeyScope): # String
self.add_body_params('TagKeyScope', TagKeyScope)
def get_CompliancePackName(self): # String
return self.get_body_params().get('CompliancePackName')
def set_CompliancePackName(self, CompliancePackName): # String
self.add_body_params('CompliancePackName', CompliancePackName)
def get_ClientToken(self): # String
return self.get_body_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_body_params('ClientToken', ClientToken)
def get_CompliancePackTemplateId(self): # String
return self.get_body_params().get('CompliancePackTemplateId')
def set_CompliancePackTemplateId(self, CompliancePackTemplateId): # String
self.add_body_params('CompliancePackTemplateId', CompliancePackTemplateId)
def get_Description(self): # String
return self.get_body_params().get('Description')
def set_Description(self, Description): # String
self.add_body_params('Description', Description)
def get_AggregatorId(self): # String
return self.get_body_params().get('AggregatorId')
def set_AggregatorId(self, AggregatorId): # String
self.add_body_params('AggregatorId', AggregatorId)
def get_TagValueScope(self): # String
return self.get_body_params().get('TagValueScope')
def set_TagValueScope(self, TagValueScope): # String
self.add_body_params('TagValueScope', TagValueScope)
def get_RegionIdsScope(self): # String
return self.get_body_params().get('RegionIdsScope')
def set_RegionIdsScope(self, RegionIdsScope): # String
self.add_body_params('RegionIdsScope', RegionIdsScope)
def get_DefaultEnable(self): # Boolean
return self.get_body_params().get('DefaultEnable')
def set_DefaultEnable(self, DefaultEnable): # Boolean
self.add_body_params('DefaultEnable', DefaultEnable)
	def get_ConfigRules(self): # Array
return self.get_body_params().get('ConfigRules')
def set_ConfigRules(self, ConfigRules): # Array
self.add_body_params("ConfigRules", json.dumps(ConfigRules))
def get_RiskLevel(self): # Integer
return self.get_body_params().get('RiskLevel')
def set_RiskLevel(self, RiskLevel): # Integer
self.add_body_params('RiskLevel', RiskLevel)
def get_ResourceGroupIdsScope(self): # String
return self.get_body_params().get('ResourceGroupIdsScope')
def set_ResourceGroupIdsScope(self, ResourceGroupIdsScope): # String
self.add_body_params('ResourceGroupIdsScope', ResourceGroupIdsScope)
def get_ExcludeResourceIdsScope(self): # String
return self.get_body_params().get('ExcludeResourceIdsScope')
def set_ExcludeResourceIdsScope(self, ExcludeResourceIdsScope): # String
self.add_body_params('ExcludeResourceIdsScope', ExcludeResourceIdsScope)
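

# Usage sketch for the Array parameter above: set_ConfigRules JSON-encodes a
# Python list via json.dumps before adding it to the body. The rule fields
# below are illustrative assumptions, not a confirmed schema.
if __name__ == '__main__':
    request = CreateAggregateCompliancePackRequest()
    request.set_AggregatorId('<aggregator-id>')
    request.set_CompliancePackName('example-pack')
    request.set_RiskLevel(1)
    request.set_ConfigRules([
        {'ManagedRuleIdentifier': 'example-rule', 'RiskLevel': 1},
    ])
    # The body now carries ConfigRules as a JSON string:
    print(request.get_body_params()['ConfigRules'])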

id: 1804

from boa3_test.tests.boa_test import BoaTest # needs to be the first import to avoid circular imports
from boa3.internal.exception import CompilerError
from boa3.internal.neo3.vm import VMState
from boa3_test.test_drive.testrunner.neo_test_runner import NeoTestRunner
class TestPolicyInterop(BoaTest):
default_folder: str = 'test_sc/interop_test/policy'
def test_get_exec_fee_factor(self):
path, _ = self.get_deploy_file_paths('GetExecFeeFactor.py')
runner = NeoTestRunner(runner_id=self.method_name())
invoke = runner.call_contract(path, 'main')
runner.execute()
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
self.assertIsInstance(invoke.result, int)
def test_get_exec_fee_too_many_parameters(self):
path = self.get_contract_path('GetExecFeeFactorTooManyArguments.py')
self.assertCompilerLogs(CompilerError.UnexpectedArgument, path)
def test_get_fee_per_byte(self):
path, _ = self.get_deploy_file_paths('GetFeePerByte.py')
runner = NeoTestRunner(runner_id=self.method_name())
invoke = runner.call_contract(path, 'main')
runner.execute()
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
self.assertIsInstance(invoke.result, int)
def test_get_fee_per_byte_too_many_parameters(self):
path = self.get_contract_path('GetFeePerByteTooManyArguments.py')
self.assertCompilerLogs(CompilerError.UnexpectedArgument, path)
def test_get_storage_price(self):
path, _ = self.get_deploy_file_paths('GetStoragePrice.py')
runner = NeoTestRunner(runner_id=self.method_name())
invoke = runner.call_contract(path, 'main')
runner.execute()
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
self.assertIsInstance(invoke.result, int)
def test_get_storage_price_too_many_parameters(self):
path = self.get_contract_path('GetStoragePriceTooManyArguments.py')
self.assertCompilerLogs(CompilerError.UnexpectedArgument, path)
def test_is_blocked(self):
path, _ = self.get_deploy_file_paths('IsBlocked.py')
runner = NeoTestRunner(runner_id=self.method_name())
invokes = []
expected_results = []
invokes.append(runner.call_contract(path, 'main', bytes(20)))
expected_results.append(False)
runner.execute()
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
for x in range(len(invokes)):
self.assertEqual(expected_results[x], invokes[x].result)
    def test_is_blocked_mismatched_type(self):
path = self.get_contract_path('IsBlockedMismatchedTypeInt.py')
self.assertCompilerLogs(CompilerError.MismatchedTypes, path)
path = self.get_contract_path('IsBlockedMismatchedTypeStr.py')
self.assertCompilerLogs(CompilerError.MismatchedTypes, path)
path = self.get_contract_path('IsBlockedMismatchedTypeBool.py')
self.assertCompilerLogs(CompilerError.MismatchedTypes, path)
def test_is_blocked_too_many_parameters(self):
path = self.get_contract_path('IsBlockedTooManyArguments.py')
self.assertCompilerLogs(CompilerError.UnexpectedArgument, path)
def test_is_blocked_too_few_parameters(self):
path = self.get_contract_path('IsBlockedTooFewArguments.py')
self.assertCompilerLogs(CompilerError.UnfilledArgument, path)
def test_import_policy(self):
path, _ = self.get_deploy_file_paths('ImportPolicy.py')
runner = NeoTestRunner(runner_id=self.method_name())
invoke = runner.call_contract(path, 'main')
runner.execute()
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
self.assertIsInstance(invoke.result, int)
def test_import_interop_policy(self):
path, _ = self.get_deploy_file_paths('ImportInteropPolicy.py')
runner = NeoTestRunner(runner_id=self.method_name())
invoke = runner.call_contract(path, 'main')
runner.execute()
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
self.assertIsInstance(invoke.result, int)

id: 1805

# OPENCORE - ADD
try:
from methods.regular.regular_api import *
except:
from default.methods.regular.regular_api import *
from shared.permissions.super_admin_only import Super_Admin
from shared.database.connection.connection import Connection
from shared.connection.connection_operations import Connection_Operations
@routes.route('/api/v1/connection/info/<int:connection_id>',
methods = ['GET'])
@General_permissions.grant_permission_for(['normal_user'])
def connection_info_api(connection_id):
"""
Permissions handled by connection_Runner
"""
with sessionMaker.session_scope() as session:
connection_operations = Connection_Operations(
session = session,
member = None,
connection_id = connection_id
)
if len(connection_operations.log["error"].keys()) >= 1:
return jsonify(log = connection_operations.log), 400
connection_operations.get_existing_connection(connection_id)
connection_operations.validate_existing_connection_id_permissions()
connection_operations.log['success'] = True
return jsonify(
log = connection_operations.log,
connection = connection_operations.connection.serialize()), 200
@routes.route('/api/v1/connection/save',
methods = ['POST'])
@General_permissions.grant_permission_for(['normal_user'])
def connection_save_api():
"""
May or may not have an ID if it's new.
I think it's safe for save to be different from running it.
metadata
meaning it's data one level removed from actual connection
ie how the connection should be structured
"""
spec_list = [
{"connection_id": {
'kind': int,
'required': False # (ie for first save)
}
},
{'connection': dict} # see connection_spec for spec
]
log, input, untrusted_input = regular_input.master(
request = request,
spec_list = spec_list)
if len(log["error"].keys()) >= 1:
return jsonify(log = log), 400
with sessionMaker.session_scope() as session:
connection_operations = Connection_Operations(
session = session,
member = None,
connection_id = input['connection_id'],
metadata = input['connection']
)
if len(connection_operations.log["error"].keys()) >= 1:
return jsonify(log = connection_operations.log), 400
connection_operations.validate_connection_permissions_scope(
permission_scope = input['connection'].get('permission_scope'),
project_string_id = input['connection'].get('project_string_id')
)
connection_operations.save()
if len(connection_operations.log["error"].keys()) >= 1:
return jsonify(log = connection_operations.log), 400
connection_operations.log['success'] = True
return jsonify(
log = connection_operations.log,
connection = connection_operations.connection.serialize()), 200
@routes.route('/api/project/<string:project_string_id>/connections', methods = ['GET'])
@Project_permissions.user_has_project(["admin", "Editor", "Viewer", "allow_if_project_is_public"])
def connection_list_api(project_string_id):
"""
security model assumes that validate_connection_permissions_permission_scope
checks it / returns forbidden if not applicable.
"""
log = regular_log.default()
with sessionMaker.session_scope() as session:
### MAIN ###
connection_operations = Connection_Operations(
session = session,
member = None
)
if len(connection_operations.log["error"].keys()) >= 1:
return jsonify(log = connection_operations.log), 400
connection_operations.validate_connection_permissions_scope(
permission_scope = "project",
project_string_id = project_string_id,
)
# Curious if we want a connection "grouping" concept or not
connection_list = connection_operations.connection_list()
connection_list_serialized = []
for connection in connection_list:
connection_list_serialized.append(connection.serialize())
log['success'] = True
return jsonify(connection_list = connection_list_serialized,
log = connection_operations.log), 200
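

# Example payload for the save endpoint above, matching spec_list: an optional
# integer connection_id plus a connection dict. permission_scope and
# project_string_id are the two keys validate_connection_permissions_scope
# reads; the values are illustrative assumptions.
#
#     POST /api/v1/connection/save
#     {
#         "connection_id": null,
#         "connection": {
#             "permission_scope": "project",
#             "project_string_id": "example-project"
#         }
#     }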

id: 1806

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoceanbasepro.endpoint import endpoint_data
class DescribeAnomalySQLListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'OceanBasePro', '2019-09-01', 'DescribeAnomalySQLList','oceanbase')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StartTime(self): # String
return self.get_body_params().get('StartTime')
def set_StartTime(self, StartTime): # String
self.add_body_params('StartTime', StartTime)
def get_PageNumber(self): # Integer
return self.get_body_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_body_params('PageNumber', PageNumber)
def get_SearchRule(self): # String
return self.get_body_params().get('SearchRule')
def set_SearchRule(self, SearchRule): # String
self.add_body_params('SearchRule', SearchRule)
def get_TenantId(self): # String
return self.get_body_params().get('TenantId')
def set_TenantId(self, TenantId): # String
self.add_body_params('TenantId', TenantId)
def get_PageSize(self): # Integer
return self.get_body_params().get('PageSize')
	def set_PageSize(self, PageSize): # Integer
self.add_body_params('PageSize', PageSize)
def get_SearchParameter(self): # String
return self.get_body_params().get('SearchParameter')
def set_SearchParameter(self, SearchParameter): # String
self.add_body_params('SearchParameter', SearchParameter)
def get_SortOrder(self): # String
return self.get_body_params().get('SortOrder')
def set_SortOrder(self, SortOrder): # String
self.add_body_params('SortOrder', SortOrder)
def get_SearchValue(self): # String
return self.get_body_params().get('SearchValue')
def set_SearchValue(self, SearchValue): # String
self.add_body_params('SearchValue', SearchValue)
def get_SQLId(self): # String
return self.get_body_params().get('SQLId')
def set_SQLId(self, SQLId): # String
self.add_body_params('SQLId', SQLId)
def get_FilterCondition(self): # String
return self.get_body_params().get('FilterCondition')
def set_FilterCondition(self, FilterCondition): # String
self.add_body_params('FilterCondition', FilterCondition)
def get_EndTime(self): # String
return self.get_body_params().get('EndTime')
def set_EndTime(self, EndTime): # String
self.add_body_params('EndTime', EndTime)
def get_NodeIp(self): # String
return self.get_body_params().get('NodeIp')
def set_NodeIp(self, NodeIp): # String
self.add_body_params('NodeIp', NodeIp)
def get_InstanceId(self): # String
return self.get_body_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_body_params('InstanceId', InstanceId)
def get_DbName(self): # String
return self.get_body_params().get('DbName')
def set_DbName(self, DbName): # String
self.add_body_params('DbName', DbName)
def get_SearchKeyWord(self): # String
return self.get_body_params().get('SearchKeyWord')
def set_SearchKeyWord(self, SearchKeyWord): # String
self.add_body_params('SearchKeyWord', SearchKeyWord)
def get_SortColumn(self): # String
return self.get_body_params().get('SortColumn')
def set_SortColumn(self, SortColumn): # String
self.add_body_params('SortColumn', SortColumn)
def get_AcceptLanguage(self): # String
return self.get_body_params().get('AcceptLanguage')
def set_AcceptLanguage(self, AcceptLanguage): # String
self.add_body_params('AcceptLanguage', AcceptLanguage)

id: 1807

import datetime
import os
import platform
import pytest
import subprocess
import sys
import uuid
sys.path.insert(0, 'src')
from cosalib import cmdlib
PY_MAJOR, PY_MINOR, PY_PATCH = platform.python_version_tuple()
def test_runcmd():
"""
Verify runcmd returns expected information
"""
result = cmdlib.runcmd(['echo', 'hi'])
assert result.stdout is None
with pytest.raises(FileNotFoundError):
cmdlib.runcmd(['idonotexist'])
    # platform.python_version_tuple() returns strings, so cast to int before
    # comparing; the capture_output check below needs at least Python 3.7.
    if int(PY_MAJOR) == 3 and int(PY_MINOR) >= 7:
result = cmdlib.runcmd(['echo', 'hi'], capture_output=True)
assert result.stdout == b'hi\n'
def test_write_and_load_json(tmpdir):
"""
Ensure write_json writes loadable json
"""
data = {
'test': ['data'],
}
path = os.path.join(tmpdir, 'data.json')
cmdlib.write_json(path, data)
# Ensure the file exists
assert os.path.isfile(path)
# Ensure the data matches
assert cmdlib.load_json(path) == data
def test_sha256sum_file(tmpdir):
"""
Verify we get the proper sha256 sum
"""
test_file = os.path.join(tmpdir, 'testfile')
with open(test_file, 'w') as f:
f.write('test')
# $ sha256sum testfile
# 9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08
e = '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08'
shasum = cmdlib.sha256sum_file(test_file)
assert shasum == e
def test_fatal(capsys):
"""
Ensure that fatal does indeed attempt to exit
"""
test_string = str(uuid.uuid4())
err = None
with pytest.raises(SystemExit) as err:
cmdlib.fatal(test_string)
# Check that our test string is in stderr
assert test_string in str(err)
def test_info(capsys):
"""
Verify test_info writes properly to stderr without exit
"""
test_string = str(uuid.uuid4())
cmdlib.info(test_string)
captured = capsys.readouterr()
assert test_string in captured.err
def test_rfc3339_time():
"""
Verify the format returned from rfc3339_time
"""
t = cmdlib.rfc3339_time()
assert datetime.datetime.strptime(t, '%Y-%m-%dT%H:%M:%SZ')
    # now() and utcnow() don't set a timezone, so we should get a raise
with pytest.raises(AssertionError):
cmdlib.rfc3339_time(datetime.datetime.now())
def test_rm_allow_noent(tmpdir):
"""
Ensure rm_allow_noent works both with existing and non existing files
"""
test_path = os.path.join(tmpdir, 'testfile')
with open(test_path, 'w') as f:
f.write('test')
# Exists
cmdlib.rm_allow_noent(test_path)
# Doesn't exist
cmdlib.rm_allow_noent(test_path)
def test_image_info(tmpdir):
cmdlib.runcmd([
"qemu-img", "create", "-f", "qcow2", f"{tmpdir}/test.qcow2", "10M"])
assert cmdlib.image_info(f"{tmpdir}/test.qcow2").get('format') == "qcow2"
cmdlib.runcmd([
"qemu-img", "create", "-f", "vpc",
'-o', 'force_size,subformat=fixed',
f"{tmpdir}/test.vpc", "10M"])
assert cmdlib.image_info(f"{tmpdir}/test.vpc").get('format') == "vpc"
def test_merge_dicts(tmpdir):
x = {
1: "Nope",
2: ["a", "b", "c"],
3: {"3a": True}
}
y = {4: True}
z = {1: "yup",
3: {
"3a": False,
"3b": "found"
}
}
# merge y into x
m = cmdlib.merge_dicts(x, y)
for i in range(1, 4):
assert i in m
    assert m[4] is True
# merge z into x
m = cmdlib.merge_dicts(x, z)
assert m[1] == "Nope"
assert x[2] == m[2]
assert m[3]["3a"] is True
assert m[3]["3b"] == "found"
# merge x into z
m = cmdlib.merge_dicts(z, x)
assert m[1] == "yup"
assert x[2] == m[2]
assert m[3] == z[3]
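

# Semantics implied by the assertions above (a reading of these tests, not a
# documented contract): merge_dicts(a, b) keeps a's value on conflicting keys
# and merges nested dicts recursively, so b can add keys but never overwrite.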
def test_flatten_image_yaml(tmpdir):
fn = f"{tmpdir}/image.yaml"
with open(fn, 'w') as f:
f.write("""
size: 10
extra-kargs:
- foobar
unique-key-a: true
""")
o = cmdlib.flatten_image_yaml(fn)
assert o['size'] == 10
assert o['extra-kargs'] == ['foobar']
assert o['unique-key-a']
with open(fn, 'a') as f:
f.write("include: image-base.yaml")
base_fn = f"{tmpdir}/image-base.yaml"
with open(base_fn, 'w') as f:
f.write("""
size: 8
extra-kargs:
- bazboo
unique-key-b: true
""")
o = cmdlib.flatten_image_yaml(fn)
assert o['size'] == 10
assert o['extra-kargs'] == ['foobar', 'bazboo']
assert o['unique-key-a']
assert o['unique-key-b']

id: 1808

"""Tests for docker engine."""
import json
import re
from pathlib import Path
from shutil import which
import pytest
from cwltool.main import main
from .util import (
get_data,
get_main_output,
needs_docker,
needs_podman,
needs_singularity,
)
@needs_docker
def test_docker_workflow(tmp_path: Path) -> None:
"""Basic test for docker with a CWL Workflow."""
result_code, _, stderr = get_main_output(
[
"--default-container",
"docker.io/debian:stable-slim",
"--outdir",
str(tmp_path),
get_data("tests/wf/hello-workflow.cwl"),
"--usermessage",
"hello",
]
)
assert "completed success" in stderr
assert (tmp_path / "response.txt").read_text("utf-8") == "hello"
assert result_code == 0
def test_docker_iwdr() -> None:
result_code = main(
[
"--default-container",
"docker.io/debian:stable-slim",
get_data("tests/wf/iwdr-entry.cwl"),
"--message",
"hello",
]
)
docker_installed = bool(which("docker"))
if docker_installed:
assert result_code == 0
else:
assert result_code != 0
@needs_docker
def test_docker_incorrect_image_pull() -> None:
result_code = main(
[
"--default-container",
"non-existant-weird-image",
get_data("tests/wf/hello-workflow.cwl"),
"--usermessage",
"hello",
]
)
assert result_code != 0
@needs_docker
def test_docker_file_mount() -> None:
# test for bug in
# ContainerCommandLineJob.create_file_and_add_volume()
#
# the bug was that it would use the file literal contents as the
# temporary file name, which can easily result in a file name that
# is too long or otherwise invalid. This test case uses ".."
result_code = main(
[get_data("tests/wf/literalfile.cwl"), get_data("tests/wf/literalfile-job.yml")]
)
assert result_code == 0
@needs_docker
def test_docker_strict_cpu_limit(tmp_path: Path) -> None:
result_code, stdout, stderr = get_main_output(
[
"--strict-cpu-limit",
"--default-container",
"docker.io/debian:stable-slim",
"--outdir",
str(tmp_path),
get_data("tests/wf/cores_float.cwl"),
]
)
stderr = re.sub(r"\s\s+", " ", stderr)
assert result_code == 0
assert "--cpus=2" in stderr
@needs_docker
def test_docker_strict_memory_limit(tmp_path: Path) -> None:
result_code, stdout, stderr = get_main_output(
[
"--strict-memory-limit",
"--default-container",
"docker.io/debian:stable-slim",
"--outdir",
str(tmp_path),
get_data("tests/wf/storage_float.cwl"),
]
)
stderr = re.sub(r"\s\s+", " ", stderr)
assert result_code == 0
assert "--memory=255m" in stderr
@needs_docker
def test_docker_strict_cpu_limit_warning(tmp_path: Path) -> None:
result_code, stdout, stderr = get_main_output(
[
"--default-container",
"docker.io/debian:stable-slim",
"--outdir",
str(tmp_path),
get_data("tests/wf/cores_float.cwl"),
]
)
stderr = re.sub(r"\s\s+", " ", stderr)
assert result_code == 0
assert "Skipping Docker software container '--cpus' limit" in stderr
@needs_docker
def test_docker_strict_memory_limit_warning(tmp_path: Path) -> None:
result_code, stdout, stderr = get_main_output(
[
"--default-container",
"docker.io/debian:stable-slim",
"--outdir",
str(tmp_path),
get_data("tests/wf/storage_float.cwl"),
]
)
stderr = re.sub(r"\s\s+", " ", stderr)
assert result_code == 0
assert "Skipping Docker software container '--memory' limit" in stderr
@needs_docker
def test_docker_required_secfile(tmp_path: Path) -> None:
result_code, stdout, stderr = get_main_output(
[
"--outdir",
str(tmp_path),
get_data("tests/secondary-files-required-container.cwl"),
]
)
assert result_code == 0, stderr
assert (
json.loads(stdout)["output"]["secondaryFiles"][0]["checksum"]
== "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"
)
@needs_podman
def test_podman_required_secfile(tmp_path: Path) -> None:
result_code, stdout, stderr = get_main_output(
[
"--podman",
"--outdir",
str(tmp_path),
get_data("tests/secondary-files-required-container.cwl"),
]
)
assert result_code == 0, stderr
assert (
json.loads(stdout)["output"]["secondaryFiles"][0]["checksum"]
== "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"
)
@needs_singularity
def test_singularity_required_secfile(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
singularity_dir = tmp_path / "singularity"
singularity_dir.mkdir()
monkeypatch.setenv("CWL_SINGULARITY_CACHE", str(singularity_dir))
result_code, stdout, stderr = get_main_output(
[
"--singularity",
"--outdir",
str(tmp_path / "out"),
get_data("tests/secondary-files-required-container.cwl"),
]
)
assert result_code == 0, stderr
assert (
json.loads(stdout)["output"]["secondaryFiles"][0]["checksum"]
== "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"
)
@needs_docker
def test_docker_required_missing_secfile(tmp_path: Path) -> None:
result_code, stdout, stderr = get_main_output(
[
"--outdir",
str(tmp_path),
get_data("tests/secondary-files-required-missing-container.cwl"),
]
)
assert result_code == 1, stderr
stderr = re.sub(r"\s\s+", " ", stderr)
assert "Job error:" in stderr
assert "Error collecting output for parameter 'output'" in stderr
    assert (
        "tests/secondary-files-required-missing-container.cwl:16:5: Missing required secondary file"
        in stderr
    )
assert "file.ext3" in stderr
@needs_podman
def test_podman_required_missing_secfile(tmp_path: Path) -> None:
result_code, stdout, stderr = get_main_output(
[
"--podman",
"--outdir",
str(tmp_path),
get_data("tests/secondary-files-required-missing-container.cwl"),
]
)
assert result_code == 1, stderr
stderr = re.sub(r"\s\s+", " ", stderr)
assert "Job error:" in stderr
assert "Error collecting output for parameter 'output'" in stderr
    assert (
        "tests/secondary-files-required-missing-container.cwl:16:5: Missing required secondary file"
        in stderr
    )
assert "file.ext3" in stderr
@needs_singularity
def test_singularity_required_missing_secfile(
tmp_path: Path, monkeypatch: pytest.MonkeyPatch
) -> None:
singularity_dir = tmp_path / "singularity"
singularity_dir.mkdir()
monkeypatch.setenv("CWL_SINGULARITY_CACHE", str(singularity_dir))
result_code, stdout, stderr = get_main_output(
[
"--singularity",
"--outdir",
str(tmp_path),
get_data("tests/secondary-files-required-missing-container.cwl"),
]
)
assert result_code == 1, stderr
stderr = re.sub(r"\s\s+", " ", stderr)
assert "Job error:" in stderr
assert "Error collecting output for parameter 'output'" in stderr
    assert (
        "tests/secondary-files-required-missing-container.cwl:16:5: Missing required secondary file"
        in stderr
    )
assert "file.ext3" in stderr

id: 1809

# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Code for the MAML algorithm and network definitions. """
from __future__ import print_function
import numpy as np
import sys
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.initializer as I
def net(input, label, bn_batch_stat, args, init_params=None):
output = forward_conv(input, bn_batch_stat, args, init_params)
loss = loss_func(output, label)
output2 = output.get_unlinked_variable(need_grad=False)
accuracy = 1.0 - F.mean(F.top_n_error(output2, label, n=1))
return (loss, accuracy)
# Network construction functions
def forward_conv(inp, bn_batch_stat, args, init_params, activation=F.relu):
hidden1 = conv_block(inp, 'layer1', bn_batch_stat,
activation, args, init_params)
hidden2 = conv_block(hidden1, 'layer2', bn_batch_stat,
activation, args, init_params)
hidden3 = conv_block(hidden2, 'layer3', bn_batch_stat,
activation, args, init_params)
hidden4 = conv_block(hidden3, 'layer4', bn_batch_stat,
activation, args, init_params)
if args.datasource != 'omniglot' or args.method != 'maml':
# hidden4 = F.reshape(hidden4, (hidden4.d.shape[0], -1), inplace=False)
pass
else:
hidden4 = F.mean(hidden4, (2, 3))
if init_params is None or 'layer5/affine/W' not in init_params:
output = PF.affine(hidden4, args.num_classes, name='layer5')
else:
output = F.affine(
hidden4, init_params['layer5/affine/W'], init_params['layer5/affine/b'])
return output
def conv_block(inp, layer_name, bn_batch_stat, activation, args, init_params):
""" Perform, conv, batch norm, nonlinearity, and max pool """
k = 3
stride, no_stride = (2, 2), (1, 1)
pad = (1, 1)
if init_params is None or layer_name + '/conv/W' not in init_params:
if args.max_pool:
conv_output = PF.convolution(
inp, args.num_filters, (k, k), pad=pad, stride=no_stride, name=layer_name)
else:
conv_output = PF.convolution(
inp, args.num_filters, (k, k), pad=pad, stride=stride, name=layer_name)
normed = normalize(conv_output, layer_name,
bn_batch_stat, activation, args, init_params)
else:
if args.max_pool:
conv_output = F.convolution(
inp, init_params[layer_name + '/conv/W'], init_params[layer_name + '/conv/b'], pad=pad, stride=no_stride)
else:
conv_output = F.convolution(
inp, init_params[layer_name + '/conv/W'], init_params[layer_name + '/conv/b'], pad=pad, stride=stride)
normed = normalize(conv_output, layer_name,
bn_batch_stat, activation, args, init_params)
if args.max_pool:
normed = F.max_pooling(normed, stride, stride=stride)
return normed
def normalize(inp, layer_name, bn_batch_stat, activation, args, init_params):
if args.norm == 'batch_norm':
if init_params is None:
inp = PF.batch_normalization(
inp, batch_stat=bn_batch_stat, name=layer_name)
else:
inp = F.batch_normalization(inp, init_params[layer_name + '/bn/beta'], init_params[layer_name + '/bn/gamma'],
mean=None, variance=None, batch_stat=bn_batch_stat)
if activation is not None:
return activation(inp)
else:
return inp
def loss_func(pred, label):
return F.mean(F.softmax_cross_entropy(pred, label))
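

# Minimal shape-check sketch for the forward pass above. The args fields are
# exactly those the functions read; the particular values (5-way, 32 filters,
# 28x28 single-channel input) are assumptions matching a typical Omniglot run.
if __name__ == '__main__':
    import types
    args = types.SimpleNamespace(num_classes=5, num_filters=32, max_pool=False,
                                 norm='batch_norm', datasource='omniglot',
                                 method='maml')
    x = nn.Variable((4, 1, 28, 28))
    # Four stride-2 convs shrink 28x28 to 2x2, then global mean and affine.
    out = forward_conv(x, True, args, None)
    print(out.shape)  # expected: (4, 5)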

id: 1810

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateDataCacheRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Eci', '2018-08-08', 'CreateDataCache', 'eci')
def get_SecurityGroupId(self):
return self.get_query_params().get("SecurityGroupId")
def set_SecurityGroupId(self, SecurityGroupId):
self.add_query_param("SecurityGroupId", SecurityGroupId)
def get_VSwitchId(self):
return self.get_query_params().get("VSwitchId")
def set_VSwitchId(self, VSwitchId):
self.add_query_param("VSwitchId", VSwitchId)
def get_Bucket(self):
return self.get_query_params().get("Bucket")
def set_Bucket(self, Bucket):
self.add_query_param("Bucket", Bucket)
def get_Path(self):
return self.get_query_params().get("Path")
def set_Path(self, Path):
self.add_query_param("Path", Path)
def get_Name(self):
return self.get_query_params().get("Name")
def set_Name(self, Name):
self.add_query_param("Name", Name)
def get_Size(self):
return self.get_query_params().get("Size")
def set_Size(self, Size):
self.add_query_param("Size", Size)
def get_RetentionDays(self):
return self.get_query_params().get("RetentionDays")
def set_RetentionDays(self, RetentionDays):
self.add_query_param("RetentionDays", RetentionDays)
def get_ResourceGroupId(self):
return self.get_query_params().get("ResourceGroupId")
def set_ResourceGroupId(self, ResourceGroupId):
self.add_query_param("ResourceGroupId", ResourceGroupId)
	def get_ClientToken(self):
return self.get_query_params().get("ClientToken")
def set_ClientToken(self, ClientToken):
self.add_query_param("ClientToken", ClientToken)
def get_EipInstanceId(self):
return self.get_query_params().get("EipInstanceId")
def set_EipInstanceId(self, EipInstanceId):
self.add_query_param("EipInstanceId", EipInstanceId)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self, Tags):
for i in range(len(Tags)):
if Tags[i].get('Key') is not None:
self.add_query_param('Tag.' + str(i + 1) + '.Key', Tags[i].get('Key'))
if Tags[i].get('Value') is not None:
self.add_query_param('Tag.' + str(i + 1) + '.Value', Tags[i].get('Value'))
def get_DataSource(self):
return self.get_query_params().get("DataSource")
def set_DataSource(self, DataSource):
if DataSource.get("Type") is not None:
self.add_query_param("DataSource.Type", DataSource.get("Type"))
if DataSource.get("Options") is not None:
for k, v in DataSource.get("Options").items():
self.add_query_param(f"DataSource.Options.#{len(k)}#{k}", v)
def get_EipCreateParam(self):
return self.get_query_params().get("EipCreateParam")
def set_EipCreateParam(self, EipCreateParam):
if EipCreateParam.get("Bandwidth") is not None:
self.add_query_param("EipCreateParam.Bandwidth", EipCreateParam.get('Bandwidth'))
if EipCreateParam.get("CommonBandwidthPackage") is not None:
self.add_query_param("EipCreateParam.CommonBandwidthPackage", EipCreateParam.get('CommonBandwidthPackage'))
if EipCreateParam.get("InternetChargeType") is not None:
self.add_query_param("EipCreateParam.InternetChargeType", EipCreateParam.get('InternetChargeType'))
if EipCreateParam.get("PublicIpAddressPoolId") is not None:
self.add_query_param("EipCreateParam.PublicIpAddressPoolId", EipCreateParam.get('PublicIpAddressPoolId'))
if EipCreateParam.get("ISP") is not None:
self.add_query_param("EipCreateParam.ISP", EipCreateParam.get('ISP'))

id: 1811

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeDemandsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeDemands','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_InstanceChargeType(self): # String
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self, InstanceChargeType): # String
self.add_query_param('InstanceChargeType', InstanceChargeType)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_InstanceTypeFamily(self): # String
return self.get_query_params().get('InstanceTypeFamily')
def set_InstanceTypeFamily(self, InstanceTypeFamily): # String
self.add_query_param('InstanceTypeFamily', InstanceTypeFamily)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DemandStatuss(self): # RepeatList
return self.get_query_params().get('DemandStatus')
	def set_DemandStatuss(self, DemandStatus): # RepeatList
for depth1 in range(len(DemandStatus)):
self.add_query_param('DemandStatus.' + str(depth1 + 1), DemandStatus[depth1])
def get_DemandId(self): # String
return self.get_query_params().get('DemandId')
def set_DemandId(self, DemandId): # String
self.add_query_param('DemandId', DemandId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_DemandType(self): # String
return self.get_query_params().get('DemandType')
def set_DemandType(self, DemandType): # String
self.add_query_param('DemandType', DemandType)

id: 1812

# Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Spark step operator flavor."""
import json
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, Union
from pydantic import validator
from zenml.config.base_settings import BaseSettings
from zenml.step_operators.base_step_operator import (
BaseStepOperatorConfig,
BaseStepOperatorFlavor,
)
if TYPE_CHECKING:
from zenml.integrations.spark.step_operators.spark_step_operator import (
SparkStepOperator,
)
class SparkStepOperatorSettings(BaseSettings):
"""Spark step operator settings.
Attributes:
        deploy_mode: can either be 'cluster' (default) or 'client'; it
            decides where the driver node of the application will run.
        submit_kwargs: the JSON string of a dict, used to define additional
            params if required (Spark has quite a lot of parameters, so
            including them all in the step operator was not implemented).
"""
deploy_mode: str = "cluster"
submit_kwargs: Optional[Dict[str, Any]] = None
@validator("submit_kwargs", pre=True)
def _convert_json_string(
cls, value: Union[None, str, Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
"""Converts potential JSON strings passed via the CLI to dictionaries.
Args:
value: The value to convert.
Returns:
The converted value.
Raises:
TypeError: If the value is not a `str`, `Dict` or `None`.
ValueError: If the value is an invalid json string or a json string
that does not decode into a dictionary.
"""
if isinstance(value, str):
try:
dict_ = json.loads(value)
except json.JSONDecodeError as e:
raise ValueError(f"Invalid json string '{value}'") from e
if not isinstance(dict_, Dict):
raise ValueError(
f"Json string '{value}' did not decode into a dictionary."
)
return dict_
elif isinstance(value, Dict) or value is None:
return value
else:
raise TypeError(f"{value} is not a json string or a dictionary.")
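

# A quick sketch of what the validator above enables: submit_kwargs may be
# given either as a dict or as its JSON-string form (e.g. from a CLI flag),
# and both normalize to the same dict. The Spark conf key is illustrative.
#
#     s1 = SparkStepOperatorSettings(submit_kwargs='{"spark.executor.memory": "2g"}')
#     s2 = SparkStepOperatorSettings(submit_kwargs={"spark.executor.memory": "2g"})
#     assert s1.submit_kwargs == s2.submit_kwargs == {"spark.executor.memory": "2g"}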
class SparkStepOperatorConfig( # type: ignore[misc] # https://github.com/pydantic/pydantic/issues/4173
BaseStepOperatorConfig, SparkStepOperatorSettings
):
"""Spark step operator config.
Attributes:
master: is the master URL for the cluster. You might see different
schemes for different cluster managers which are supported by Spark
like Mesos, YARN, or Kubernetes. Within the context of this PR,
the implementation supports Kubernetes as a cluster manager.
"""
master: str
class SparkStepOperatorFlavor(BaseStepOperatorFlavor):
"""Spark step operator flavor."""
@property
def name(self) -> str:
"""Name of the flavor.
Returns:
The name of the flavor.
"""
return "spark"
@property
    def config_class(self) -> Type[SparkStepOperatorConfig]:
"""Returns `SparkStepOperatorConfig` config class.
Returns:
The config class.
"""
return SparkStepOperatorConfig
@property
def docs_url(self) -> Optional[str]:
"""A url to point at docs explaining this flavor.
Returns:
A flavor docs url.
"""
return self.generate_default_docs_url()
@property
def sdk_docs_url(self) -> Optional[str]:
"""A url to point at SDK docs explaining this flavor.
Returns:
A flavor SDK docs url.
"""
return self.generate_default_sdk_docs_url()
@property
def implementation_class(self) -> Type["SparkStepOperator"]:
"""Implementation class for this flavor.
Returns:
The implementation class.
"""
from zenml.integrations.spark.step_operators.spark_step_operator import (
SparkStepOperator,
)
return SparkStepOperator

id: 1813

import img2pdf
from PIL import Image
import os
from plugin import plugin
from colorama import Fore
@plugin('image to pdf')
class ImageToPDF:
"""
    A tool to convert images to a PDF file
"""
def __init__(self):
# Path of the folder or image to be converted
self.path = None
self.image = None
def __call__(self, jarvis, s):
        self.image_to_pdf(jarvis)
    def image_to_pdf(self, jarvis):
jarvis.say('')
jarvis.say('This tool will help you convert image to pdf')
while True:
self.available_options(jarvis)
user_input = jarvis.input('Your choice: ')
user_input = user_input.lower()
if user_input == 'q' or user_input == 'quit' or user_input == '3':
jarvis.say("See you next time :D", Fore.CYAN)
break
# For single image to be converted to pdf
elif user_input == '1':
while True:
image_path = jarvis.input(
'Enter the full path of the image: ')
if os.path.exists(image_path) and (image_path.endswith('.jpg') or image_path.endswith('.png')):
break
else:
jarvis.say(
                        'Oops! Looks like you entered an invalid path. Kindly re-enter', Fore.RED)
pdf_bytes = self.single_image_to_pdf(jarvis, image_path)
# For multiple images in a folder to be converted to pdf
elif user_input == '2':
while True:
folder_path = jarvis.input(
'Enter the full path of the folder: ')
if os.path.exists(folder_path):
break
else:
jarvis.say(
'Oops! Looks like you entered an invalid path. Kindly re-enter.', Fore.RED)
pdf_bytes = self.folder_to_pdf(jarvis, folder_path)
# For an incorrectly entered option
else:
jarvis.incorrect_option()
continue
destination = jarvis.get_saving_directory(self.path)
# Naming and saving the pdf file
file_name = jarvis.input('What would you like to name your pdf? ')
pdf_destination = destination + '/' + file_name + '.pdf'
print('Final destination: ' + pdf_destination)
self.save_pdf(jarvis, pdf_bytes, pdf_destination)
def available_options(self, jarvis):
"""
Message displayed to prompt the user about converting
images to pdf
"""
jarvis.say('Select one of the following options:')
jarvis.say('1: Convert a single image')
jarvis.say('2: Convert all images of the folder')
jarvis.say('3: Quit')
def single_image_to_pdf(self, jarvis, image_path):
"""
This function is used to convert a single image
with a given path to a pdf file.
"""
self.path = image_path
self.image = Image.open(image_path)
pdf_bytes = img2pdf.convert(self.image.filename)
self.image.close()
return pdf_bytes
def folder_to_pdf(self, jarvis, folder_path):
"""
This function is used to convert all the images
in a given folder path to a single PDF file
"""
self.path = folder_path
source_images = []
os.chdir(self.path)
for image in os.listdir(os.getcwd()):
if image.endswith('.jpg') or image.endswith('.png'):
source_images.append(image)
pdf_bytes = img2pdf.convert(source_images)
return pdf_bytes
def save_pdf(self, jarvis, pdf_bytes, destination):
"""
Save the pdf bytes to the supplied destination path
"""
with open(destination, 'wb') as pdf_file:
    pdf_file.write(pdf_bytes)
jarvis.say('Your pdf is created successfully', Fore.GREEN)
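# Standalone sketch of the underlying img2pdf calls used above (file names
# are hypothetical):
#
#   import img2pdf
#   with open('out.pdf', 'wb') as f:
#       f.write(img2pdf.convert(['page1.jpg', 'page2.png']))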
| null |
1,814 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ListAuthorizationRulesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'IoTCC', '2021-05-13', 'ListAuthorizationRules','IoTCC')
self.set_method('POST')
def get_FuzzyDestination(self): # String
return self.get_query_params().get('FuzzyDestination')
def set_FuzzyDestination(self, FuzzyDestination): # String
self.add_query_param('FuzzyDestination', FuzzyDestination)
def get_FuzzyAuthorizationRuleName(self): # String
return self.get_query_params().get('FuzzyAuthorizationRuleName')
def set_FuzzyAuthorizationRuleName(self, FuzzyAuthorizationRuleName): # String
self.add_query_param('FuzzyAuthorizationRuleName', FuzzyAuthorizationRuleName)
def get_DestinationTypes(self): # RepeatList
return self.get_query_params().get('DestinationType')
def set_DestinationTypes(self, DestinationType): # RepeatList
for depth1 in range(len(DestinationType)):
self.add_query_param('DestinationType.' + str(depth1 + 1), DestinationType[depth1])
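# e.g. set_DestinationTypes(['CIDR', 'Domain']) (illustrative values) flattens
# the list into repeated query parameters:
#   DestinationType.1=CIDR, DestinationType.2=Domain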
def get_Destinations(self): # RepeatList
return self.get_query_params().get('Destination')
def set_Destinations(self, Destination): # RepeatList
for depth1 in range(len(Destination)):
self.add_query_param('Destination.' + str(depth1 + 1), Destination[depth1])
def get_Protocols(self): # RepeatList
return self.get_query_params().get('Protocol')
def set_Protocols(self, Protocol): # RepeatList
for depth1 in range(len(Protocol)):
self.add_query_param('Protocol.' + str(depth1 + 1), Protocol[depth1])
def get_AuthorizationRuleIdss(self): # RepeatList
return self.get_query_params().get('AuthorizationRuleIds')
def set_AuthorizationRuleIdss(self, AuthorizationRuleIds): # RepeatList
for depth1 in range(len(AuthorizationRuleIds)):
self.add_query_param('AuthorizationRuleIds.' + str(depth1 + 1), AuthorizationRuleIds[depth1])
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_AuthorizationRuleType(self): # String
return self.get_query_params().get('AuthorizationRuleType')
def set_AuthorizationRuleType(self, AuthorizationRuleType): # String
self.add_query_param('AuthorizationRuleType', AuthorizationRuleType)
def get_Policys(self): # RepeatList
return self.get_query_params().get('Policy')
def set_Policys(self, Policy): # RepeatList
for depth1 in range(len(Policy)):
self.add_query_param('Policy.' + str(depth1 + 1), Policy[depth1])
def get_AuthorizationRuleStatuss(self): # RepeatList
return self.get_query_params().get('AuthorizationRuleStatus')
def set_AuthorizationRuleStatuss(self, AuthorizationRuleStatus): # RepeatList
for depth1 in range(len(AuthorizationRuleStatus)):
self.add_query_param('AuthorizationRuleStatus.' + str(depth1 + 1), AuthorizationRuleStatus[depth1])
def get_AuthorizationRuleNames(self): # RepeatList
return self.get_query_params().get('AuthorizationRuleName')
def set_AuthorizationRuleNames(self, AuthorizationRuleName): # RepeatList
for depth1 in range(len(AuthorizationRuleName)):
self.add_query_param('AuthorizationRuleName.' + str(depth1 + 1), AuthorizationRuleName[depth1])
def get_DestinationPorts(self): # RepeatList
return self.get_query_params().get('DestinationPort')
def METHOD_NAME(self, DestinationPort): # RepeatList
for depth1 in range(len(DestinationPort)):
self.add_query_param('DestinationPort.' + str(depth1 + 1), DestinationPort[depth1])
def get_IoTCloudConnectorId(self): # String
return self.get_query_params().get('IoTCloudConnectorId')
def set_IoTCloudConnectorId(self, IoTCloudConnectorId): # String
self.add_query_param('IoTCloudConnectorId', IoTCloudConnectorId)
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults)
| null |
1,815 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdcdn.endpoint import endpoint_data
class DescribeDcdnUserDomainsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dcdn', '2018-01-15', 'DescribeDcdnUserDomains')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_CheckDomainShow(self): # Boolean
return self.get_query_params().get('CheckDomainShow')
def set_CheckDomainShow(self, CheckDomainShow): # Boolean
self.add_query_param('CheckDomainShow', CheckDomainShow)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_ChangeEndTime(self): # String
return self.get_query_params().get('ChangeEndTime')
def set_ChangeEndTime(self, ChangeEndTime): # String
self.add_query_param('ChangeEndTime', ChangeEndTime)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_Coverage(self): # String
return self.get_query_params().get('Coverage')
def set_Coverage(self, Coverage): # String
self.add_query_param('Coverage', Coverage)
def get_DomainName(self): # String
return self.get_query_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_query_param('DomainName', DomainName)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def METHOD_NAME(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DomainStatus(self): # String
return self.get_query_params().get('DomainStatus')
def set_DomainStatus(self, DomainStatus): # String
self.add_query_param('DomainStatus', DomainStatus)
def get_DomainSearchType(self): # String
return self.get_query_params().get('DomainSearchType')
def set_DomainSearchType(self, DomainSearchType): # String
self.add_query_param('DomainSearchType', DomainSearchType)
def get_ChangeStartTime(self): # String
return self.get_query_params().get('ChangeStartTime')
def set_ChangeStartTime(self, ChangeStartTime): # String
self.add_query_param('ChangeStartTime', ChangeStartTime)
| null |
1,816 |
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import matplotlib.pyplot as plt
from neu.tts.trainer import Trainer
import nnabla as nn
import nnabla.functions as F
import numpy as np
def save_image(data, path, label, title, figsize=(6, 5)):
r"""Saves an image to file."""
plt.figure(figsize=figsize)
plt.imshow(data.copy(), origin='lower', aspect='auto')
plt.xlabel(label[0])
plt.ylabel(label[1])
plt.title(title)
plt.colorbar()
plt.savefig(path, bbox_inches='tight')
plt.close()
class Tacotron2Trainer(Trainer):
r"""Trainer for Tacotron2."""
def METHOD_NAME(self, key='train'):
r"""Builds the graph and updates the placeholder.
Args:
key (str, optional): Type of computational graph. Defaults to 'train'.
"""
assert key in ('train', 'valid')
self.model.training = key != 'valid'
hp = self.hparams
# define input variables
x_txt = nn.Variable([hp.batch_size, hp.text_len])
x_mel = nn.Variable([hp.batch_size, hp.mel_len, hp.n_mels*hp.r])
x_gat = nn.Variable([hp.batch_size, hp.mel_len])
# output variables
o_mel, o_mel_p, o_gat, o_att = self.model(x_txt, x_mel)
o_mel = o_mel.apply(persistent=True)
o_mel_p = o_mel_p.apply(persistent=True)
o_gat = o_gat.apply(persistent=True)
o_att = o_att.apply(persistent=True)
# loss functions
def criteria(x, t):
return F.mean(F.squared_error(x, t))
l_mel = (criteria(o_mel, x_mel) +
criteria(o_mel_p, x_mel)).apply(persistent=True)
l_gat = F.mean(F.sigmoid_cross_entropy(
o_gat, x_gat)).apply(persistent=True)
l_net = (l_mel + l_gat).apply(persistent=True)
self.placeholder[key] = {
'x_mel': x_mel, 'x_gat': x_gat, 'x_txt': x_txt,
'o_mel': o_mel, 'o_mel_p': o_mel_p, 'o_gat': o_gat, 'o_att': o_att,
'l_mel': l_mel, 'l_gat': l_gat, 'l_net': l_net
}
self.out_variables = ['train/l_mel', 'train/l_gat', 'train/l_net']
def train_on_batch(self):
r"""Updates the model parameters."""
batch_size = self.hparams.batch_size
p, dl = self.placeholder['train'], self.dataloader['train']
self.optimizer.zero_grad()
if self.hparams.comm.n_procs > 1:
self.hparams.event.default_stream_synchronize()
p['x_mel'].d, p['x_txt'].d, p['x_gat'].d = dl.next()
p['l_net'].forward(clear_no_need_grad=True)
p['l_net'].backward(clear_buffer=True)
self.monitor.update('train/l_mel', p['l_mel'].d.copy(), batch_size)
self.monitor.update('train/l_gat', p['l_gat'].d.copy(), batch_size)
self.monitor.update('train/l_net', p['l_net'].d.copy(), batch_size)
if self.hparams.comm.n_procs > 1:
self.hparams.comm.all_reduce(
self._grads, division=True, inplace=False)
self.hparams.event.add_default_stream_event()
self.optimizer.update()
def valid_on_batch(self):
r"""Performs validation."""
batch_size = self.hparams.batch_size
p, dl = self.placeholder['valid'], self.dataloader['valid']
if self.hparams.comm.n_procs > 1:
self.hparams.event.default_stream_synchronize()
p['x_mel'].d, p['x_txt'].d, p['x_gat'].d = dl.next()
p['l_net'].forward(clear_buffer=True)
self.loss.data += p['l_net'].d.copy() * batch_size
self.monitor.update('valid/l_mel', p['l_mel'].d.copy(), batch_size)
self.monitor.update('valid/l_gat', p['l_gat'].d.copy(), batch_size)
self.monitor.update('valid/l_net', p['l_net'].d.copy(), batch_size)
def callback_on_epoch_end(self):
if self.hparams.comm.n_procs > 1:
self.hparams.comm.all_reduce(
[self.loss], division=True, inplace=False)
self.loss.data /= self.dataloader['valid'].size
if self.hparams.comm.rank == 0:
p, hp = self.placeholder['train'], self.hparams
self.monitor.info(f'valid/loss={self.loss.data[0]:.5f}\n')
if self.cur_epoch % hp.epochs_per_checkpoint == 0:
path = Path(hp.output_path) / 'output' / \
f'epoch_{self.cur_epoch}'
path.mkdir(parents=True, exist_ok=True)
# write attention and spectrogram outputs
for k in ('o_att', 'o_mel'):
p[k].forward(clear_buffer=True)
data = p[k].d[0].copy()
save_image(
data=data.reshape(
(-1, hp.n_mels)).T if k == 'o_mel' else data.T,
path=path / (k + '.png'),
label=('Decoder timestep', 'Encoder timestep') if k == 'o_att' else (
'Frame', 'Channel'),
title={'o_att': 'Attention',
'o_mel': 'Mel spectrogram'}[k],
figsize=(6, 5) if k == 'o_att' else (6, 3)
)
self.model.save_parameters(
str(path / f'model_{self.cur_epoch}.h5'))
self.loss.zero()
| null |
1,817 |
#! /usr/bin/env python3
# Copyright (c) 2014, 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006 The Regents of The University of Michigan
# Copyright (c) 2007,2011 The Hewlett-Packard Development Company
# Copyright (c) 2016 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABCMeta, abstractmethod
import difflib
import re
import sys
from .region import *
tabsize = 8
lead = re.compile(r"^([ \t]+)")
trail = re.compile(r"([ \t]+)$")
any_control = re.compile(r"\b(if|while|for)([ \t]*)\(")
class UserInterface(object, metaclass=ABCMeta):
def __init__(self, verbose=False):
self.verbose = verbose
def prompt(self, prompt, results, default):
while True:
result = self._prompt(prompt, results, default)
if result in results:
return result
@abstractmethod
def _prompt(self, prompt, results, default):
pass
@abstractmethod
def write(self, string):
pass
class StdioUI(UserInterface):
def _prompt(self, prompt, results, default):
return input(prompt) or default
def write(self, string):
sys.stdout.write(string)
def _re_ignore(expr):
"""Helper function to create matcher functions that ignore files
matching the regular expression"""
rex = re.compile(expr)
def match_re(fname):
return rex.match(fname)
return match_re
def _re_only(expr):
"""Helper function to create matcher functions that keep only
files matching the regular expression"""
rex = re.compile(expr)
def match_re(fname):
return not rex.match(fname)
return match_re
# This list contains a list of functions that are called to determine
# if a file should be excluded from the style matching rules or
# not. The functions are called with the file name relative to the
# repository root (without a leading slash) as their argument. A file
# is excluded if any function in the list returns true.
style_ignores = [
# Ignore external projects as they are unlikely to follow the gem5
# coding convention.
_re_ignore("^ext/"),
# Ignore test data, as they are not code
_re_ignore("^tests/(?:quick|long)/"),
_re_ignore("^tests/test-progs/hello/bin/"),
# Only include Scons files and those with extensions that suggest source
# code
_re_only(
r"^((.*\/)?(SConscript|SConstruct)|"
r".*\.(c|h|cc|hh|cpp|hpp|isa|proto))$"
),
]
def check_ignores(fname):
"""Check if a file name matches any of the ignore rules"""
for rule in style_ignores:
if rule(fname):
return True
return False
def normalized_len(line):
"""Return a normalized line length with expanded tabs"""
count = 0
for c in line:
if c == "\t":
count += tabsize - count % tabsize
else:
count += 1
return count
def METHOD_NAME(old, new, context=0):
regions = Regions()
m = difflib.SequenceMatcher(a=old, b=new, autojunk=False)
for group in m.get_grouped_opcodes(context):
first = group[0]
last = group[-1]
regions.extend(Region(first[3], last[4] + 1))
return regions
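# Worked sketch of the region computation above (hypothetical inputs):
#   old = ["a", "b", "c"]; new = ["a", "x", "c"]
# SequenceMatcher yields grouped opcodes (tag, i1, i2, j1, j2); for each group
# the code extends the result with Region(first[3], last[4] + 1), i.e. the
# changed span in `new` coordinates — here a single group starting at index 1.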
| null |
1,818 |
# Copyright (C) 2018-2023 The NeoVintageous Team (NeoVintageous).
#
# This file is part of NeoVintageous.
#
# NeoVintageous is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NeoVintageous is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NeoVintageous. If not, see <https://www.gnu.org/licenses/>.
import re
from NeoVintageous.nv import variables
from NeoVintageous.nv.vi import seqs
from NeoVintageous.nv.vim import INSERT
from NeoVintageous.nv.vim import NORMAL
from NeoVintageous.nv.vim import OPERATOR_PENDING
from NeoVintageous.nv.vim import SELECT
from NeoVintageous.nv.vim import VISUAL
from NeoVintageous.nv.vim import VISUAL_BLOCK
from NeoVintageous.nv.vim import VISUAL_LINE
mappings = {
INSERT: {},
NORMAL: {},
OPERATOR_PENDING: {},
SELECT: {},
VISUAL: {},
VISUAL_BLOCK: {},
VISUAL_LINE: {}
} # type: dict
_NAMED_KEYS = [
seqs.BACKSLASH,
seqs.BACKSPACE,
seqs.BAR,
seqs.DEL,
seqs.DOWN,
seqs.END,
seqs.ENTER,
seqs.ESC,
seqs.HOME,
seqs.INSERT,
seqs.KEYPAD_0,
seqs.KEYPAD_1,
seqs.KEYPAD_2,
seqs.KEYPAD_3,
seqs.KEYPAD_4,
seqs.KEYPAD_5,
seqs.KEYPAD_6,
seqs.KEYPAD_7,
seqs.KEYPAD_8,
seqs.KEYPAD_9,
seqs.KEYPAD_DIVIDE,
seqs.KEYPAD_ENTER,
seqs.KEYPAD_MINUS,
seqs.KEYPAD_MULTIPLY,
seqs.KEYPAD_PERIOD,
seqs.KEYPAD_PLUS,
seqs.LEADER,
seqs.LEFT,
seqs.LESS_THAN,
seqs.PAGE_DOWN,
seqs.PAGE_UP,
seqs.RIGHT,
seqs.SPACE,
seqs.TAB,
seqs.UP,
seqs.F1,
seqs.F2,
seqs.F3,
seqs.F4,
seqs.F5,
seqs.F6,
seqs.F7,
seqs.F8,
seqs.F9,
seqs.F10,
seqs.F11,
seqs.F12,
seqs.F13,
seqs.F14,
seqs.F15,
seqs.F16,
seqs.F17,
seqs.F18,
seqs.F19,
seqs.F20,
]
_NAMED_KEY_ALIASES = {
'enter': 'cr',
'return': 'cr'
}
def _resolve_named_key_alias(key: str):
try:
return _NAMED_KEY_ALIASES[key]
except KeyError:
return key
_KEYPAD_NUM = re.compile('<k(\\d)>')
def resolve_keypad_count(key: str) -> str:
keypad_num = _KEYPAD_NUM.search(key)
if keypad_num:
return keypad_num.group(1)
return key
class KeySequenceTokenizer():
"""Takes in a sequence of key names and tokenizes it."""
_EOF = -2
def __init__(self, source: str):
"""Sequence of key names in Vim notation."""
self.idx = -1
self.source = source
def _consume(self):
self.idx += 1
if self.idx >= len(self.source):
self.idx -= 1
return self._EOF
return self.source[self.idx]
def _peek_one(self):
if (self.idx + 1) >= len(self.source):
return self._EOF
return self.source[self.idx + 1]
def _is_named_key(self, key: str) -> bool:
return key.lower() in _NAMED_KEYS
def _sort_modifiers(self, modifiers: str) -> str:
"""Ensure consistency in the order of modifier letters according to c > m > s."""
if len(modifiers) == 6:
modifiers = 'c-m-s-'
elif len(modifiers) > 2:
if modifiers.startswith('s-') and modifiers.endswith('c-'):
modifiers = 'c-s-'
elif modifiers.startswith('s-') and modifiers.endswith('m-'):
modifiers = 'm-s-'
elif modifiers.startswith('m-') and modifiers.endswith('c-'):
modifiers = 'c-m-'
return modifiers
def _long_key_name(self) -> str:
key_name = ''
modifiers = ''
while True:
c = self._consume()
if c == self._EOF:
raise ValueError("expected '>' at index {0}".format(self.idx))
elif (c.lower() in ('c', 's', 'm', 'd', 'a')) and (self._peek_one() == '-'):
# <A-...> is aliased to <M-...>
if c.lower() == 'a':
c = 'm'
if c.lower() in modifiers.lower():
raise ValueError('invalid modifier sequence: {0}'.format(self.source))
modifiers += c + self._consume()
elif c == '>':
modifiers = self._sort_modifiers(modifiers.lower())
if len(key_name) == 1:
if not modifiers:
raise ValueError('wrong sequence {0}'.format(self.source))
return '<' + modifiers.upper() + key_name + '>'
elif self._is_named_key('<' + _resolve_named_key_alias(key_name.lower()) + '>'):
return '<' + modifiers.upper() + _resolve_named_key_alias(key_name.lower()) + '>'
else:
raise ValueError("'<{0}>' is not a known key".format(key_name))
else:
key_name += c
def _tokenize_one(self):
c = self._consume()
if c == '<':
return self._expand_vars(self._long_key_name())
else:
return c
def METHOD_NAME(self):
while True:
token = self._tokenize_one()
if token == self._EOF:
break
yield token
def _expand_vars(self, c: str) -> str:
return variables.get(c) if variables.is_key_name(c) else c
def tokenize_keys(keys: str) -> list:
return KeySequenceTokenizer(keys).METHOD_NAME()
_BARE_COMMAND_NAME_PATTERN = re.compile(r'^(?:".)?(?:[1-9]+)?')
def to_bare_command_name(seq: str) -> str:
# Args:
# seq (str): The command sequence.
#
# Return:
# str: The command sequence with register and count prefixes stripped,
# e.g. 2daw -> daw, "a2d2aw -> daw, etc. The special case '0' is returned
# unmodified.
if seq == '0':
return seq
# Account for d2d and similar sequences.
new_seq = list(tokenize_keys(_BARE_COMMAND_NAME_PATTERN.sub('', seq)))
return ''.join(k for k in new_seq if not k.isdigit())
def assign(seq: str, modes, *args, **kwargs):
"""
Register a 'key sequence' to 'command' mapping with NeoVintageous.
The registered key sequence must be known to NeoVintageous. The
registered command must be a ViMotionDef or ViOperatorDef.
The decorated class is instantiated with `*args` and `**kwargs`.
@keys
A list of (`mode:tuple`, `sequence:string`) pairs to map the decorated
class to.
"""
def inner(cls):
for mode in modes:
mappings[mode][seq] = cls(*args, **kwargs)
return cls
return inner
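# Tokenizer usage sketch (illustrative sequences; named-key constants come
# from nv.vi.seqs):
#   list(tokenize_keys('daw'))      ->  ['d', 'a', 'w']
#   list(tokenize_keys('<C-w>k'))   ->  ['<C-w>', 'k']
#   to_bare_command_name('"a2d2aw') ->  'daw'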
| null |
1,819 |
#!/usr/bin/env python3
# this is all kind of clunky because...
# we are using an edgelist/tuple formulation...
# but at least you can do it... ;-)
#
# generate an rmat graph
# make the graph undirected/symmetric
# find the graph's connected components using bfs
import arkouda as ak
def gen_rmat_edges(lgNv, Ne_per_v, p, perm=False):
# number of vertices
Nv = 2**lgNv
# number of edges
Ne = Ne_per_v * Nv
# probabilities
a = p
b = (1.0 - a) / 3.0
c = b
d = b
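# note: b = c = d = (1 - a) / 3 makes the four quadrant probabilities sum to 1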
# init edge arrays
ii = ak.ones(Ne,dtype=ak.int64)
jj = ak.ones(Ne,dtype=ak.int64)
# quantities to use in edge generation loop
ab = a+b
c_norm = c / (c + d)
a_norm = a / (a + b)
# generate edges
for ib in range(1,lgNv):
ii_bit = (ak.randint(0,1,Ne,dtype=ak.float64) > ab)
jj_bit = (ak.randint(0,1,Ne,dtype=ak.float64) > (c_norm * ii_bit + a_norm * (~ ii_bit)))
ii = ii + ((2**(ib-1)) * ii_bit)
jj = jj + ((2**(ib-1)) * jj_bit)
# sort all based on ii and jj using coargsort
# all edges should be sorted based on both vertices of the edge
iv = ak.coargsort((ii,jj))
# permute into sorted order
ii = ii[iv] # permute first vertex into sorted order
jj = jj[iv] # permute second vertex into sorted order
# to permute/rename vertices
if perm:
# generate permutation for new vertex numbers(names)
ir = ak.argsort(ak.randint(0,1,Nv,dtype=ak.float64))
# renumber(rename) vertices
ii = ir[ii] # rename first vertex
jj = ir[jj] # rename second vertex
#
# maybe: remove edges which are self-loops???
#
# return pair of pdarrays
return (ii,jj)
# src and dst pdarrays hold the edge list
# seeds pdarray with starting vertices/seeds
def bfs(src,dst,seeds,printLayers=False):
# holds vertices in the current layer of the bfs
Z = ak.unique(seeds)
# holds the visited vertices
V = ak.unique(Z) # holds vertices in Z to start with
# frontiers
F = [Z]
while Z.size != 0:
if printLayers:
print("Z.size = ",Z.size," Z = ",Z)
fZv = ak.in1d(src,Z) # find src vertex edges
W = ak.unique(dst[fZv]) # compress out dst vertices to match and make them unique
Z = ak.setdiff1d(W,V) # subtract out vertices already visited
V = ak.union1d(V,Z) # union current frontier into vertices already visited
F.append(Z)
return (F,V)
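# Worked sketch on a tiny symmetric graph (hypothetical): with edges
# 0-1, 1-2, 3-4 present in both directions, bfs(src, dst, ak.array([0]))
# produces frontiers [{0}, {1}, {2}, {}] and visited V = {0, 1, 2};
# vertices 3 and 4 are left for the next connected component.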
# src pdarray holding source vertices
# dst pdarray holding destination vertices
# printCComp flag to print the connected components as they are found
#
# edges needs to be symmetric/undirected
def METHOD_NAME(src, dst, printCComp=False, printLayers=False):
unvisited = ak.unique(src)
if printCComp: print("unvisited size = ", unvisited.size, unvisited)
components = []
while unvisited.size > 0:
# use lowest numbered vertex as representative vertex
rep_vertex = unvisited[0]
# bfs from rep_vertex
layers,visited = bfs(src,dst,ak.array([rep_vertex]),printLayers)
# add vertices in component to list of components
components.append(visited)
# subtract out visited from unvisited vertices
unvisited = ak.setdiff1d(unvisited,visited)
if printCComp: print(" visited size = ", visited.size, visited)
if printCComp: print("unvisited size = ", unvisited.size, unvisited)
return components
if __name__ == "__main__":
import matplotlib.pyplot as plt
import argparse, sys, gc, math, time
parser = argparse.ArgumentParser(description="Generates an rmat-structured sparse matrix as tuples (ii,jj)")
parser.add_argument('hostname', help='Hostname of arkouda server')
parser.add_argument('port', type=int, help='Port of arkouda server')
parser.add_argument('--lgNv', type=int, default=20, help='problem scale: log_2(Vertices)')
parser.add_argument('--Ne_per_v', type=int, default=2, help='number of edges per vertex')
parser.add_argument('--prob', type=float, default=0.01, help='prob of quadrant-0')
parser.add_argument('--perm', default=False, action='store_true', help='permute vertex indices/names')
parser.add_argument('--pl', default=False, action='store_true', help='print layers in bfs')
parser.add_argument('--pc', default=False, action='store_true', help='print connected comp as they are found')
args = parser.parse_args()
ak.verbose = False
ak.connect(args.hostname, args.port)
print((args.lgNv, args.Ne_per_v, args.prob, args.perm, args.pl))
(ii,jj) = gen_rmat_edges(args.lgNv, args.Ne_per_v, args.prob, perm=args.perm)
print("ii = ", (ii.size, ii))
print("ii(min,max) = ", (ii.min(), ii.max()))
print("jj = ", (jj.size, jj))
print("jj(min,max) = ", (jj.min(), jj.max()))
# make graph undirected/symmetric
# graph needs to be undirected for connected components to work
src = ak.concatenate((ii,jj))
dst = ak.concatenate((jj,ii))
print("src = ", (src.size, src))
print("src(min,max) = ", (src.min(), src.max()))
print("dst = ", (dst.size, dst))
print("dst(min,max) = ", (dst.min(), dst.max()))
# find components using BFS
components = METHOD_NAME(src, dst, printCComp=args.pc, printLayers=args.pl)
print("number of components = ",len(components))
print("representative vertices = ",[c[0] for c in components])
ak.disconnect()
| null |
1,820 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class CreateTransitRouteTableAggregationRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateTransitRouteTableAggregation')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def METHOD_NAME(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_TransitRouteTableAggregationDescription(self): # String
return self.get_query_params().get('TransitRouteTableAggregationDescription')
def set_TransitRouteTableAggregationDescription(self, TransitRouteTableAggregationDescription): # String
self.add_query_param('TransitRouteTableAggregationDescription', TransitRouteTableAggregationDescription)
def get_TransitRouteTableAggregationName(self): # String
return self.get_query_params().get('TransitRouteTableAggregationName')
def set_TransitRouteTableAggregationName(self, TransitRouteTableAggregationName): # String
self.add_query_param('TransitRouteTableAggregationName', TransitRouteTableAggregationName)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_TransitRouteTableAggregationScope(self): # String
return self.get_query_params().get('TransitRouteTableAggregationScope')
def set_TransitRouteTableAggregationScope(self, TransitRouteTableAggregationScope): # String
self.add_query_param('TransitRouteTableAggregationScope', TransitRouteTableAggregationScope)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_TransitRouteTableId(self): # String
return self.get_query_params().get('TransitRouteTableId')
def set_TransitRouteTableId(self, TransitRouteTableId): # String
self.add_query_param('TransitRouteTableId', TransitRouteTableId)
def get_TransitRouteTableAggregationCidr(self): # String
return self.get_query_params().get('TransitRouteTableAggregationCidr')
def set_TransitRouteTableAggregationCidr(self, TransitRouteTableAggregationCidr): # String
self.add_query_param('TransitRouteTableAggregationCidr', TransitRouteTableAggregationCidr)
| null |
1,821 |
"""
apipkg: control the exported namespace of a Python package.
see https://pypi.python.org/pypi/apipkg
(c) holger krekel, 2009 - MIT license
"""
import os
import sys
from types import ModuleType
from .version import version as __version__ # NOQA:F401
def _py_abspath(path):
"""
special version of abspath
that will leave paths from jython jars alone
"""
if path.startswith("__pyclasspath__"):
return path
else:
return os.path.abspath(path)
def METHOD_NAME(name):
"""try to get the version of the named distribution,
returns None on failure"""
from pkg_resources import get_distribution, DistributionNotFound
try:
dist = get_distribution(name)
except DistributionNotFound:
pass
else:
return dist.version
def initpkg(pkgname, exportdefs, attr=None, eager=False):
""" initialize given package from the export definitions. """
attr = attr or {}
oldmod = sys.modules.get(pkgname)
d = {}
f = getattr(oldmod, "__file__", None)
if f:
f = _py_abspath(f)
d["__file__"] = f
if hasattr(oldmod, "__version__"):
d["__version__"] = oldmod.__version__
if hasattr(oldmod, "__loader__"):
d["__loader__"] = oldmod.__loader__
if hasattr(oldmod, "__path__"):
d["__path__"] = [_py_abspath(p) for p in oldmod.__path__]
if hasattr(oldmod, "__package__"):
d["__package__"] = oldmod.__package__
if "__doc__" not in exportdefs and getattr(oldmod, "__doc__", None):
d["__doc__"] = oldmod.__doc__
d["__spec__"] = getattr(oldmod, "__spec__", None)
d.update(attr)
if hasattr(oldmod, "__dict__"):
oldmod.__dict__.update(d)
mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
sys.modules[pkgname] = mod
# eagerload in bpython to avoid its monkeypatching breaking packages
if "bpython" in sys.modules or eager:
for module in list(sys.modules.values()):
if isinstance(module, ApiModule):
module.__dict__
return mod
def importobj(modpath, attrname):
"""imports a module, then resolves the attrname on it"""
module = __import__(modpath, None, None, ["__doc__"])
if not attrname:
return module
retval = module
names = attrname.split(".")
for x in names:
retval = getattr(retval, x)
return retval
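# e.g. importobj("os.path", "join") returns the os.path.join function, and
# importobj("os", "") returns the os module itself.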
class ApiModule(ModuleType):
"""the magical lazy-loading module standing in for a package"""
def __docget(self):
try:
return self.__doc
except AttributeError:
if "__doc__" in self.__map__:
return self.__makeattr("__doc__")
def __docset(self, value):
self.__doc = value
__doc__ = property(__docget, __docset)
def __init__(self, name, importspec, implprefix=None, attr=None):
self.__name__ = name
self.__all__ = [x for x in importspec if x != "__onfirstaccess__"]
self.__map__ = {}
self.__implprefix__ = implprefix or name
if attr:
for name, val in attr.items():
# print "setting", self.__name__, name, val
setattr(self, name, val)
for name, importspec in importspec.items():
if isinstance(importspec, dict):
subname = "{}.{}".format(self.__name__, name)
apimod = ApiModule(subname, importspec, implprefix)
sys.modules[subname] = apimod
setattr(self, name, apimod)
else:
parts = importspec.split(":")
modpath = parts.pop(0)
attrname = parts and parts[0] or ""
if modpath[0] == ".":
modpath = implprefix + modpath
if not attrname:
subname = "{}.{}".format(self.__name__, name)
apimod = AliasModule(subname, modpath)
sys.modules[subname] = apimod
if "." not in name:
setattr(self, name, apimod)
else:
self.__map__[name] = (modpath, attrname)
def __repr__(self):
repr_list = []
if hasattr(self, "__version__"):
repr_list.append("version=" + repr(self.__version__))
if hasattr(self, "__file__"):
repr_list.append("from " + repr(self.__file__))
if repr_list:
return "<ApiModule {!r} {}>".format(self.__name__, " ".join(repr_list))
return "<ApiModule {!r}>".format(self.__name__)
def __makeattr(self, name):
"""lazily compute value for name or raise AttributeError if unknown."""
# print "makeattr", self.__name__, name
target = None
if "__onfirstaccess__" in self.__map__:
target = self.__map__.pop("__onfirstaccess__")
importobj(*target)()
try:
modpath, attrname = self.__map__[name]
except KeyError:
if target is not None and name != "__onfirstaccess__":
# retry, onfirstaccess might have set attrs
return getattr(self, name)
raise AttributeError(name)
else:
result = importobj(modpath, attrname)
setattr(self, name, result)
try:
del self.__map__[name]
except KeyError:
pass # in a recursive-import situation a double-del can happen
return result
__getattr__ = __makeattr
@property
def __dict__(self):
# force all the content of the module
# to be loaded when __dict__ is read
dictdescr = ModuleType.__dict__["__dict__"]
dict = dictdescr.__get__(self)
if dict is not None:
hasattr(self, "some")
for name in self.__all__:
try:
self.__makeattr(name)
except AttributeError:
pass
return dict
def AliasModule(modname, modpath, attrname=None):
mod = []
def getmod():
if not mod:
x = importobj(modpath, None)
if attrname is not None:
x = getattr(x, attrname)
mod.append(x)
return mod[0]
x = modpath + ("." + attrname if attrname else "")
repr_result = "<AliasModule {!r} for {!r}>".format(modname, x)
class AliasModule(ModuleType):
def __repr__(self):
return repr_result
def __getattribute__(self, name):
try:
return getattr(getmod(), name)
except ImportError:
if modpath == "pytest" and attrname is None:
# hack for pylibs py.test
return None
else:
raise
def __setattr__(self, name, value):
setattr(getmod(), name, value)
def __delattr__(self, name):
delattr(getmod(), name)
return AliasModule(str(modname))
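# Typical usage sketch for initpkg, following apipkg's documented pattern
# (package and module names here are hypothetical):
#
#   # mypkg/__init__.py
#   import apipkg
#   apipkg.initpkg(__name__, {
#       'path': {
#           'local': '._impl:LocalPath',  # mypkg.path.local -> mypkg/_impl.py:LocalPath
#       },
#   })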
| null |
1,822 |
# Support for filament width sensor
#
# Copyright (C) 2019 Mustafa YILDIZ <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
ADC_REPORT_TIME = 0.500
ADC_SAMPLE_TIME = 0.001
ADC_SAMPLE_COUNT = 8
MEASUREMENT_INTERVAL_MM = 10
class FilamentWidthSensor:
def __init__(self, config):
self.printer = config.get_printer()
self.reactor = self.printer.get_reactor()
self.pin = config.get('pin')
self.nominal_filament_dia = config.getfloat(
'default_nominal_filament_diameter', above=1.0)
self.measurement_delay = config.getfloat('measurement_delay', above=0.)
self.measurement_max_difference = config.getfloat('max_difference',
above=0.)
self.max_diameter = (self.nominal_filament_dia
+ self.measurement_max_difference)
self.min_diameter = (self.nominal_filament_dia
- self.measurement_max_difference)
self.is_active = True
# filament array [position, filamentWidth]
self.filament_array = []
self.lastFilamentWidthReading = 0
# printer objects
self.toolhead = self.ppins = self.mcu_adc = None
self.printer.register_event_handler("klippy:ready", self.METHOD_NAME)
# Start adc
self.ppins = self.printer.lookup_object('pins')
self.mcu_adc = self.ppins.setup_pin('adc', self.pin)
self.mcu_adc.setup_minmax(ADC_SAMPLE_TIME, ADC_SAMPLE_COUNT)
self.mcu_adc.setup_adc_callback(ADC_REPORT_TIME, self.adc_callback)
# extrude factor updating
self.extrude_factor_update_timer = self.reactor.register_timer(
self.extrude_factor_update_event)
# Register commands
self.gcode = self.printer.lookup_object('gcode')
self.gcode.register_command('QUERY_FILAMENT_WIDTH', self.cmd_M407)
self.gcode.register_command('RESET_FILAMENT_WIDTH_SENSOR',
self.cmd_ClearFilamentArray)
self.gcode.register_command('DISABLE_FILAMENT_WIDTH_SENSOR',
self.cmd_M406)
self.gcode.register_command('ENABLE_FILAMENT_WIDTH_SENSOR',
self.cmd_M405)
# Initialization
def METHOD_NAME(self):
# Load printer objects
self.toolhead = self.printer.lookup_object('toolhead')
# Start extrude factor update timer
self.reactor.update_timer(self.extrude_factor_update_timer,
self.reactor.NOW)
def adc_callback(self, read_time, read_value):
# read sensor value
self.lastFilamentWidthReading = round(read_value * 5, 2)
def update_filament_array(self, last_epos):
# Fill array
if len(self.filament_array) > 0:
# Get last reading position in array & calculate next
# reading position
next_reading_position = (self.filament_array[-1][0]
+ MEASUREMENT_INTERVAL_MM)
if next_reading_position <= (last_epos + self.measurement_delay):
self.filament_array.append([last_epos + self.measurement_delay,
self.lastFilamentWidthReading])
else:
# add first item to array
self.filament_array.append([self.measurement_delay + last_epos,
self.lastFilamentWidthReading])
def extrude_factor_update_event(self, eventtime):
# Update extrude factor
pos = self.toolhead.get_position()
last_epos = pos[3]
# Update filament array for lastFilamentWidthReading
self.update_filament_array(last_epos)
# Does filament exists
if self.lastFilamentWidthReading > 0.5:
if len(self.filament_array) > 0:
# Get first position in filament array
pending_position = self.filament_array[0][0]
if pending_position <= last_epos:
# Get first item in filament_array queue
item = self.filament_array.pop(0)
filament_width = item[1]
if ((filament_width <= self.max_diameter)
and (filament_width >= self.min_diameter)):
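# volumetric compensation: the extrusion percentage scales with the
# cross-sectional area ratio (nominal / measured) ** 2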
percentage = round(self.nominal_filament_dia**2
/ filament_width**2 * 100)
self.gcode.run_script("M221 S" + str(percentage))
else:
self.gcode.run_script("M221 S100")
else:
self.gcode.run_script("M221 S100")
self.filament_array = []
if self.is_active:
return eventtime + 1
else:
return self.reactor.NEVER
def cmd_M407(self, gcmd):
response = ""
if self.lastFilamentWidthReading > 0:
response += ("Filament dia (measured mm): "
+ str(self.lastFilamentWidthReading))
else:
response += "Filament NOT present"
gcmd.respond_info(response)
def cmd_ClearFilamentArray(self, gcmd):
self.filament_array = []
gcmd.respond_info("Filament width measurements cleared!")
# Set extrude multiplier to 100%
self.gcode.run_script_from_command("M221 S100")
def cmd_M405(self, gcmd):
response = "Filament width sensor Turned On"
if self.is_active:
response = "Filament width sensor is already On"
else:
self.is_active = True
# Start extrude factor update timer
self.reactor.update_timer(self.extrude_factor_update_timer,
self.reactor.NOW)
gcmd.respond_info(response)
def cmd_M406(self, gcmd):
response = "Filament width sensor Turned Off"
if not self.is_active:
response = "Filament width sensor is already Off"
else:
self.is_active = False
# Stop extrude factor update timer
self.reactor.update_timer(self.extrude_factor_update_timer,
self.reactor.NEVER)
# Clear filament array
self.filament_array = []
# Set extrude multiplier to 100%
self.gcode.run_script_from_command("M221 S100")
gcmd.respond_info(response)
def load_config(config):
return FilamentWidthSensor(config)
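# A plausible printer.cfg stanza for this module (section and option names
# inferred from the config reads above; the values are only examples):
#
#   [filament_width_sensor]
#   pin: analog5
#   default_nominal_filament_diameter: 1.75
#   measurement_delay: 70
#   max_difference: 0.2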
| null |
1,823 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class UpdateImageCacheRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, "Eci", "2018-08-08", "UpdateImageCache", "eci")
def METHOD_NAME(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId):
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount):
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId):
self.add_query_param('OwnerId', OwnerId)
def get_ImageCacheId(self):
return self.get_query_params().get("ImageCacheId")
def set_ImageCacheId(self, ImageCacheId):
self.add_query_param("ImageCacheId", ImageCacheId)
def get_SecurityGroupId(self):
return self.get_query_params().get("SecurityGroupId")
def set_SecurityGroupId(self, SecurityGroupId):
self.add_query_param("SecurityGroupId", SecurityGroupId)
def get_VSwitchId(self):
return self.get_query_params().get("VSwitchId")
def set_VSwitchId(self, VSwitchId):
self.add_query_param("VSwitchId", VSwitchId)
def get_ImageCacheName(self):
return self.get_query_params().get("ImageCacheName")
def set_ImageCacheName(self, ImageCacheName):
self.add_query_param("ImageCacheName", ImageCacheName)
def get_EipInstanceId(self):
return self.get_query_params().get("EipInstanceId")
def set_EipInstanceId(self, EipInstanceId):
self.add_query_param("EipInstanceId", EipInstanceId)
def get_ResourceGroupId(self):
return self.get_query_params().get("ResourceGroupId")
def set_ResourceGroupId(self, ResourceGroupId):
self.add_query_param("ResourceGroupId", ResourceGroupId)
def get_ClientToken(self):
return self.get_query_params().get("ClientToken")
def set_ClientToken(self, ClientToken):
self.add_query_param("ClientToken", ClientToken)
def get_ImageCacheSize(self):
return self.get_query_params().get('ImageCacheSize')
def set_ImageCacheSize(self, ImageCacheSize):
self.add_query_param('ImageCacheSize', ImageCacheSize)
def get_RetentionDays(self):
return self.get_query_params().get('RetentionDays')
def set_RetentionDays(self, RetentionDays):
self.add_query_param('RetentionDays', RetentionDays)
def get_AutoMatchImageCache(self):
return self.get_query_params().get('AutoMatchImageCache')
def set_AutoMatchImageCache(self, AutoMatchImageCache):
self.add_query_param('AutoMatchImageCache', AutoMatchImageCache)
def get_ImageRegistryCredentials(self):
return self.get_query_params().get('ImageRegistryCredentials')
def set_ImageRegistryCredentials(self, ImageRegistryCredentials):
for i in range(len(ImageRegistryCredentials)):
if ImageRegistryCredentials[i].get('Server') is not None:
self.add_query_param('ImageRegistryCredential.' + str(i + 1) + '.Server',
ImageRegistryCredentials[i].get('Server'))
if ImageRegistryCredentials[i].get('UserName') is not None:
self.add_query_param('ImageRegistryCredential.' + str(i + 1) + '.UserName',
ImageRegistryCredentials[i].get('UserName'))
if ImageRegistryCredentials[i].get('Password') is not None:
self.add_query_param('ImageRegistryCredential.' + str(i + 1) + '.Password',
ImageRegistryCredentials[i].get('Password'))
def get_AcrRegistryInfos(self):
return self.get_query_params().get('AcrRegistryInfos')
def set_AcrRegistryInfos(self, AcrRegistryInfos):
if AcrRegistryInfos is not None:
for i in range(len(AcrRegistryInfos)):
if AcrRegistryInfos[i].get('Domains') is not None:
for j in range(len(AcrRegistryInfos[i].get('Domains'))):
self.add_query_param('AcrRegistryInfo.' + str(i + 1) + '.Domain.' + str(j + 1),
AcrRegistryInfos[i].get('Domains')[j])
if AcrRegistryInfos[i].get('InstanceName') is not None:
self.add_query_param('AcrRegistryInfo.' + str(i + 1) + '.InstanceName',
AcrRegistryInfos[i].get('InstanceName'))
if AcrRegistryInfos[i].get('InstanceId') is not None:
self.add_query_param('AcrRegistryInfo.' + str(i + 1) + '.InstanceId',
AcrRegistryInfos[i].get('InstanceId'))
if AcrRegistryInfos[i].get('RegionId') is not None:
self.add_query_param('AcrRegistryInfo.' + str(i + 1) + '.RegionId',
AcrRegistryInfos[i].get('RegionId'))
def get_Images(self):
return self.get_query_params().get('Images')
def set_Images(self, Images):
for i in range(len(Images)):
if Images[i] is not None:
self.add_query_param('Image.' + str(i + 1), Images[i])
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self, Tags):
for i in range(len(Tags)):
if Tags[i].get('Key') is not None:
self.add_query_param('Tag.' + str(i + 1) + '.Key', Tags[i].get('Key'))
if Tags[i].get('Value') is not None:
self.add_query_param('Tag.' + str(i + 1) + '.Value', Tags[i].get('Value'))
def get_Flash(self):
return self.get_query_params().get("Flash")
def set_Flash(self, Flash):
self.add_query_param("Flash", Flash)
def get_Annotations(self):
return self.get_query_params().get("Annotations")
def set_Annotations(self, Annotations):
self.add_query_param("Annotations", Annotations)
| null |
1,824 |
# Copyright 2017 Akretion (http://www.akretion.com).
# @author Sébastien BEAU <[email protected]>
# Copyright 2021 Camptocamp SA (http://www.camptocamp.com)
# @author Simone Orsi <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
from odoo.addons.base_sparse_field.models.fields import Serialized
class ShopinvaderImageMixin(models.AbstractModel):
_name = "shopinvader.image.mixin"
_description = "Shopinvader Image Mixin"
_image_field = None
images = Serialized(
compute="_compute_images",
string="Shopinvader Image",
compute_sudo=True,
)
# Tech field to store images data.
# It cannot be computed because the computation
# might require generating thumbs
# which requires access to the storage files
# which requires components registry to be available
# which is not the case when Odoo starts.
images_stored = Serialized()
images_store_hash = fields.Char()
def _compute_images(self):
# Force computation if needed
self.filtered(lambda x: x.METHOD_NAME())._compute_images_stored()
for record in self:
record.images = record.images_stored
def _compute_images_stored(self):
for record in self:
record.images_stored = record._get_image_data_for_record()
record.images_store_hash = record._get_images_store_hash()
def METHOD_NAME(self):
return self.images_store_hash != self._get_images_store_hash()
@property
def _resize_scales_field(self):
return "%s_resize_ids" % self._name.replace(".", "_")
def _resize_scales(self):
return self.backend_id[self._resize_scales_field]
def _get_images_store_hash(self):
self.ensure_one()
if not self[self._image_field]:
return False
return str(hash(self._get_images_store_hash_tuple()))
def _get_images_store_hash_timestamp(self):
"""Get the timestamp of the last modification of the images
This also includes the last modification of their relation or tag records
:return: datetime
"""
images_relation = self[self._image_field]
timestamps = [
*images_relation.mapped("write_date"),
*images_relation.mapped("image_id.write_date"),
]
if "tag_id" in images_relation._fields:
timestamps += images_relation.mapped("tag_id.write_date")
return max(timestamps) if timestamps else False
def _get_images_store_hash_tuple(self):
images = self[self._image_field].image_id
# Get fresh URLs.
# Normally we have only one backend
# but potentially you can have different backends by image record.
# If any base URL changes, images should be recomputed.
# Eg: swap an image to another backend or change the CDN URL.
# NOTE: this is not perfect in terms of perf because it will cause
# calls to `get_or_create_thumbnail` when no image data has changed
# but it's better than having broken URLs.
public_urls = tuple([self._get_image_url(x) for x in images])
resize_scales = tuple(
self._resize_scales().mapped(lambda r: (r.key, r.size_x, r.size_y))
)
timestamp = self._get_images_store_hash_timestamp()
# TODO: any other bit to consider here?
return resize_scales + public_urls + (timestamp,)
def _get_image_url_key(self, image_relation):
# You can override this method to change the name of the image on
# your website. By default we use the display name of the product or
# category linked to the image being processed.
# Note: the url will be slugified by get_or_create_thumbnail
self.ensure_one()
return self.display_name
def _get_image_data_for_record(self):
self.ensure_one()
res = []
resizes = self._resize_scales()
for image_relation in self[self._image_field]:
url_key = self._get_image_url_key(image_relation)
image_data = {}
for resize in resizes:
thumbnail = image_relation.image_id.get_or_create_thumbnail(
resize.size_x, resize.size_y, url_key=url_key
)
image_data[resize.key] = self._prepare_data_resize(
thumbnail, image_relation
)
res.append(image_data)
return res
def _prepare_data_resize(self, thumbnail, image_relation):
"""Prepare data to fill images serialized field
:param thumbnail: ``storage.thumbnail`` recordset
:param image_relation: ``image.relation.abstract`` recordset
:return: dict
"""
self.ensure_one()
res = {"src": self._get_image_url(thumbnail), "alt": self.name}
if "tag_id" in image_relation._fields:
res["tag"] = image_relation.tag_id.name or ""
return res
def _get_image_url(self, image):
fname = "url" if self.backend_id.image_data_include_cdn_url else "url_path"
return image[fname]
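# Shape sketch of the serialized `images` value produced above (keys come
# from the backend resize scales; the example key and URL are hypothetical):
#   [
#       {"small": {"src": "https://cdn.example/p_small.png", "alt": "Product", "tag": ""},
#        "large": {...}},
#       ...
#   ]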
| null |
1,825 |
"""
A Corpus and Cloze Evaluation for Deeper Understanding of Commonsense Stories
https://arxiv.org/pdf/1604.01696.pdf
'Story Cloze Test' (2018) is a commonsense reasoning framework for evaluating story
understanding, story generation, and script learning. This test requires a system
to choose the correct ending to a four-sentence story.
Homepage: https://cs.rochester.edu/nlp/rocstories/
"""
import numpy as np
from lm_eval.base import rf, Task
from lm_eval.metrics import mean
_CITATION = """
@inproceedings{sharma-etal-2018-tackling,
title = "Tackling the Story Ending Biases in The Story Cloze Test",
author = "Sharma, Rishi and
Allen, James and
Bakhshandeh, Omid and
Mostafazadeh, Nasrin",
booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P18-2119",
doi = "10.18653/v1/P18-2119",
pages = "752--757",
abstract = "The Story Cloze Test (SCT) is a recent framework for evaluating story comprehension and script learning. There have been a variety of models tackling the SCT so far. Although the original goal behind the SCT was to require systems to perform deep language understanding and commonsense reasoning for successful narrative understanding, some recent models could perform significantly better than the initial baselines by leveraging human-authorship biases discovered in the SCT dataset. In order to shed some light on this issue, we have performed various data analysis and analyzed a variety of top performing models presented for this task. Given the statistics we have aggregated, we have designed a new crowdsourcing scheme that creates a new SCT dataset, which overcomes some of the biases. We benchmark a few models on the new dataset and show that the top-performing model on the original SCT dataset fails to keep up its performance. Our findings further signify the importance of benchmarking NLP systems on various evolving test sets.",
}
"""
class StoryCloze(Task):
VERSION = 0
DATASET_PATH = "story_cloze"
DATASET_NAME = None
def __init__(self, data_dir: str):
"""
StoryCloze is not publicly available. You must download the data by
following https://cs.rochester.edu/nlp/rocstories/ and pass the folder
path into the `data_dir` arg.
"""
super().__init__(data_dir=data_dir)
def has_training_docs(self):
return False
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def training_docs(self):
pass
def validation_docs(self):
return self.dataset["validation"]
def test_docs(self):
return self.dataset["test"]
def doc_to_text(self, doc):
return " ".join(
[
doc["input_sentence_1"],
doc["input_sentence_2"],
doc["input_sentence_3"],
doc["input_sentence_4"],
]
)
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return " ".join(
[
doc["input_sentence_1"],
doc["input_sentence_2"],
doc["input_sentence_3"],
doc["input_sentence_4"],
]
)
def doc_to_target(self, doc):
clozes = [doc["sentence_quiz1"], doc["sentence_quiz2"]]
# `- 1` because the `answer_right_ending` index is 1-based.
return " " + clozes[doc["answer_right_ending"] - 1]
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
clozes = [doc["sentence_quiz1"], doc["sentence_quiz2"]]
lls = [rf.loglikelihood(ctx, " {}".format(choice))[0] for choice in clozes]
return lls
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
gold = doc["answer_right_ending"] - 1
acc = 1.0 if np.argmax(results) == gold else 0.0
return {"acc": acc}
    def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
return {"acc": mean}
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
return {"acc": True}
class StoryCloze2016(StoryCloze):
DATASET_NAME = "2016"
class StoryCloze2018(StoryCloze):
DATASET_NAME = "2018"
| null |
1,826 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecd.endpoint import endpoint_data
class CreateADConnectorOfficeSiteRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'ecd', '2020-09-30', 'CreateADConnectorOfficeSite')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_CenId(self): # String
return self.get_query_params().get('CenId')
def set_CenId(self, CenId): # String
self.add_query_param('CenId', CenId)
def get_SubDomainDnsAddresss(self): # RepeatList
return self.get_query_params().get('SubDomainDnsAddress')
def set_SubDomainDnsAddresss(self, SubDomainDnsAddress): # RepeatList
for depth1 in range(len(SubDomainDnsAddress)):
self.add_query_param('SubDomainDnsAddress.' + str(depth1 + 1), SubDomainDnsAddress[depth1])
def get_CenOwnerId(self): # Long
return self.get_query_params().get('CenOwnerId')
def set_CenOwnerId(self, CenOwnerId): # Long
self.add_query_param('CenOwnerId', CenOwnerId)
def get_EnableInternetAccess(self): # Boolean
return self.get_query_params().get('EnableInternetAccess')
def set_EnableInternetAccess(self, EnableInternetAccess): # Boolean
self.add_query_param('EnableInternetAccess', EnableInternetAccess)
def get_SubDomainName(self): # String
return self.get_query_params().get('SubDomainName')
def set_SubDomainName(self, SubDomainName): # String
self.add_query_param('SubDomainName', SubDomainName)
def get_DomainPassword(self): # String
return self.get_query_params().get('DomainPassword')
def set_DomainPassword(self, DomainPassword): # String
self.add_query_param('DomainPassword', DomainPassword)
def get_VerifyCode(self): # String
return self.get_query_params().get('VerifyCode')
def set_VerifyCode(self, VerifyCode): # String
self.add_query_param('VerifyCode', VerifyCode)
def get_EnableAdminAccess(self): # Boolean
return self.get_query_params().get('EnableAdminAccess')
def set_EnableAdminAccess(self, EnableAdminAccess): # Boolean
self.add_query_param('EnableAdminAccess', EnableAdminAccess)
def get_Bandwidth(self): # Integer
return self.get_query_params().get('Bandwidth')
def set_Bandwidth(self, Bandwidth): # Integer
self.add_query_param('Bandwidth', Bandwidth)
def get_DesktopAccessType(self): # String
return self.get_query_params().get('DesktopAccessType')
def set_DesktopAccessType(self, DesktopAccessType): # String
self.add_query_param('DesktopAccessType', DesktopAccessType)
def get_AdHostname(self): # String
return self.get_query_params().get('AdHostname')
def set_AdHostname(self, AdHostname): # String
self.add_query_param('AdHostname', AdHostname)
def get_DomainName(self): # String
return self.get_query_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_query_param('DomainName', DomainName)
def get_Specification(self): # Long
return self.get_query_params().get('Specification')
def set_Specification(self, Specification): # Long
self.add_query_param('Specification', Specification)
def get_OfficeSiteName(self): # String
return self.get_query_params().get('OfficeSiteName')
def set_OfficeSiteName(self, OfficeSiteName): # String
self.add_query_param('OfficeSiteName', OfficeSiteName)
def get_MfaEnabled(self): # Boolean
return self.get_query_params().get('MfaEnabled')
def set_MfaEnabled(self, MfaEnabled): # Boolean
self.add_query_param('MfaEnabled', MfaEnabled)
def get_DomainUserName(self): # String
return self.get_query_params().get('DomainUserName')
	def set_DomainUserName(self, DomainUserName): # String
self.add_query_param('DomainUserName', DomainUserName)
def get_CidrBlock(self): # String
return self.get_query_params().get('CidrBlock')
def set_CidrBlock(self, CidrBlock): # String
self.add_query_param('CidrBlock', CidrBlock)
def get_ProtocolType(self): # String
return self.get_query_params().get('ProtocolType')
def set_ProtocolType(self, ProtocolType): # String
self.add_query_param('ProtocolType', ProtocolType)
def get_DnsAddresss(self): # RepeatList
return self.get_query_params().get('DnsAddress')
def set_DnsAddresss(self, DnsAddress): # RepeatList
for depth1 in range(len(DnsAddress)):
self.add_query_param('DnsAddress.' + str(depth1 + 1), DnsAddress[depth1])
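# A minimal usage sketch (assumption, not part of the generated file): these
# request classes are typically sent through ``aliyunsdkcore.client.AcsClient``.
# The credentials, region, and parameter values below are placeholders:
#
#     from aliyunsdkcore.client import AcsClient
#
#     client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
#     request = CreateADConnectorOfficeSiteRequest()
#     request.set_OfficeSiteName("demo-office-site")
#     request.set_DomainName("corp.example.com")
#     request.set_DnsAddresss(["10.0.0.2"])
#     response = client.do_action_with_exception(request)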
| null |
1,827 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkehpc.endpoint import endpoint_data
class EditJobTemplateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'EditJobTemplate')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StderrRedirectPath(self): # String
return self.get_query_params().get('StderrRedirectPath')
def set_StderrRedirectPath(self, StderrRedirectPath): # String
self.add_query_param('StderrRedirectPath', StderrRedirectPath)
def get_ClockTime(self): # String
return self.get_query_params().get('ClockTime')
	def set_ClockTime(self, ClockTime): # String
self.add_query_param('ClockTime', ClockTime)
def get_CommandLine(self): # String
return self.get_query_params().get('CommandLine')
def set_CommandLine(self, CommandLine): # String
self.add_query_param('CommandLine', CommandLine)
def get_ArrayRequest(self): # String
return self.get_query_params().get('ArrayRequest')
def set_ArrayRequest(self, ArrayRequest): # String
self.add_query_param('ArrayRequest', ArrayRequest)
def get_UnzipCmd(self): # String
return self.get_query_params().get('UnzipCmd')
def set_UnzipCmd(self, UnzipCmd): # String
self.add_query_param('UnzipCmd', UnzipCmd)
def get_PackagePath(self): # String
return self.get_query_params().get('PackagePath')
def set_PackagePath(self, PackagePath): # String
self.add_query_param('PackagePath', PackagePath)
def get_Mem(self): # String
return self.get_query_params().get('Mem')
def set_Mem(self, Mem): # String
self.add_query_param('Mem', Mem)
def get_StdoutRedirectPath(self): # String
return self.get_query_params().get('StdoutRedirectPath')
def set_StdoutRedirectPath(self, StdoutRedirectPath): # String
self.add_query_param('StdoutRedirectPath', StdoutRedirectPath)
def get_Variables(self): # String
return self.get_query_params().get('Variables')
def set_Variables(self, Variables): # String
self.add_query_param('Variables', Variables)
def get_RunasUser(self): # String
return self.get_query_params().get('RunasUser')
def set_RunasUser(self, RunasUser): # String
self.add_query_param('RunasUser', RunasUser)
def get_ReRunable(self): # Boolean
return self.get_query_params().get('ReRunable')
def set_ReRunable(self, ReRunable): # Boolean
self.add_query_param('ReRunable', ReRunable)
def get_Thread(self): # Integer
return self.get_query_params().get('Thread')
def set_Thread(self, Thread): # Integer
self.add_query_param('Thread', Thread)
def get_TemplateId(self): # String
return self.get_query_params().get('TemplateId')
def set_TemplateId(self, TemplateId): # String
self.add_query_param('TemplateId', TemplateId)
def get_Priority(self): # Integer
return self.get_query_params().get('Priority')
def set_Priority(self, Priority): # Integer
self.add_query_param('Priority', Priority)
def get_Gpu(self): # Integer
return self.get_query_params().get('Gpu')
def set_Gpu(self, Gpu): # Integer
self.add_query_param('Gpu', Gpu)
def get_WithUnzipCmd(self): # Boolean
return self.get_query_params().get('WithUnzipCmd')
def set_WithUnzipCmd(self, WithUnzipCmd): # Boolean
self.add_query_param('WithUnzipCmd', WithUnzipCmd)
def get_Node(self): # Integer
return self.get_query_params().get('Node')
def set_Node(self, Node): # Integer
self.add_query_param('Node', Node)
def get_Task(self): # Integer
return self.get_query_params().get('Task')
def set_Task(self, Task): # Integer
self.add_query_param('Task', Task)
def get_InputFileUrl(self): # String
return self.get_query_params().get('InputFileUrl')
def set_InputFileUrl(self, InputFileUrl): # String
self.add_query_param('InputFileUrl', InputFileUrl)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_Queue(self): # String
return self.get_query_params().get('Queue')
def set_Queue(self, Queue): # String
self.add_query_param('Queue', Queue)
| null |
1,828 |
# Copyright (c) ZenML GmbH 2023. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Source classes."""
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional, Type, Union
from uuid import UUID
from pydantic import BaseModel, Extra, validator
from zenml.logger import get_logger
if TYPE_CHECKING:
AnyClassMethod = classmethod[Any] # type: ignore[type-arg]
logger = get_logger(__name__)
class SourceType(Enum):
"""Enum representing different types of sources."""
USER = "user"
BUILTIN = "builtin"
INTERNAL = "internal"
DISTRIBUTION_PACKAGE = "distribution_package"
CODE_REPOSITORY = "code_repository"
UNKNOWN = "unknown"
class Source(BaseModel):
"""Source specification.
A source specifies a module name as well as an optional attribute of that
module. These values can be used to import the module and get the value
of the attribute inside the module.
Example:
The source `Source(module="zenml.config.source", attribute="Source")`
references the class that this docstring is describing. This class is
defined in the `zenml.config.source` module and the name of the
attribute is the class name `Source`.
Attributes:
module: The module name.
attribute: Optional name of the attribute inside the module.
type: The type of the source.
"""
module: str
attribute: Optional[str] = None
type: SourceType
@classmethod
def from_import_path(
        cls, import_path: str, is_module_path: bool = False
) -> "Source":
"""Creates a source from an import path.
Args:
import_path: The import path.
is_module_path: If the import path points to a module or not.
Raises:
ValueError: If the import path is empty.
Returns:
The source.
"""
        if not import_path:
raise ValueError(
"Invalid empty import path. The import path needs to refer "
"to a Python module and an optional attribute of that module."
)
# Remove internal version pins for backwards compatibility
if "@" in METHOD_NAME:
METHOD_NAME = METHOD_NAME.split("@", 1)[0]
if is_module_path or "." not in METHOD_NAME:
module = METHOD_NAME
attribute = None
else:
            module, attribute = import_path.rsplit(".", maxsplit=1)
return Source(
module=module, attribute=attribute, type=SourceType.UNKNOWN
)
@property
    def import_path(self) -> str:
"""The import path of the source.
Returns:
The import path of the source.
"""
if self.attribute:
return f"{self.module}.{self.attribute}"
else:
return self.module
@property
def is_internal(self) -> bool:
"""If the source is internal (=from the zenml package).
Returns:
True if the source is internal, False otherwise
"""
if self.type not in {SourceType.UNKNOWN, SourceType.INTERNAL}:
return False
return self.module.split(".", maxsplit=1)[0] == "zenml"
@property
def is_module_source(self) -> bool:
"""If the source is a module source.
Returns:
If the source is a module source.
"""
return self.attribute is None
class Config:
"""Pydantic config class."""
extra = Extra.allow
class DistributionPackageSource(Source):
"""Source representing an object from a distribution package.
Attributes:
package_name: Name of the package.
version: The package version.
"""
package_name: str
version: Optional[str] = None
type: SourceType = SourceType.DISTRIBUTION_PACKAGE
@validator("type")
def _validate_type(cls, value: SourceType) -> SourceType:
"""Validate the source type.
Args:
value: The source type.
Raises:
ValueError: If the source type is not `DISTRIBUTION_PACKAGE`.
Returns:
The source type.
"""
if value != SourceType.DISTRIBUTION_PACKAGE:
raise ValueError("Invalid source type.")
return value
class CodeRepositorySource(Source):
"""Source representing an object from a code repository.
Attributes:
repository_id: The code repository ID.
commit: The commit.
subdirectory: The subdirectory of the source root inside the code
repository.
"""
repository_id: UUID
commit: str
subdirectory: str
type: SourceType = SourceType.CODE_REPOSITORY
@validator("type")
def _validate_type(cls, value: SourceType) -> SourceType:
"""Validate the source type.
Args:
value: The source type.
Raises:
ValueError: If the source type is not `CODE_REPOSITORY`.
Returns:
The source type.
"""
if value != SourceType.CODE_REPOSITORY:
raise ValueError("Invalid source type.")
return value
def convert_source_validator(*attributes: str) -> "AnyClassMethod":
"""Function to convert pydantic fields containing legacy class paths.
In older versions, sources (sometimes also called class paths) like
`zenml.materializers.BuiltInMaterializer` were stored as strings in our
configuration classes. These strings got replaced by a separate class, and
this function returns a validator to convert those old strings to the new
classes.
Args:
*attributes: List of attributes to convert.
Returns:
Pydantic validator class method to be used on BaseModel subclasses
to convert source fields.
"""
@validator(*attributes, pre=True, allow_reuse=True)
def _convert_source(
cls: Type[BaseModel], value: Union[Source, str, None]
) -> Optional[Source]:
"""Converts an old source string to a source object.
Args:
cls: The class on which the attributes are defined.
value: Source string or object.
Returns:
The converted source.
"""
if isinstance(value, str):
value = Source.from_import_path(value)
return value
return _convert_source
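# A small round-trip example (illustrative, not part of the original module),
# exercising only the classes and properties defined above:
#
#     src = Source.from_import_path("zenml.materializers.BuiltInMaterializer")
#     assert src.module == "zenml.materializers"
#     assert src.attribute == "BuiltInMaterializer"
#     assert src.import_path == "zenml.materializers.BuiltInMaterializer"
#     assert src.is_internal  # module root is "zenml" and type is UNKNOWN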
| null |
1,829 |
# coding=utf-8
# Copyright 2018-2023 EvaDB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from evadb.parser.create_function_statement import CreateFunctionStatement
from evadb.parser.create_statement import CreateDatabaseStatement, CreateTableStatement
from evadb.parser.drop_object_statement import DropObjectStatement
from evadb.parser.explain_statement import ExplainStatement
from evadb.parser.insert_statement import InsertTableStatement
from evadb.parser.load_statement import LoadDataStatement
from evadb.parser.parser import Parser
from evadb.parser.rename_statement import RenameTableStatement
from evadb.parser.select_statement import SelectStatement
from evadb.parser.show_statement import ShowStatement
from evadb.parser.types import ObjectType
from evadb.parser.use_statement import UseStatement
# List of statements for which we omit binder and optimizer and pass the statement
# directly to the executor.
SKIP_BINDER_AND_OPTIMIZER_STATEMENTS = (CreateDatabaseStatement, UseStatement)
def parse_expression(expr: str):
mock_query = f"SELECT {expr} FROM DUMMY;"
stmt = Parser().parse(mock_query)[0]
assert isinstance(stmt, SelectStatement), "Expected a select statement"
return stmt.target_list
def parse_predicate_expression(expr: str):
mock_query = f"SELECT * FROM DUMMY WHERE {expr};"
stmt = Parser().parse(mock_query)[0]
assert isinstance(stmt, SelectStatement), "Expected a select statement"
return stmt.where_clause
def parse_table_clause(expr: str, chunk_size: int = None, chunk_overlap: int = None):
mock_query_parts = [f"SELECT * FROM {expr}"]
if chunk_size:
mock_query_parts.append(f"CHUNK_SIZE {chunk_size}")
if chunk_overlap:
mock_query_parts.append(f"CHUNK_OVERLAP {chunk_overlap}")
mock_query_parts.append(";")
mock_query = " ".join(mock_query_parts)
stmt = Parser().parse(mock_query)[0]
assert isinstance(stmt, SelectStatement), "Expected a select statement"
assert stmt.from_table.is_table_atom
return stmt.from_table
def parse_create_function(
function_name: str,
if_not_exists: bool,
function_file_path: str,
type: str,
**kwargs,
):
mock_query = (
f"CREATE FUNCTION IF NOT EXISTS {function_name}"
if if_not_exists
else f"CREATE FUNCTION {function_name}"
)
if type is not None:
mock_query += f" TYPE {type}"
task, model = kwargs["task"], kwargs["model"]
if task is not None and model is not None:
mock_query += f" TASK '{task}' MODEL '{model}'"
else:
mock_query += f" IMPL '{function_file_path}'"
mock_query += ";"
stmt = Parser().parse(mock_query)[0]
assert isinstance(
stmt, CreateFunctionStatement
), "Expected a create function statement"
return stmt
def parse_create_table(table_name: str, if_not_exists: bool, columns: str, **kwargs):
mock_query = (
f"CREATE TABLE IF NOT EXISTS {table_name} ({columns});"
if if_not_exists
else f"CREATE TABLE {table_name} ({columns});"
)
stmt = Parser().parse(mock_query)[0]
assert isinstance(stmt, CreateTableStatement), "Expected a create table statement"
return stmt
def parse_show(show_type: str, **kwargs):
mock_query = f"SHOW {show_type};"
stmt = Parser().parse(mock_query)[0]
assert isinstance(stmt, ShowStatement), "Expected a show statement"
return stmt
def parse_explain(query: str, **kwargs):
mock_query = f"EXPLAIN {query};"
stmt = Parser().parse(mock_query)[0]
    assert isinstance(stmt, ExplainStatement), "Expected an explain statement"
return stmt
def parse_insert(table_name: str, columns: str, values: str, **kwargs):
mock_query = f"INSERT INTO {table_name} {columns} VALUES {values};"
stmt = Parser().parse(mock_query)[0]
    assert isinstance(stmt, InsertTableStatement), "Expected an insert statement"
return stmt
def parse_load(table_name: str, file_regex: str, format: str, **kwargs):
mock_query = f"LOAD {format.upper()} '{file_regex}' INTO {table_name};"
stmt = Parser().parse(mock_query)[0]
assert isinstance(stmt, LoadDataStatement), "Expected a load statement"
return stmt
def parse_drop(object_type: ObjectType, name: str, if_exists: bool):
mock_query = f"DROP {object_type}"
mock_query = (
f" {mock_query} IF EXISTS {name} " if if_exists else f"{mock_query} {name}"
)
mock_query += ";"
stmt = Parser().parse(mock_query)[0]
assert isinstance(stmt, DropObjectStatement), "Expected a drop object statement"
return stmt
def parse_drop_table(table_name: str, if_exists: bool):
return parse_drop(ObjectType.TABLE, table_name, if_exists)
def parse_drop_function(function_name: str, if_exists: bool):
return parse_drop(ObjectType.FUNCTION, function_name, if_exists)
def parse_drop_index(index_name: str, if_exists: bool):
return parse_drop(ObjectType.INDEX, index_name, if_exists)
def parse_drop_database(database_name: str, if_exists: bool):
return parse_drop(ObjectType.DATABASE, database_name, if_exists)
def parse_query(query):
stmt = Parser().parse(query)
assert len(stmt) == 1
return stmt[0]
def parse_lateral_join(expr: str, alias: str):
mock_query = f"SELECT * FROM DUMMY JOIN LATERAL {expr} AS {alias};"
stmt = Parser().parse(mock_query)[0]
assert isinstance(stmt, SelectStatement), "Expected a select statement"
assert stmt.from_table.is_join()
return stmt.from_table.join_node.right
def parse_create_vector_index(index_name: str, table_name: str, expr: str, using: str):
mock_query = f"CREATE INDEX {index_name} ON {table_name} ({expr}) USING {using};"
stmt = Parser().parse(mock_query)[0]
return stmt
def parse_sql_orderby_expr(expr: str):
mock_query = f"SELECT * FROM DUMMY ORDER BY {expr};"
stmt = Parser().parse(mock_query)[0]
assert isinstance(stmt, SelectStatement), "Expected a select statement"
return stmt.orderby_list
def parse_rename(old_name: str, new_name: str):
mock_query = f"RENAME TABLE {old_name} TO {new_name};"
stmt = Parser().parse(mock_query)[0]
assert isinstance(stmt, RenameTableStatement), "Expected a rename statement"
return stmt
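# Illustrative usage of the helpers above (a sketch, not part of the original
# module; the table and column names are hypothetical):
#
#     exprs = parse_expression("id, name")          # target list of a SELECT
#     pred = parse_predicate_expression("id > 10")  # WHERE clause expression
#     stmt = parse_create_table("t", True, "id INTEGER")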
| null |
1,830 |
# Copyright (c) 2021 Arm Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from os import path
import m5
from m5.objects import *
m5.util.addToPath("../..")
from common import SysPaths
class ArmSstSystem(ArmSystem):
def __init__(self, cpu_clock_rate, **kwargs):
super(ArmSstSystem, self).__init__(**kwargs)
self.voltage_domain = VoltageDomain(voltage="1.0V")
self.clk_domain = SrcClockDomain(
clock=cpu_clock_rate, voltage_domain=Parent.voltage_domain
)
self.terminal = Terminal()
self.vncserver = VncServer()
self.iobus = IOXBar()
# Since the latency from CPU to the bus was set in SST,
# additional latency is undesirable.
self.membus = NoncoherentXBar(
frontend_latency=0,
forward_latency=0,
response_latency=0,
header_latency=0,
width=64,
)
self.membus.badaddr_responder = BadAddr()
self.membus.default = self.membus.badaddr_responder.pio
_my_ranges = [
AddrRange(0, size="64MiB"),
AddrRange(0x80000000, size="16GiB"),
]
self.memory_outgoing_bridge = OutgoingRequestBridge(
physical_address_ranges=_my_ranges
)
self.memory_outgoing_bridge.port = self.membus.mem_side_ports
self.cpu = [TimingSimpleCPU(cpu_id=0)]
self.mem_mode = "timing"
for cpu in self.cpu:
cpu.createThreads()
cpu.icache_port = self.membus.cpu_side_ports
cpu.dcache_port = self.membus.cpu_side_ports
cpu.mmu.connectWalkerPorts(
self.membus.cpu_side_ports, self.membus.cpu_side_ports
)
self.bridge = Bridge(delay="50ns")
self.bridge.mem_side_port = self.iobus.cpu_side_ports
self.bridge.cpu_side_port = self.membus.mem_side_ports
    def getMemRanges(self, mem_size):
"""
Define system memory ranges. This depends on the physical
memory map provided by the realview platform and by the memory
size provided by the user (mem_size argument).
The method is iterating over all platform ranges until they cover
the entire user's memory requirements.
"""
mem_ranges = []
for mem_range in self.platform._mem_regions:
size_in_range = min(mem_size, mem_range.size())
mem_ranges.append(
AddrRange(start=mem_range.start, size=size_in_range)
)
mem_size -= size_in_range
if mem_size == 0:
return mem_ranges
raise ValueError("memory size too big for platform capabilities")
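# Worked example for getMemRanges (illustrative; the concrete region layout
# below is hypothetical): with platform regions [64 MiB @ 0x0,
# 16 GiB @ 0x80000000] and mem_size = 4 GiB, the first region contributes
# 64 MiB, the second contributes the remaining 4 GiB - 64 MiB, and the method
# returns two AddrRanges covering exactly 4 GiB in total.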
def createArmPlatform(system):
class VExpress_GEM5_V1_SST(VExpress_GEM5_V1):
bootmem = SubSystem()
system.platform = VExpress_GEM5_V1_SST()
if hasattr(system.platform.gic, "cpu_addr"):
system.gic_cpu_addr = system.platform.gic.cpu_addr
system.platform.attachOnChipIO(system.membus, system.bridge)
system.platform.attachIO(system.iobus)
system.platform.setupBootLoader(system, SysPaths.binary)
parser = argparse.ArgumentParser()
parser.add_argument("--kernel", help="Path to the Kernel")
parser.add_argument(
"--cpu-clock-rate", type=str, help="CPU clock rate, e.g. 3GHz"
)
parser.add_argument("--memory-size", type=str, help="Memory size, e.g. 4GiB")
parser.add_argument("--root-device", type=str, default="/dev/vda")
args = parser.parse_args()
system = ArmSstSystem(args.cpu_clock_rate)
# Setup Linux workload
system.workload = ArmFsLinux()
system.workload.object_file = args.kernel
system.workload.dtb_filename = path.join(m5.options.outdir, "system.dtb")
system.workload.addr_check = False
# Create RealView platform
createArmPlatform(system)
system.mem_ranges = system.getMemRanges(int(Addr(args.memory_size)))
system.system_outgoing_bridge = OutgoingRequestBridge()
system.system_port = system.system_outgoing_bridge.port
system.generateDtb(system.workload.dtb_filename)
# Linux boot command flags
kernel_cmd = [
# Tell Linux to use the simulated serial port as a console
"console=ttyAMA0",
    # Hard-code timing calibration (lpj = loops per jiffy) to speed up boot.
"lpj=19988480",
# Disable address space randomisation to get a consistent
# memory layout.
"norandmaps",
# Tell Linux where to find the root disk image.
f"root={args.root_device}",
# Mount the root disk read-write by default.
"rw",
# Tell Linux about the amount of physical memory present.
f"mem={args.memory_size}",
]
system.workload.command_line = " ".join(kernel_cmd)
for cpu in system.cpu:
cpu.createInterruptController()
root = Root(full_system=True, system=system)
| null |
1,831 |
from django import forms
from django.urls import reverse
from django.utils.translation import gettext as _
from creme.creme_core.core.setting_key import SettingKey, setting_key_registry
from creme.creme_core.models import SettingValue
from creme.creme_core.tests.base import CremeTestCase
# TODO: clean registry in tearDown...
class SettingTestCase(CremeTestCase):
@staticmethod
def _build_edit_url(setting_value):
return reverse('creme_config__edit_setting', args=(setting_value.id,))
def test_edit_string(self):
self.login_as_root()
sk = SettingKey(
id='persons-test_edit_string', description='Page title',
app_label='persons', type=SettingKey.STRING, hidden=False,
)
setting_key_registry.register(sk)
title = 'May the source be with you'
sv = SettingValue(key=sk)
sv.value = title
sv.save()
url = self._build_edit_url(sv)
response1 = self.assertGET200(url)
self.assertTemplateUsed(response1, 'creme_core/generics/blockform/edit-popup.html')
ctxt1 = response1.context
self.assertEqual(_('Edit «{key}»').format(key=sk.description), ctxt1.get('title'))
self.assertEqual(_('Save the modifications'), ctxt1.get('submit_label'))
with self.assertNoException():
value_f1 = ctxt1['form'].fields['value']
self.assertIsInstance(value_f1, forms.CharField)
self.assertIsInstance(value_f1.widget, forms.Textarea)
self.assertEqual(title, value_f1.initial)
# ---
title = title.upper()
self.assertNoFormError(self.client.post(url, data={'value': title}))
self.assertEqual(title, self.refresh(sv).value)
    def test_edit_int(self):
self.login_as_root()
sk = SettingKey(
id='persons-test_edit_int', description='Page size',
app_label='persons', type=SettingKey.INT,
)
setting_key_registry.register(sk)
size = 156
sv = SettingValue(key=sk)
sv.value = size
sv.save()
size += 15
self.assertNoFormError(self.client.post(self._build_edit_url(sv), data={'value': size}))
self.assertEqual(size, self.refresh(sv).value)
def test_edit_bool(self):
self.login_as_root()
sk = SettingKey(
id='persons-test_edit_bool', description='Display logo ?',
app_label='persons', type=SettingKey.BOOL,
)
setting_key_registry.register(sk)
sv = SettingValue(key=sk)
sv.value = True
sv.save()
# False -> empty POST
self.assertNoFormError(self.client.post(self._build_edit_url(sv), data={}))
self.assertFalse(self.refresh(sv).value)
def test_edit_hour(self):
self.login_as_root()
sk = SettingKey(
id='persons-test_edit_hour', description='Reminder hour',
app_label='persons', type=SettingKey.HOUR,
)
setting_key_registry.register(sk)
hour = 11
sv = SettingValue(key=sk)
sv.value = hour
sv.save()
url = self._build_edit_url(sv)
hour += 1
self.assertNoFormError(self.client.post(url, data={'value': hour}))
self.assertEqual(hour, self.refresh(sv).value)
response1 = self.assertPOST200(url, data={'value': 24})
self.assertFormError(
response1.context['form'],
field='value',
errors=_('Ensure this value is less than or equal to %(limit_value)s.') % {
'limit_value': 23,
},
)
# ---
response2 = self.assertPOST200(url, data={'value': -1})
self.assertFormError(
response2.context['form'],
field='value',
errors=_('Ensure this value is greater than or equal to %(limit_value)s.') % {
'limit_value': 0,
},
)
def test_edit_email(self):
self.login_as_root()
sk = SettingKey(
id='persons-test_edit_email', description='Campaign Sender',
app_label='persons', type=SettingKey.EMAIL,
)
setting_key_registry.register(sk)
email = '[email protected]'
sv = SettingValue(key=sk)
sv.value = email
sv.save()
url = self._build_edit_url(sv)
response = self.assertPOST200(url, data={'value': 42})
self.assertFormError(
response.context['form'],
field='value', errors=_('Enter a valid email address.'),
)
email = '[email protected]'
self.assertNoFormError(self.client.post(url, data={'value': email}))
self.assertEqual(email, self.refresh(sv).value)
def test_edit_hidden01(self):
"Hidden => not editable (value=True)"
self.login_as_root()
sk = SettingKey(
id='persons-test_edit_hidden01', description='Display logo ?',
app_label='persons', type=SettingKey.BOOL, hidden=True,
)
setting_key_registry.register(sk)
sv = SettingValue(key=sk)
sv.value = True
sv.save()
self.assertGET409(self._build_edit_url(sv))
def test_edit_hidden02(self):
"Hidden => not editable (value=False)."
self.login_as_root()
sk = SettingKey(
id='persons-test_edit_hidden02', description='Display logo ?',
app_label='persons', type=SettingKey.BOOL, hidden=True,
)
setting_key_registry.register(sk)
sv = SettingValue(key=sk)
sv.value = False
sv.save()
self.assertGET409(self._build_edit_url(sv))
def test_edit_blank01(self):
self.login_as_root()
sk = SettingKey(
id='persons-test_edit_blank01', description='API key',
app_label='persons', type=SettingKey.STRING,
blank=True,
)
setting_key_registry.register(sk)
sv = SettingValue(key=sk)
sv.value = '123-456-abc'
sv.save()
self.assertNoFormError(self.client.post(self._build_edit_url(sv), data={'value': ''}))
sv = self.refresh(sv)
self.assertEqual('', sv.value_str)
self.assertIsNone(sv.value)
def test_edit_blank02(self):
self.login_as_root()
sk = SettingKey(
id='persons-test_edit_blank02', description='API key',
app_label='persons', type=SettingKey.INT,
blank=True,
)
setting_key_registry.register(sk)
sv = SettingValue(key=sk)
sv.value = 12345
sv.save()
self.assertNoFormError(self.client.post(self._build_edit_url(sv)))
sv = self.refresh(sv)
self.assertEqual('', sv.value_str)
self.assertIsNone(sv.value)
# ---
self.assertNoFormError(self.client.post(self._build_edit_url(sv), data={'value': ''}))
sv = self.refresh(sv)
self.assertEqual('', sv.value_str)
self.assertIsNone(sv.value)
def test_edit_app_perm01(self):
self.login_as_standard(admin_4_apps=['creme_core'])
sk = SettingKey(
id='creme_core-test_edit_app_perm01', description='Page title',
app_label='creme_core', type=SettingKey.STRING, hidden=False,
)
setting_key_registry.register(sk)
sv = SettingValue(key=sk)
sv.value = 'May the source be with you'
sv.save()
self.assertGET200(self._build_edit_url(sv))
def test_edit_app_perm02(self):
"No app perm => error."
self.login_as_standard()
sk = SettingKey(
id='creme_core-test_edit_app_perm02', description='Page title',
app_label='creme_core', type=SettingKey.STRING, hidden=False,
)
setting_key_registry.register(sk)
sv = SettingValue(key=sk)
sv.value = 'May the source be with you'
sv.save()
self.assertGET403(self._build_edit_url(sv))
| null |
1,832 |
# Copyright (c) Donald Stufft and individual contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The following code derives from packaging 21.3 before the LegacyVersion class
# was removed: https://github.com/pypa/packaging/blob/21.3/packaging/version.py
import re
from typing import (
Iterator,
List,
Tuple,
Union,
)
from packaging.version import (
_BaseVersion,
InvalidVersion,
Version,
)
__all__ = ["parse_version", "LegacyVersion"]
LegacyCmpKey = Tuple[int, Tuple[str, ...]]
def parse_version(version: str) -> Union["LegacyVersion", Version]:
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
class LegacyVersion(_BaseVersion):
def __init__(self, version: str) -> None:
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self) -> str:
return self._version
def __repr__(self) -> str:
return f"<LegacyVersion('{self}')>"
@property
def public(self) -> str:
return self._version
@property
def base_version(self) -> str:
return self._version
@property
def epoch(self) -> int:
return -1
@property
def release(self) -> None:
return None
@property
def pre(self) -> None:
return None
@property
def post(self) -> None:
return None
@property
def dev(self) -> None:
return None
@property
def local(self) -> None:
return None
@property
def is_prerelease(self) -> bool:
return False
@property
def is_postrelease(self) -> bool:
return False
@property
    def is_devrelease(self) -> bool:
return False
_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
_legacy_version_replacement_map = {
"pre": "c",
"preview": "c",
"-": "final-",
"rc": "c",
"dev": "@",
}
def _parse_version_parts(s: str) -> Iterator[str]:
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version: str) -> LegacyCmpKey:
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
    # greater than or equal to 0. This will effectively sort a LegacyVersion,
    # which uses the de facto standard originally implemented by setuptools,
    # before all PEP 440 versions.
epoch = -1
    # This scheme is taken from pkg_resources.parse_version of setuptools
    # prior to its adoption of the packaging library.
parts: List[str] = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
return epoch, tuple(parts)
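# Illustrative behaviour (not part of the original module): legacy versions
# always sort before valid PEP 440 versions because of the hardcoded -1 epoch.
#
#     assert isinstance(parse_version("1.0"), Version)
#     assert isinstance(parse_version("french toast"), LegacyVersion)
#     assert parse_version("french toast") < parse_version("0.1")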
| null |
1,833 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkarms.endpoint import endpoint_data
class SearchTracesByPageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'ARMS', '2019-08-08', 'SearchTracesByPage','arms')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_EndTime(self): # Long
return self.get_query_params().get('EndTime')
def set_EndTime(self, EndTime): # Long
self.add_query_param('EndTime', EndTime)
def get_Pid(self): # String
return self.get_query_params().get('Pid')
def set_Pid(self, Pid): # String
self.add_query_param('Pid', Pid)
def get_StartTime(self): # Long
return self.get_query_params().get('StartTime')
def set_StartTime(self, StartTime): # Long
self.add_query_param('StartTime', StartTime)
def get_Reverse(self): # Boolean
return self.get_query_params().get('Reverse')
def set_Reverse(self, Reverse): # Boolean
self.add_query_param('Reverse', Reverse)
def get_MinDuration(self): # Long
return self.get_query_params().get('MinDuration')
def set_MinDuration(self, MinDuration): # Long
self.add_query_param('MinDuration', MinDuration)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_IsError(self): # Boolean
return self.get_query_params().get('IsError')
	def set_IsError(self, IsError): # Boolean
self.add_query_param('IsError', IsError)
def get_Tagss(self): # RepeatList
return self.get_query_params().get('Tags')
def set_Tagss(self, Tags): # RepeatList
for depth1 in range(len(Tags)):
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
def get_ServiceIp(self): # String
return self.get_query_params().get('ServiceIp')
def set_ServiceIp(self, ServiceIp): # String
self.add_query_param('ServiceIp', ServiceIp)
def get_ExclusionFilterss(self): # RepeatList
return self.get_query_params().get('ExclusionFilters')
def set_ExclusionFilterss(self, ExclusionFilters): # RepeatList
for depth1 in range(len(ExclusionFilters)):
if ExclusionFilters[depth1].get('Value') is not None:
self.add_query_param('ExclusionFilters.' + str(depth1 + 1) + '.Value', ExclusionFilters[depth1].get('Value'))
if ExclusionFilters[depth1].get('Key') is not None:
self.add_query_param('ExclusionFilters.' + str(depth1 + 1) + '.Key', ExclusionFilters[depth1].get('Key'))
def get_OperationName(self): # String
return self.get_query_params().get('OperationName')
def set_OperationName(self, OperationName): # String
self.add_query_param('OperationName', OperationName)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_ServiceName(self): # String
return self.get_query_params().get('ServiceName')
def set_ServiceName(self, ServiceName): # String
self.add_query_param('ServiceName', ServiceName)
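# A minimal usage sketch (assumption, not part of the generated file), showing
# how the RepeatList setters above expand into indexed query parameters:
#
#     request = SearchTracesByPageRequest()
#     request.set_Tagss([{"Key": "env", "Value": "prod"}])
#     # adds query params: Tags.1.Key=env and Tags.1.Value=prod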
| null |
1,834 |
from __future__ import print_function
import IMP
import IMP.test
import IMP.domino
import IMP.core
class TrivialParticleStates(IMP.domino.ParticleStates):
def __init__(self, n):
IMP.domino.ParticleStates.__init__(self)
self.n = n
def get_number_of_particle_states(self):
return self.n
def load_state(self, i, p):
pass
def do_show(self, stream):
pass
class Tests(IMP.test.TestCase):
def _test_basic(self, nm):
"""Testing default subset states"""
m = IMP.Model()
ps = []
ns = 5
np = 4
for i in range(0, np):
ps.append(IMP.Particle(m))
pst = IMP.domino.ParticleStatesTable()
pft = IMP.domino.ExclusionSubsetFilterTable(pst)
dsst = nm(pst, [pft])
for p in ps:
pst.set_particle_states(p, TrivialParticleStates(ns))
lsc = IMP.domino.Subset(ps)
pss = IMP.domino.PackedAssignmentContainer()
dsst.load_assignments(lsc, pss)
ss = pss.get_assignments((0, pss.get_number_of_assignments()))
self.assertEqual(len(ss), ns ** len(ps))
all_states = []
for state in ss:
# print state
# print all_states
self.assertNotIn(state, all_states)
all_states.append(state)
def _test_equivalencies(self, nm):
"""Testing default subset states with equivalencies"""
m = IMP.Model()
ps = []
ns = 5
np = 4
for i in range(0, np):
ps.append(IMP.Particle(m))
pst = IMP.domino.ParticleStatesTable()
tps = TrivialParticleStates(ns)
pst.set_particle_states(ps[0], tps)
pst.set_particle_states(ps[1], tps)
for p in ps[2:]:
pst.set_particle_states(p, TrivialParticleStates(ns))
pft = IMP.domino.ExclusionSubsetFilterTable(pst)
dsst = nm(pst, [pft])
lsc = IMP.domino.Subset(ps)
IMP.set_log_level(IMP.SILENT)
pss = IMP.domino.PackedAssignmentContainer()
dsst.load_assignments(lsc, pss)
ss = pss.get_assignments((0, pss.get_number_of_assignments()))
self.assertEqual(len(ss), ns ** (len(ps) - 2) * (ns) * (ns - 1))
all_states = []
print("testing")
for state in ss:
# print state
# print all_states
self.assertNotIn(state, all_states)
all_states.append(state)
def _test_explicit(self, nm):
"""Testing default subset states with explicit equivalencies"""
m = IMP.Model()
ps = []
ns = 5
np = 4
for i in range(0, np):
ps.append(IMP.Particle(m))
pst = IMP.domino.ParticleStatesTable()
tps = TrivialParticleStates(ns)
pst.set_particle_states(ps[0], tps)
pst.set_particle_states(ps[1], tps)
for p in ps[2:]:
pst.set_particle_states(p, TrivialParticleStates(ns))
pft = IMP.domino.ExclusionSubsetFilterTable()
pft.add_pair((ps[0], ps[1]))
dsst = nm(pst, [pft])
lsc = IMP.domino.Subset(ps)
IMP.set_log_level(IMP.SILENT)
pss = IMP.domino.PackedAssignmentContainer()
dsst.load_assignments(lsc, pss)
ss = pss.get_assignments((0, pss.get_number_of_assignments()))
self.assertEqual(len(ss), ns ** (len(ps) - 2) * (ns) * (ns - 1))
all_states = []
print("testing")
for state in ss:
# print state
# print all_states
self.assertNotIn(state, all_states)
all_states.append(state)
def test_bandb(self):
"""Test branch and bound subset states"""
self._test_basic(IMP.domino.BranchAndBoundAssignmentsTable)
self._test_equivalencies(IMP.domino.BranchAndBoundAssignmentsTable)
self._test_explicit(IMP.domino.BranchAndBoundAssignmentsTable)
def test_simple(self):
"""Test simple subset states"""
self._test_basic(IMP.domino.SimpleAssignmentsTable)
self._test_equivalencies(IMP.domino.SimpleAssignmentsTable)
self._test_explicit(IMP.domino.SimpleAssignmentsTable)
    def test_recursive(self):
"""Test recursive subset states"""
self._test_basic(IMP.domino.RecursiveAssignmentsTable)
self._test_equivalencies(IMP.domino.RecursiveAssignmentsTable)
self._test_explicit(IMP.domino.RecursiveAssignmentsTable)
if __name__ == '__main__':
IMP.test.main()
| null |
1,835 |
import random
from tempfile import TemporaryDirectory
import pytest
from pytest import mark
from lhotse import CutSet
from lhotse.audio import RecordingSet
from lhotse.features import FeatureSet
from lhotse.manipulation import combine
from lhotse.supervision import SupervisionSet
from lhotse.testing.dummies import DummyManifest, as_lazy
@mark.parametrize("manifest_type", [RecordingSet, SupervisionSet, FeatureSet, CutSet])
def test_split_even(manifest_type):
manifest = DummyManifest(manifest_type, begin_id=0, end_id=100)
manifest_subsets = manifest.split(num_splits=2)
assert len(manifest_subsets) == 2
assert manifest_subsets[0] == DummyManifest(manifest_type, begin_id=0, end_id=50)
assert manifest_subsets[1] == DummyManifest(manifest_type, begin_id=50, end_id=100)
@mark.parametrize("manifest_type", [RecordingSet, SupervisionSet, FeatureSet, CutSet])
def test_split_randomize(manifest_type):
manifest = DummyManifest(manifest_type, begin_id=0, end_id=100)
manifest_subsets = manifest.split(num_splits=2, shuffle=True)
assert len(manifest_subsets) == 2
recombined_items = list(manifest_subsets[0]) + list(manifest_subsets[1])
assert len(recombined_items) == len(manifest)
# Different ordering (we convert to lists first because the *Set classes might internally
# re-order after concatenation, e.g. by using dict or post-init sorting)
assert recombined_items != list(manifest)
@mark.parametrize("manifest_type", [RecordingSet, SupervisionSet, FeatureSet, CutSet])
@mark.parametrize("drop_last", [True, False])
def test_split_odd_1(manifest_type, drop_last):
manifest = DummyManifest(manifest_type, begin_id=0, end_id=100)
manifest_subsets = manifest.split(num_splits=3, drop_last=drop_last)
assert len(manifest_subsets) == 3
if drop_last:
assert manifest_subsets[0] == DummyManifest(
manifest_type, begin_id=0, end_id=33
)
assert manifest_subsets[1] == DummyManifest(
manifest_type, begin_id=33, end_id=66
)
assert manifest_subsets[2] == DummyManifest(
manifest_type, begin_id=66, end_id=99
)
else:
assert manifest_subsets[0] == DummyManifest(
manifest_type, begin_id=0, end_id=34
)
assert manifest_subsets[1] == DummyManifest(
manifest_type, begin_id=34, end_id=67
)
assert manifest_subsets[2] == DummyManifest(
manifest_type, begin_id=67, end_id=100
)
@mark.parametrize("manifest_type", [RecordingSet, SupervisionSet, FeatureSet, CutSet])
@mark.parametrize("drop_last", [True, False])
def test_split_odd_2(manifest_type, drop_last):
manifest = DummyManifest(manifest_type, begin_id=0, end_id=32)
manifest_subsets = manifest.split(num_splits=3, drop_last=drop_last)
assert len(manifest_subsets) == 3
if drop_last:
assert manifest_subsets[0] == DummyManifest(
manifest_type, begin_id=0, end_id=10
)
assert manifest_subsets[1] == DummyManifest(
manifest_type, begin_id=10, end_id=20
)
assert manifest_subsets[2] == DummyManifest(
manifest_type, begin_id=20, end_id=30
)
else:
assert manifest_subsets[0] == DummyManifest(
manifest_type, begin_id=0, end_id=11
)
assert manifest_subsets[1] == DummyManifest(
manifest_type, begin_id=11, end_id=22
)
assert manifest_subsets[2] == DummyManifest(
manifest_type, begin_id=22, end_id=32
)
@mark.parametrize("manifest_type", [RecordingSet, SupervisionSet, FeatureSet, CutSet])
def test_cannot_split_to_more_chunks_than_items(manifest_type):
manifest = DummyManifest(manifest_type, begin_id=0, end_id=1)
with pytest.raises(ValueError):
manifest.split(num_splits=10)
@mark.parametrize("manifest_type", [RecordingSet, SupervisionSet, FeatureSet, CutSet])
def test_split_lazy(manifest_type):
with TemporaryDirectory() as d:
manifest = DummyManifest(manifest_type, begin_id=0, end_id=100)
manifest_subsets = manifest.split_lazy(output_dir=d, chunk_size=49)
assert len(manifest_subsets) == 3
assert list(manifest_subsets[0]) == list(
DummyManifest(manifest_type, begin_id=0, end_id=49)
)
assert list(manifest_subsets[1]) == list(
DummyManifest(manifest_type, begin_id=49, end_id=98)
)
assert list(manifest_subsets[2]) == list(
DummyManifest(manifest_type, begin_id=98, end_id=100)
)
@mark.parametrize("manifest_type", [RecordingSet, SupervisionSet, FeatureSet, CutSet])
def test_combine(manifest_type):
expected = DummyManifest(manifest_type, begin_id=0, end_id=200)
combined = combine(
DummyManifest(manifest_type, begin_id=0, end_id=68),
DummyManifest(manifest_type, begin_id=68, end_id=136),
DummyManifest(manifest_type, begin_id=136, end_id=200),
)
assert combined.to_eager() == expected
combined_iterable = combine(
[
DummyManifest(manifest_type, begin_id=0, end_id=68),
DummyManifest(manifest_type, begin_id=68, end_id=136),
DummyManifest(manifest_type, begin_id=136, end_id=200),
]
)
assert combined_iterable.to_eager() == expected
@mark.parametrize("manifest_type", [RecordingSet, SupervisionSet, FeatureSet, CutSet])
def test_subset_first(manifest_type):
any_set = DummyManifest(manifest_type, begin_id=0, end_id=200)
expected = DummyManifest(manifest_type, begin_id=0, end_id=10)
subset = any_set.subset(first=10)
assert subset == expected
@mark.parametrize("manifest_type", [RecordingSet, SupervisionSet, FeatureSet, CutSet])
def test_subset_last(manifest_type):
any_set = DummyManifest(manifest_type, begin_id=0, end_id=200)
expected = DummyManifest(manifest_type, begin_id=190, end_id=200)
subset = any_set.subset(last=10)
assert subset == expected
@mark.parametrize("manifest_type", [RecordingSet, SupervisionSet, FeatureSet, CutSet])
@mark.parametrize(["first", "last"], [(None, None), (10, 10)])
def test_subset_raises(manifest_type, first, last):
any_set = DummyManifest(manifest_type, begin_id=0, end_id=200)
with pytest.raises(AssertionError):
subset = any_set.subset(first=first, last=last)
@mark.parametrize("manifest_type", [RecordingSet, SupervisionSet, CutSet])
@mark.parametrize("rng", [None, random.Random(1337)])
def test_shuffle(manifest_type, rng):
any_set = DummyManifest(manifest_type, begin_id=0, end_id=200)
shuffled = any_set.shuffle(rng=rng)
assert list(any_set.ids) != list(shuffled.ids)
assert set(any_set.ids) == set(shuffled.ids)
| null |
1,836 |
import copy
import imghdr
import json
import struct
import click
import cv2
import flask
import numpy as np
import image as img
app = flask.Flask(__name__, static_folder='static',
static_url_path='/adder/static')
@app.route('/')
def root():
return app.send_static_file('index.html')
@app.route('/config.json')
def config_json():
return flask.jsonify({
'gridImageSize': app.config['arguments']['grid_image_size'],
'existingAnnotationColor': app.config['arguments']['existing_annotation_color'],
'addedAnnotationColor': app.config['arguments']['added_annotation_color'],
})
@app.route('/existing-annotations.json')
def existing_annotations_json():
return flask.jsonify(app.config['existing_annotations'])
@app.route('/added-annotations.json')
def added_annotations_json():
return flask.jsonify(app.config['added_annotations'])
@app.route('/image_size/<int:index>')
def image_size(index):
if index < 0:
return flask.make_response('index cannot be negative', 404)
if index >= len(app.config['existing_annotations'].keys()):
return flask.make_response('index cannot be larger than amount of paths', 404)
    image = sorted(app.config['existing_annotations'].keys())[index]
    width, height = img.size(image)
return flask.jsonify({
'width': width,
'height': height,
})
@app.route('/image/<int:index>')
def image(index):
if index < 0:
return flask.make_response('index cannot be negative', 404)
if index >= len(app.config['existing_annotations'].keys()):
return flask.make_response('index cannot be larger than amount of images', 404)
    image = sorted(app.config['existing_annotations'].keys())[index]
    image = img.read(image)
    image = img.convert_color_space(
        image,
source_color_space=app.config['arguments']['color_space'],
target_color_space='RGB',
)
if 'crop' in flask.request.args:
try:
center_x = float(flask.request.args['centerX'])
center_y = float(flask.request.args['centerY'])
radius = float(flask.request.args['radius'])
except (KeyError, ValueError):
return flask.make_response('centerX, centerY, or radius missing or malformed', 404)
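        # Build 2x1 column vectors spanning a square window of side 2*radius
        # centered on (centerX, centerY); img.crop pads regions outside the
        # image with the configured default gray value.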
upper_left = np.array([[center_x - radius],
[center_y - radius]])
lower_right = np.array([[center_x + radius],
[center_y + radius]])
METHOD_NAME = img.crop(
METHOD_NAME,
upper_left,
lower_right,
[
app.config['arguments']['default_gray'],
app.config['arguments']['default_gray'],
app.config['arguments']['default_gray'],
],
)
if 'scale' in flask.request.args:
try:
scale_width = int(
flask.request.args['width']) if 'width' in flask.request.args else None
scale_height = int(
flask.request.args['height']) if 'height' in flask.request.args else None
except ValueError:
return flask.make_response('width or height malformed', 404)
if scale_width is None and scale_height is None:
return flask.make_response('width and height missing', 404)
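        # If only one of width/height is given, derive the other from the
        # image's aspect ratio (shape is height x width).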
if scale_width is None:
scale_width = int(scale_height / METHOD_NAME.shape[0] * METHOD_NAME.shape[1])
if scale_height is None:
scale_height = int(scale_width / METHOD_NAME.shape[1] * METHOD_NAME.shape[0])
size = np.array([[scale_width],
[scale_height]])
METHOD_NAME = img.resize(METHOD_NAME, size)
response = flask.make_response(img.encode(METHOD_NAME, 'png'))
response.headers['Content-Type'] = 'image/png'
return response
def write_output_annotation_files():
with open(app.config['arguments']['output_annotations_file'], 'w') as f:
json.dump(app.config['added_annotations'], f, sort_keys=True, indent=4)
f.write('\n')
@app.route('/set_added/<int:index>', methods=['POST'])
def set_added(index):
if index < 0:
return flask.make_response('index cannot be negative', 404)
if index >= len(app.config['added_annotations'].keys()):
return flask.make_response('index cannot be larger than amount of images', 404)
METHOD_NAME = sorted(app.config['added_annotations'].keys())[index]
# set added circles
app.config['added_annotations'][METHOD_NAME] = flask.request.json
write_output_annotation_files()
return flask.jsonify({'ok': True})
@app.route('/telemetry/<string:id>', methods=['POST'])
def telemetry(id):
if not id.isalnum():
return flask.make_response('Malformed ID', 400)
with open(f'{app.config["arguments"]["output_annotations_file"]}.{id}.telemetry', 'a') as f:
for message in flask.request.json:
json.dump(message, f)
f.write('\n')
return flask.make_response('Ok', 200)
@click.command()
@click.option('--debug', is_flag=True, help='Run server in debug/development mode which enables hot reloading of the application')
@click.option('--host', default='localhost', help='Hostname to listen on, set this to \'0.0.0.0\' to have the server available externally as well', show_default=True)
@click.option('--port', default=5000, help='Port of the webserver', show_default=True)
@click.option('--crop-scale-factor', default=1.5, help='Scale factor when cropping annotations', show_default=True)
@click.option('--color-space', type=click.Choice(['YCbCr', 'RGB', 'Grayscale'], case_sensitive=False), default='YCbCr', help='Color space of raw images', show_default=True)
@click.option('--grid-image-size', default=200, help='Size of scaled images in grid', show_default=True)
@click.option('--existing-annotation-color', default=(0, 0, 0), help='Color of existing annotations (three uint8 color components in [0,255] in RGB)', show_default=True)
@click.option('--added-annotation-color', default=(0, 255, 0), help='Color of added annotations by this tool (three uint8 color components in [0,255] in RGB)', show_default=True)
@click.argument('input_annotations_file', type=click.File('r'))
@click.argument('output_annotations_file', type=click.Path(dir_okay=False))
def server(*args, **kwargs):
app.config['arguments'] = kwargs
app.config['existing_annotations'] = json.load(
app.config['arguments']['input_annotations_file'])
try:
with open(app.config['arguments']['output_annotations_file']) as f:
app.config['added_annotations'] = json.load(f)
except FileNotFoundError:
# allow missing output file
app.config['added_annotations'] = copy.deepcopy(
app.config['existing_annotations'])
write_output_annotation_files()
app.run(debug=app.config['arguments']['debug'], host=app.config['arguments']
['host'], port=app.config['arguments']['port'])
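# Example invocation (the module name and annotation files are placeholders;
# `server` above is the click entry point):
#
#   python annotation_adder.py --port 8080 existing.json added.json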
| null |
1,837 |
import unittest
import asyncio
import pandas as pd
from hummingbot.core.clock import (
Clock,
ClockMode
)
from hummingbot.core.network_iterator import (
NetworkIterator,
NetworkStatus,
)
class MockNetworkIterator(NetworkIterator):
def __init__(self):
super().__init__()
self._start_network_event = asyncio.Event()
self._stop_network_event = asyncio.Event()
async def start_network(self):
self._start_network_event.set()
self._stop_network_event = asyncio.Event()
async def stop_network(self):
self._stop_network_event.set()
self._network_status = NetworkStatus.STOPPED
self._start_network_event = asyncio.Event()
async def check_network(self):
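        # Alternate between CONNECTED and NOT_CONNECTED on successive checks so
        # tests can exercise both the start_network and stop_network paths.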
if self.network_status != NetworkStatus.CONNECTED:
self.last_connected_timestamp = self.current_timestamp
return NetworkStatus.CONNECTED
else:
return NetworkStatus.NOT_CONNECTED
class NetworkIteratorUnitTest(unittest.TestCase):
start: pd.Timestamp = pd.Timestamp("2021-01-01", tz="UTC")
    end: pd.Timestamp = pd.Timestamp("2021-01-01 01:00:00", tz="UTC")
start_timestamp: float = start.timestamp()
end_timestamp: float = end.timestamp()
clock_tick_size = 10
@classmethod
def setUpClass(cls):
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
return super().setUpClass()
def setUp(self):
self.network_iterator = MockNetworkIterator()
self.clock: Clock = Clock(ClockMode.BACKTEST, self.clock_tick_size, self.start_timestamp, self.end_timestamp)
self.clock.add_iterator(self.network_iterator)
return super().setUp()
def test_network_status(self):
# This test technically tests the _check_network_loop() and all its paths.
self.assertEqual(NetworkStatus.STOPPED, self.network_iterator.network_status)
self.network_iterator.check_network_interval = 0.5
self.clock.backtest_til(self.start_timestamp)
self.ev_loop.run_until_complete(asyncio.sleep(0.5))
self.assertEqual(NetworkStatus.CONNECTED, self.network_iterator.network_status)
self.assertTrue(self.network_iterator._start_network_event.is_set())
self.ev_loop.run_until_complete(asyncio.sleep(0.5))
self.assertEqual(NetworkStatus.NOT_CONNECTED, self.network_iterator.network_status)
self.assertTrue(self.network_iterator._stop_network_event.is_set())
def test_last_connected_timestamp(self):
self.clock.backtest_til(self.start_timestamp)
self.ev_loop.run_until_complete(asyncio.sleep(0.5))
self.assertEqual(self.start_timestamp, self.network_iterator.last_connected_timestamp)
def METHOD_NAME(self):
self.clock.backtest_til(self.start_timestamp)
self.assertIsNotNone(self.network_iterator.check_network_task)
def test_check_network_interval(self):
# Default interval
self.assertEqual(10.0, self.network_iterator.check_network_interval)
def test_network_error_wait_time(self):
# Default wait time
self.assertEqual(60.0, self.network_iterator.network_error_wait_time)
def test_check_network_timeout(self):
# Default timeout
self.assertEqual(5.0, self.network_iterator.check_network_timeout)
def test_start_network(self):
self.assertFalse(self.network_iterator._start_network_event.is_set())
self.assertFalse(self.network_iterator._stop_network_event.is_set())
self.ev_loop.run_until_complete(self.network_iterator.start_network())
self.assertTrue(self.network_iterator._start_network_event.is_set())
self.assertFalse(self.network_iterator._stop_network_event.is_set())
def test_stop_network(self):
self.assertFalse(self.network_iterator._start_network_event.is_set())
self.assertFalse(self.network_iterator._stop_network_event.is_set())
self.ev_loop.run_until_complete(self.network_iterator.stop_network())
self.assertFalse(self.network_iterator._start_network_event.is_set())
self.assertTrue(self.network_iterator._stop_network_event.is_set())
self.assertEqual(NetworkStatus.STOPPED, self.network_iterator.network_status)
def test_start(self):
self.assertEqual(NetworkStatus.STOPPED, self.network_iterator.network_status)
self.network_iterator.start(self.clock, self.clock.current_timestamp)
self.assertIsNotNone(self.network_iterator.check_network_task)
self.assertEqual(NetworkStatus.NOT_CONNECTED, self.network_iterator.network_status)
def test_stop(self):
self.assertEqual(NetworkStatus.STOPPED, self.network_iterator.network_status)
self.network_iterator.start(self.clock, self.clock.current_timestamp)
self.assertEqual(NetworkStatus.NOT_CONNECTED, self.network_iterator.network_status)
self.network_iterator.stop(self.clock)
self.assertEqual(NetworkStatus.STOPPED, self.network_iterator.network_status)
self.assertIsNone(self.network_iterator.check_network_task)
| null |
1,838 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbr.endpoint import endpoint_data
import json
class CreateRestoreJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hbr', '2017-09-08', 'CreateRestoreJob')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_TargetPrefix(self): # String
return self.get_query_params().get('TargetPrefix')
def set_TargetPrefix(self, TargetPrefix): # String
self.add_query_param('TargetPrefix', TargetPrefix)
def get_SnapshotId(self): # String
return self.get_query_params().get('SnapshotId')
def set_SnapshotId(self, SnapshotId): # String
self.add_query_param('SnapshotId', SnapshotId)
def get_TargetCreateTime(self): # Long
return self.get_query_params().get('TargetCreateTime')
def set_TargetCreateTime(self, TargetCreateTime): # Long
self.add_query_param('TargetCreateTime', TargetCreateTime)
def get_VaultId(self): # String
return self.get_query_params().get('VaultId')
def set_VaultId(self, VaultId): # String
self.add_query_param('VaultId', VaultId)
def get_CrossAccountType(self): # String
return self.get_query_params().get('CrossAccountType')
def set_CrossAccountType(self, CrossAccountType): # String
self.add_query_param('CrossAccountType', CrossAccountType)
def get_CrossAccountRoleName(self): # String
return self.get_query_params().get('CrossAccountRoleName')
def set_CrossAccountRoleName(self, CrossAccountRoleName): # String
self.add_query_param('CrossAccountRoleName', CrossAccountRoleName)
def get_SnapshotHash(self): # String
return self.get_query_params().get('SnapshotHash')
def set_SnapshotHash(self, SnapshotHash): # String
self.add_query_param('SnapshotHash', SnapshotHash)
def get_TargetTime(self): # Long
return self.get_query_params().get('TargetTime')
def set_TargetTime(self, TargetTime): # Long
self.add_query_param('TargetTime', TargetTime)
def get_TargetInstanceName(self): # String
return self.get_query_params().get('TargetInstanceName')
def set_TargetInstanceName(self, TargetInstanceName): # String
self.add_query_param('TargetInstanceName', TargetInstanceName)
def get_SourceType(self): # String
return self.get_query_params().get('SourceType')
def set_SourceType(self, SourceType): # String
self.add_query_param('SourceType', SourceType)
def get_Exclude(self): # String
return self.get_body_params().get('Exclude')
def set_Exclude(self, Exclude): # String
self.add_body_params('Exclude', Exclude)
def get_TargetContainer(self): # String
return self.get_query_params().get('TargetContainer')
def set_TargetContainer(self, TargetContainer): # String
self.add_query_param('TargetContainer', TargetContainer)
def get_TargetBucket(self): # String
return self.get_query_params().get('TargetBucket')
def set_TargetBucket(self, TargetBucket): # String
self.add_query_param('TargetBucket', TargetBucket)
def get_TargetContainerClusterId(self): # String
return self.get_query_params().get('TargetContainerClusterId')
def set_TargetContainerClusterId(self, TargetContainerClusterId): # String
self.add_query_param('TargetContainerClusterId', TargetContainerClusterId)
def get_Include(self): # String
return self.get_body_params().get('Include')
def set_Include(self, Include): # String
self.add_body_params('Include', Include)
def get_UdmDetail(self): # String
return self.get_query_params().get('UdmDetail')
def set_UdmDetail(self, UdmDetail): # String
self.add_query_param('UdmDetail', UdmDetail)
def get_TargetTableName(self): # String
return self.get_query_params().get('TargetTableName')
def METHOD_NAME(self, TargetTableName): # String
self.add_query_param('TargetTableName', TargetTableName)
def get_InitiatedByAck(self): # Boolean
return self.get_query_params().get('InitiatedByAck')
def set_InitiatedByAck(self, InitiatedByAck): # Boolean
self.add_query_param('InitiatedByAck', InitiatedByAck)
def get_RestoreType(self): # String
return self.get_query_params().get('RestoreType')
def set_RestoreType(self, RestoreType): # String
self.add_query_param('RestoreType', RestoreType)
def get_TargetInstanceId(self): # String
return self.get_body_params().get('TargetInstanceId')
def set_TargetInstanceId(self, TargetInstanceId): # String
self.add_body_params('TargetInstanceId', TargetInstanceId)
def get_OtsDetail(self): # Struct
return self.get_body_params().get('OtsDetail')
def set_OtsDetail(self, OtsDetail): # Struct
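        # Struct parameters are serialized to JSON before being added as body params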
self.add_body_params("OtsDetail", json.dumps(OtsDetail))
def get_TargetFileSystemId(self): # String
return self.get_query_params().get('TargetFileSystemId')
def set_TargetFileSystemId(self, TargetFileSystemId): # String
self.add_query_param('TargetFileSystemId', TargetFileSystemId)
def get_TargetPath(self): # String
return self.get_body_params().get('TargetPath')
def set_TargetPath(self, TargetPath): # String
self.add_body_params('TargetPath', TargetPath)
def get_CrossAccountUserId(self): # Long
return self.get_query_params().get('CrossAccountUserId')
def set_CrossAccountUserId(self, CrossAccountUserId): # Long
self.add_query_param('CrossAccountUserId', CrossAccountUserId)
def get_UdmRegionId(self): # String
return self.get_query_params().get('UdmRegionId')
def set_UdmRegionId(self, UdmRegionId): # String
self.add_query_param('UdmRegionId', UdmRegionId)
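# Minimal usage sketch (credentials, region and IDs are placeholders; AcsClient
# is the standard aliyunsdkcore client):
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = CreateRestoreJobRequest()
#   request.set_VaultId('v-example')
#   request.set_SnapshotId('s-example')
#   request.set_SourceType('ECS_FILE')
#   response = client.do_action_with_exception(request)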
| null |
1,839 |
"""common fixtures for use by all test classes"""
import os.path
import shutil
import unittest.mock
import py.path
import pytest
import annif
import annif.analyzer
import annif.corpus
import annif.project
import annif.registry
import annif.vocab
@pytest.fixture(scope="module")
def app():
# make sure the dummy vocab is in place because many tests depend on it
subjfile = os.path.join(os.path.dirname(__file__), "corpora", "dummy-subjects.csv")
app = annif.create_app(config_name="annif.default_config.TestingConfig")
with app.app_context():
project = annif.registry.get_project("dummy-en")
# the vocab is needed for both English and Finnish language projects
vocab = annif.corpus.SubjectFileCSV(subjfile)
project.vocab.load_vocabulary(vocab)
return app
@pytest.fixture(scope="module")
def app_with_initialize():
app = annif.create_app(config_name="annif.default_config.TestingInitializeConfig")
return app
@pytest.fixture
def app_client(app):
with app.test_client() as app_client:
yield app_client
@pytest.fixture(scope="module")
def registry(app):
with app.app_context():
return app.annif_registry
@pytest.fixture(scope="module")
def datadir(tmpdir_factory):
return tmpdir_factory.mktemp("data")
@pytest.fixture(scope="module")
def testdatadir(app):
"""a fixture to access the tests/data directory as a py.path.local
object"""
with app.app_context():
dir = py.path.local(app.config["DATADIR"])
# clean up previous state of datadir
shutil.rmtree(os.path.join(str(dir), "projects"), ignore_errors=True)
return dir
@pytest.fixture(scope="module")
def METHOD_NAME():
docfile = os.path.join(
os.path.dirname(__file__), "corpora", "archaeology", "subjects.tsv"
)
return annif.corpus.SubjectFileTSV(docfile, "fi")
@pytest.fixture(scope="module")
def dummy_subject_index(testdatadir):
"""a fixture to access the subject index of the dummy vocabulary"""
vocab = annif.vocab.AnnifVocabulary("dummy", testdatadir)
return vocab.subjects
@pytest.fixture(scope="module")
def vocabulary(datadir):
vocab = annif.vocab.AnnifVocabulary("my-vocab", datadir)
subjfile = os.path.join(
os.path.dirname(__file__), "corpora", "archaeology", "yso-archaeology.ttl"
)
subjects = annif.corpus.SubjectFileSKOS(subjfile)
vocab.load_vocabulary(subjects)
return vocab
@pytest.fixture(scope="module")
def subject_index(vocabulary):
return vocabulary.subjects
@pytest.fixture(scope="module")
def document_corpus(subject_index):
docfile = os.path.join(
os.path.dirname(__file__), "corpora", "archaeology", "documents.tsv"
)
doc_corpus = annif.corpus.DocumentFile(docfile, subject_index)
return doc_corpus
@pytest.fixture(scope="module")
def fulltext_corpus(subject_index):
ftdir = os.path.join(
os.path.dirname(__file__), "corpora", "archaeology", "fulltext"
)
ft_corpus = annif.corpus.DocumentDirectory(
ftdir, subject_index, "fi", require_subjects=True
)
return ft_corpus
@pytest.fixture(scope="module")
def pretrained_vectors():
return py.path.local(
os.path.join(
os.path.dirname(__file__), "corpora", "archaeology", "fasttext.vec"
)
)
@pytest.fixture(scope="module")
def project(subject_index, datadir, registry, vocabulary):
proj = unittest.mock.Mock()
proj.analyzer = annif.analyzer.get_analyzer("snowball(finnish)")
proj.language = "fi"
proj.vocab = vocabulary
proj.subjects = subject_index
proj.datadir = str(datadir)
proj.registry = registry
return proj
@pytest.fixture(scope="module")
def app_project(app):
with app.app_context():
dir = py.path.local(app.config["DATADIR"])
shutil.rmtree(os.path.join(str(dir), "projects"), ignore_errors=True)
return annif.registry.get_project("dummy-en")
@pytest.fixture(scope="function")
def empty_corpus(tmpdir, subject_index):
empty_file = tmpdir.ensure("empty.tsv")
return annif.corpus.DocumentFile(str(empty_file), subject_index)
| null |
1,840 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class DescribeCheckWarningSummaryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeCheckWarningSummary')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_TargetType(self): # String
return self.get_query_params().get('TargetType')
def set_TargetType(self, TargetType): # String
self.add_query_param('TargetType', TargetType)
def METHOD_NAME(self): # String
return self.get_query_params().get('ContainerFieldName')
def set_ContainerFieldName(self, ContainerFieldName): # String
self.add_query_param('ContainerFieldName', ContainerFieldName)
def get_RiskName(self): # String
return self.get_query_params().get('RiskName')
def set_RiskName(self, RiskName): # String
self.add_query_param('RiskName', RiskName)
def get_SourceIp(self): # String
return self.get_query_params().get('SourceIp')
def set_SourceIp(self, SourceIp): # String
self.add_query_param('SourceIp', SourceIp)
def get_ContainerFieldValue(self): # String
return self.get_query_params().get('ContainerFieldValue')
def set_ContainerFieldValue(self, ContainerFieldValue): # String
self.add_query_param('ContainerFieldValue', ContainerFieldValue)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_GroupId(self): # Long
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # Long
self.add_query_param('GroupId', GroupId)
def get_CurrentPage(self): # Integer
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self, CurrentPage): # Integer
self.add_query_param('CurrentPage', CurrentPage)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_RiskStatus(self): # Integer
return self.get_query_params().get('RiskStatus')
def set_RiskStatus(self, RiskStatus): # Integer
self.add_query_param('RiskStatus', RiskStatus)
def get_StrategyId(self): # Long
return self.get_query_params().get('StrategyId')
def set_StrategyId(self, StrategyId): # Long
self.add_query_param('StrategyId', StrategyId)
def get_TypeName(self): # String
return self.get_query_params().get('TypeName')
def set_TypeName(self, TypeName): # String
self.add_query_param('TypeName', TypeName)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status)
def get_Uuids(self): # String
return self.get_query_params().get('Uuids')
def set_Uuids(self, Uuids): # String
self.add_query_param('Uuids', Uuids)
| null |
1,841 |
""" Statistics calculation utility """
import time
import math
import sys
from onmt.utils.logging import logger
class Statistics(object):
"""
Accumulator for loss statistics.
Currently calculates:
* accuracy
* perplexity
* elapsed time
"""
def __init__(
self, loss=0, n_batchs=0, n_sents=0, n_words=0, n_correct=0, computed_metrics={}
):
self.loss = loss
self.n_batchs = n_batchs
self.n_sents = n_sents
self.n_words = n_words
self.n_correct = n_correct
self.n_src_words = 0
self.computed_metrics = computed_metrics
self.start_time = time.time()
@staticmethod
def all_gather_stats(stat, max_size=4096):
"""
        Gather a `Statistics` object across multiple processes/nodes
        Args:
            stat(:obj:Statistics): the statistics object to gather
                across all processes/nodes
            max_size(int): max buffer size to use
        Returns:
            `Statistics`, the updated stats object
"""
stats = Statistics.METHOD_NAME([stat], max_size=max_size)
return stats[0]
@staticmethod
def METHOD_NAME(stat_list, max_size=4096):
"""
        Gather a `Statistics` list across all processes/nodes
        Args:
            stat_list(list([`Statistics`])): list of statistics objects to
                gather across all processes/nodes
max_size(int): max buffer size to use
Returns:
our_stats(list([`Statistics`])): list of updated stats
"""
from torch.distributed import get_rank
from onmt.utils.distributed import all_gather_list
# Get a list of world_size lists with len(stat_list) Statistics objects
all_stats = all_gather_list(stat_list, max_size=max_size)
our_rank = get_rank()
our_stats = all_stats[our_rank]
for other_rank, stats in enumerate(all_stats):
if other_rank == our_rank:
continue
for i, stat in enumerate(stats):
our_stats[i].update(stat, update_n_src_words=True)
return our_stats
def update(self, stat, update_n_src_words=False):
"""
        Update statistics by summing values with another `Statistics` object
Args:
stat: another statistic object
update_n_src_words(bool): whether to update (sum) `n_src_words`
or not
"""
self.loss += stat.loss
self.n_batchs += stat.n_batchs
self.n_sents += stat.n_sents
self.n_words += stat.n_words
self.n_correct += stat.n_correct
self.computed_metrics = stat.computed_metrics
if update_n_src_words:
self.n_src_words += stat.n_src_words
def accuracy(self):
"""compute accuracy"""
return 100 * (self.n_correct / self.n_words)
def xent(self):
"""compute cross entropy"""
return self.loss / self.n_words
def ppl(self):
"""compute perplexity"""
return math.exp(min(self.loss / self.n_words, 100))
def elapsed_time(self):
"""compute elapsed time"""
return time.time() - self.start_time
def output(self, step, num_steps, learning_rate, start):
"""Write out statistics to stdout.
Args:
            step (int): current step
            num_steps (int): total number of steps
            learning_rate (float): current learning rate
            start (float): start time of step.
"""
t = self.elapsed_time()
step_fmt = "%2d" % step
if num_steps > 0:
step_fmt = "%s/%5d" % (step_fmt, num_steps)
logger.info(
(
"Step %s; acc: %2.1f; ppl: %5.1f; xent: %2.1f; "
+ "lr: %7.5f; sents: %7.0f; bsz: %4.0f/%4.0f/%2.0f; "
+ "%3.0f/%3.0f tok/s; %6.0f sec;"
)
% (
step_fmt,
self.accuracy(),
self.ppl(),
self.xent(),
learning_rate,
self.n_sents,
self.n_src_words / self.n_batchs,
self.n_words / self.n_batchs,
self.n_sents / self.n_batchs,
self.n_src_words / (t + 1e-5),
self.n_words / (t + 1e-5),
time.time() - start,
)
+ "".join(
[
" {}: {}".format(k, round(v, 2))
for k, v in self.computed_metrics.items()
]
)
)
sys.stdout.flush()
def log_tensorboard(self, prefix, writer, learning_rate, patience, step):
"""display statistics to tensorboard"""
t = self.elapsed_time()
writer.add_scalar(prefix + "/xent", self.xent(), step)
writer.add_scalar(prefix + "/ppl", self.ppl(), step)
for k, v in self.computed_metrics.items():
writer.add_scalar(prefix + "/" + k, round(v, 4), step)
writer.add_scalar(prefix + "/accuracy", self.accuracy(), step)
writer.add_scalar(prefix + "/tgtper", self.n_words / t, step)
writer.add_scalar(prefix + "/lr", learning_rate, step)
if patience is not None:
writer.add_scalar(prefix + "/patience", patience, step)
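# A minimal usage sketch (values are illustrative):
#
#   stats = Statistics(loss=100.0, n_batchs=1, n_sents=8, n_words=50, n_correct=30)
#   stats.accuracy()  # 60.0
#   stats.xent()      # 2.0
#   stats.ppl()       # exp(2.0) ~= 7.39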
| null |
1,842 |
import asyncio
import logging
from typing import Any, Dict, Optional
import aiohttp
from hummingbot.core.network_base import NetworkBase
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.utils.async_retry import async_retry
from hummingbot.core.utils.async_utils import safe_ensure_future
from hummingbot.METHOD_NAME import HummingbotLogger
class LogServerClient(NetworkBase):
lsc_logger: Optional[HummingbotLogger] = None
_lsc_shared_instance: "LogServerClient" = None
@classmethod
def get_instance(cls, log_server_url: str = "https://api.coinalpha.com/reporting-proxy-v2/") -> "LogServerClient":
if cls._lsc_shared_instance is None:
cls._lsc_shared_instance = LogServerClient(log_server_url=log_server_url)
return cls._lsc_shared_instance
@classmethod
def METHOD_NAME(cls) -> HummingbotLogger:
if cls.lsc_logger is None:
cls.lsc_logger = logging.getLogger(__name__)
return cls.lsc_logger
def __init__(self, log_server_url: str = "https://api.coinalpha.com/reporting-proxy-v2/"):
super().__init__()
self.queue: asyncio.Queue = asyncio.Queue()
self.consume_queue_task: Optional[asyncio.Task] = None
self.log_server_url: str = log_server_url
def request(self, req):
if not self.started:
self.start()
self.queue.put_nowait(req)
@async_retry(retry_count=3, exception_types=[asyncio.TimeoutError, EnvironmentError], raise_exp=True)
async def send_log(self, session: aiohttp.ClientSession, request_dict: Dict[str, Any]):
async with session.request(request_dict["method"], request_dict["url"], **request_dict["request_obj"]) as resp:
resp_text = await resp.text()
self.METHOD_NAME().debug(f"Sent logs: {resp.status} {resp.url} {resp_text} ",
extra={"do_not_send": True})
if resp.status != 200 and resp.status not in {404, 405, 400}:
raise EnvironmentError("Failed sending logs to log server.")
async def consume_queue(self, session):
while True:
try:
req = await self.queue.get()
self.METHOD_NAME().debug(f"Remote logging payload: {req}")
await self.send_log(session, req)
except asyncio.CancelledError:
raise
except aiohttp.ClientError:
self.METHOD_NAME().network("Network error sending logs.", exc_info=True, extra={"do_not_send": True})
return
except Exception:
self.METHOD_NAME().network("Unexpected error sending logs.", exc_info=True, extra={"do_not_send": True})
return
async def request_loop(self):
while True:
loop = asyncio.get_event_loop()
try:
async with aiohttp.ClientSession(loop=loop,
connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
await self.consume_queue(session)
except asyncio.CancelledError:
raise
except Exception:
self.METHOD_NAME().network("Unexpected error running logging task.",
exc_info=True, extra={"do_not_send": True})
await asyncio.sleep(5.0)
async def start_network(self):
self.consume_queue_task = safe_ensure_future(self.request_loop())
async def stop_network(self):
if self.consume_queue_task is not None:
self.consume_queue_task.cancel()
self.consume_queue_task = None
async def check_network(self) -> NetworkStatus:
try:
loop = asyncio.get_event_loop()
async with aiohttp.ClientSession(loop=loop,
connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
async with session.get(self.log_server_url) as resp:
if resp.status != 200:
raise Exception("Log proxy server is down.")
except asyncio.CancelledError:
raise
except Exception:
return NetworkStatus.NOT_CONNECTED
return NetworkStatus.CONNECTED
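# Minimal usage sketch (URL and payload are illustrative; send_log() reads the
# "method", "url" and "request_obj" keys, and "request_obj" is expanded as
# keyword arguments to aiohttp's session.request):
#
#   client = LogServerClient.get_instance()
#   client.request({
#       "method": "POST",
#       "url": "https://api.coinalpha.com/reporting-proxy-v2/logs",
#       "request_obj": {"json": {"message": "hello"}},
#   })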
| null |
1,843 |
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
import tarfile
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [[]]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
class basicTest(mysqlBaseTestCase):
def METHOD_NAME(self):
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, '_xtrabackup')
# remove backup paths
for del_path in [backup_path]:
if os.path.exists(del_path):
shutil.rmtree(del_path)
def test_ib_stream(self):
self.servers = servers
innobackupex = test_executor.system_manager.innobackupex_path
xtrabackup = test_executor.system_manager.xtrabackup_path
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, '_xtrabackup')
tar_file_path = os.path.join(backup_path,'out.tar')
output_path = os.path.join(master_server.vardir, 'innobackupex.out')
exec_path = os.path.dirname(innobackupex)
# populate our server with a test bed
test_cmd = "./gentest.pl --gendata=conf/percona/percona.zz"
retcode, output = self.execute_randgen(test_cmd, test_executor, master_server)
# Add desired option to config file
config_file = open(master_server.cnf_file,'a')
config_file.write("innodb_flush_method=O_DIRECT\n")
config_file.close()
# take a backup
try:
os.mkdir(backup_path)
except OSError:
pass
cmd = [ innobackupex
, "--defaults-file=%s" %master_server.cnf_file
, "--stream=tar"
, "--user=root"
, "--port=%d" %master_server.master_port
, "--host=127.0.0.1"
, "--no-timestamp"
, "--ibbackup=%s" %xtrabackup
, "%s > %s" %(backup_path,tar_file_path)
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
expected_output = "xtrabackup: using O_DIRECT"
self.assertTrue(expected_output in output, msg=output)
# stop the server
master_server.stop()
# extract our backup tarball
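        # -i (--ignore-zeros) lets tar read past the zero blocks a streamed
        # archive may contain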
cmd = "tar -ivxf %s" %tar_file_path
retcode, output = self.execute_cmd(cmd, output_path, backup_path, True)
self.assertEqual(retcode,0,output)
# Check for Bug 723318 - seems quicker than separate test case
self.assertTrue('xtrabackup_binary' in os.listdir(backup_path)
, msg = "Bug723318: xtrabackup_binary not included in tar archive when streaming")
# do prepare on backup
cmd = [ innobackupex
, "--apply-log"
, "--no-timestamp"
, "--use-memory=500M"
, "--ibbackup=%s" %xtrabackup
, backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertEqual(retcode,0,output)
# remove old datadir
shutil.rmtree(master_server.datadir)
os.mkdir(master_server.datadir)
# restore from backup
cmd = [ innobackupex
, "--defaults-file=%s" %master_server.cnf_file
, "--copy-back"
, "--ibbackup=%s" %(xtrabackup)
, backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertEqual(retcode,0, output)
# restart server (and ensure it doesn't crash)
master_server.start()
self.assertEqual(master_server.status,1, 'Server failed restart from restored datadir...')
# Check the server is ok
query = "SELECT COUNT(*) FROM test.DD"
expected_output = ((100L,),)
retcode, output = self.execute_query(query, master_server)
self.assertEqual(output, expected_output, msg = "%s || %s" %(output, expected_output))
| null |
1,844 |
# Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filesystem registry managing filesystem plugins."""
import re
from threading import Lock
from typing import TYPE_CHECKING, Dict, Type
from zenml.logger import get_logger
if TYPE_CHECKING:
from zenml.io.filesystem import BaseFilesystem, PathType
logger = get_logger(__name__)
class FileIORegistry:
"""Registry of pluggable filesystem implementations."""
def __init__(self) -> None:
"""Initialize the registry."""
self._filesystems: Dict["PathType", Type["BaseFilesystem"]] = {}
self._registration_lock = Lock()
def register(self, filesystem_cls: Type["BaseFilesystem"]) -> None:
"""Register a filesystem implementation.
Args:
filesystem_cls: Subclass of `zenml.io.filesystem.Filesystem`.
"""
with self._registration_lock:
for scheme in filesystem_cls.SUPPORTED_SCHEMES:
current_preferred = self._filesystems.get(scheme)
if current_preferred is not None:
logger.debug(
"Overwriting previously registered filesystem for "
"scheme `%s`. Old class: %s, new class: %s",
scheme,
current_preferred.__name__,
filesystem_cls.__name__,
)
self._filesystems[scheme] = filesystem_cls
def METHOD_NAME(
self, scheme: "PathType"
) -> Type["BaseFilesystem"]:
"""Get filesystem plugin for given scheme string.
Args:
scheme: The scheme to get the filesystem for.
Returns:
The filesystem plugin for the given scheme.
Raises:
ValueError: If no filesystem plugin is registered for the given
scheme.
"""
if isinstance(scheme, bytes):
scheme = scheme.decode("utf-8")
if scheme not in self._filesystems:
raise ValueError(
f"No file systems were found for the scheme: "
f"{scheme}. Please make sure that you are using "
f"the right path and the all the necessary "
f"integrations are properly installed."
)
return self._filesystems[scheme]
def get_filesystem_for_path(
self, path: "PathType"
) -> Type["BaseFilesystem"]:
"""Get filesystem plugin for given path.
Args:
path: The path to get the filesystem for.
Returns:
The filesystem plugin for the given path.
Raises:
ValueError: If no filesystem plugin is registered for the given
path.
"""
# Assume local path by default, but extract filesystem prefix if available.
if isinstance(path, str):
path_bytes = path.encode("utf-8")
elif isinstance(path, bytes):
path_bytes = path
else:
raise ValueError("Invalid path type: %r." % path)
result = re.match(b"^([a-z0-9]+://)", path_bytes)
if result:
scheme = result.group(1).decode("utf-8")
else:
scheme = ""
return self.METHOD_NAME(scheme)
# Default global instance of the filesystem registry.
default_filesystem_registry = FileIORegistry()
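# A minimal registration sketch (LocalFilesystem is hypothetical; concrete
# implementations live elsewhere in zenml.io):
#
#   class LocalFilesystem(BaseFilesystem):
#       SUPPORTED_SCHEMES = {""}  # local paths carry no "scheme://" prefix
#
#   default_filesystem_registry.register(LocalFilesystem)
#   fs = default_filesystem_registry.get_filesystem_for_path("/tmp/data.csv")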
| null |
1,845 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class CreateDataServiceApiRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'CreateDataServiceApi')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ScriptDetails(self): # String
return self.get_body_params().get('ScriptDetails')
def set_ScriptDetails(self, ScriptDetails): # String
self.add_body_params('ScriptDetails', ScriptDetails)
def get_RequestMethod(self): # Integer
return self.get_body_params().get('RequestMethod')
def set_RequestMethod(self, RequestMethod): # Integer
self.add_body_params('RequestMethod', RequestMethod)
def get_ApiDescription(self): # String
return self.get_body_params().get('ApiDescription')
def set_ApiDescription(self, ApiDescription): # String
self.add_body_params('ApiDescription', ApiDescription)
def get_Timeout(self): # Integer
return self.get_body_params().get('Timeout')
def set_Timeout(self, Timeout): # Integer
self.add_body_params('Timeout', Timeout)
def METHOD_NAME(self): # Long
return self.get_body_params().get('FolderId')
def set_FolderId(self, FolderId): # Long
self.add_body_params('FolderId', FolderId)
def get_ResourceGroupId(self): # Long
return self.get_body_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # Long
self.add_body_params('ResourceGroupId', ResourceGroupId)
def get_SqlMode(self): # Long
return self.get_body_params().get('SqlMode')
def set_SqlMode(self, SqlMode): # Long
self.add_body_params('SqlMode', SqlMode)
def get_TenantId(self): # Long
return self.get_body_params().get('TenantId')
def set_TenantId(self, TenantId): # Long
self.add_body_params('TenantId', TenantId)
def get_RequestContentType(self): # Integer
return self.get_body_params().get('RequestContentType')
def set_RequestContentType(self, RequestContentType): # Integer
self.add_body_params('RequestContentType', RequestContentType)
def get_Protocols(self): # String
return self.get_body_params().get('Protocols')
def set_Protocols(self, Protocols): # String
self.add_body_params('Protocols', Protocols)
def get_ProjectId(self): # Long
return self.get_body_params().get('ProjectId')
def set_ProjectId(self, ProjectId): # Long
self.add_body_params('ProjectId', ProjectId)
def get_ResponseContentType(self): # Integer
return self.get_body_params().get('ResponseContentType')
def set_ResponseContentType(self, ResponseContentType): # Integer
self.add_body_params('ResponseContentType', ResponseContentType)
def get_GroupId(self): # String
return self.get_body_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_body_params('GroupId', GroupId)
def get_ApiPath(self): # String
return self.get_body_params().get('ApiPath')
def set_ApiPath(self, ApiPath): # String
self.add_body_params('ApiPath', ApiPath)
def get_WizardDetails(self): # String
return self.get_body_params().get('WizardDetails')
def set_WizardDetails(self, WizardDetails): # String
self.add_body_params('WizardDetails', WizardDetails)
def get_ApiMode(self): # Integer
return self.get_body_params().get('ApiMode')
def set_ApiMode(self, ApiMode): # Integer
self.add_body_params('ApiMode', ApiMode)
def get_VisibleRange(self): # Integer
return self.get_body_params().get('VisibleRange')
def set_VisibleRange(self, VisibleRange): # Integer
self.add_body_params('VisibleRange', VisibleRange)
def get_RegistrationDetails(self): # String
return self.get_body_params().get('RegistrationDetails')
def set_RegistrationDetails(self, RegistrationDetails): # String
self.add_body_params('RegistrationDetails', RegistrationDetails)
def get_ApiName(self): # String
return self.get_body_params().get('ApiName')
def set_ApiName(self, ApiName): # String
self.add_body_params('ApiName', ApiName)
| null |
1,846 |
import click
import logging
from tre.api_client import ApiClient
from tre.commands.workspaces.airlock.contexts import WorkspaceAirlockContext, pass_workspace_airlock_context
from tre.output import output, output_option, query_option
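# JMESPath projection used to render a single airlock request as a table row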
_default_table_query_item = r"airlockRequest.{id:id,workspace_id:workspaceId,type:type, title:title,status:status,business_justification:businessJustification}"
def airlock_id_completion(ctx: click.Context, param: click.Parameter, incomplete: str):
log = logging.getLogger(__name__)
parent_ctx = ctx.parent
workspace_id = parent_ctx.params["workspace_id"]
client = ApiClient.get_api_client_from_config()
workspace_scope = client.get_workspace_scope(log, workspace_id)
response = client.call_api(log, 'GET', f'/api/workspaces/{workspace_id}/requests', scope_id=workspace_scope)
if response.is_success:
ids = [request["airlockRequest"]["id"] for request in response.json()["airlockRequests"]]
return [id for id in ids if id.startswith(incomplete)]
@click.group(name="airlock-request", invoke_without_command=True, help="Perform actions on an airlock request")
@click.argument('airlock_id', required=True, type=click.UUID, shell_complete=airlock_id_completion)
@click.pass_context
def airlock(ctx: click.Context, airlock_id: str) -> None:
ctx.obj = WorkspaceAirlockContext.add_airlock_id_to_context_obj(ctx, airlock_id)
@click.command(name="show", help="Show airlock request")
@output_option()
@query_option()
@pass_workspace_airlock_context
def airlock_show(airlock_context: WorkspaceAirlockContext, output_format, query) -> None:
log = logging.getLogger(__name__)
workspace_id = airlock_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
airlock_id = airlock_context.airlock_id
if airlock_id is None:
raise click.UsageError('Missing airlock request ID')
client = ApiClient.get_api_client_from_config()
workspace_scope = client.get_workspace_scope(log, workspace_id)
response = client.call_api(
log,
'GET',
f'/api/workspaces/{workspace_id}/requests/{airlock_id}',
scope_id=workspace_scope,
)
output(response, output_format=output_format, query=query, default_table_query=_default_table_query_item)
@click.command(name="get-url", help="Get URL to access airlock request")
@output_option()
@query_option()
@pass_workspace_airlock_context
def airlock_get_url(airlock_context: WorkspaceAirlockContext, output_format, query) -> None:
log = logging.getLogger(__name__)
workspace_id = airlock_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
airlock_id = airlock_context.airlock_id
if airlock_id is None:
        raise click.UsageError('Missing airlock request ID')
client = ApiClient.get_api_client_from_config()
workspace_scope = client.get_workspace_scope(log, workspace_id)
response = client.call_api(
log,
'GET',
f'/api/workspaces/{workspace_id}/requests/{airlock_id}/link',
scope_id=workspace_scope,
)
output(response, output_format=output_format, query=query, default_table_query=r"{container_url:containerUrl}")
@click.command(name="submit", help="Submit an airlock request (after uploading content)")
@output_option()
@query_option()
@pass_workspace_airlock_context
def METHOD_NAME(airlock_context: WorkspaceAirlockContext, output_format, query) -> None:
log = logging.getLogger(__name__)
workspace_id = airlock_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
airlock_id = airlock_context.airlock_id
if airlock_id is None:
raise click.UsageError('Missing airlock request ID')
client = ApiClient.get_api_client_from_config()
workspace_scope = client.get_workspace_scope(log, workspace_id)
response = client.call_api(
log,
'POST',
f'/api/workspaces/{workspace_id}/requests/{airlock_id}/submit',
scope_id=workspace_scope,
)
output(
response,
output_format=output_format,
query=query,
default_table_query=_default_table_query_item)
@click.command(name="review", help="Provide a review response for an airlock request")
@click.option('--approve/--reject', 'approve', required=True, help="Approved/rejected")
@click.option('--reason', required=True, help="Reason for approval/rejection")
@output_option()
@query_option()
@pass_workspace_airlock_context
def airlock_review(airlock_context: WorkspaceAirlockContext, approve, reason, output_format, query) -> None:
log = logging.getLogger(__name__)
workspace_id = airlock_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
airlock_id = airlock_context.airlock_id
if airlock_id is None:
raise click.UsageError('Missing airlock request ID')
client = ApiClient.get_api_client_from_config()
workspace_scope = client.get_workspace_scope(log, workspace_id)
response = client.call_api(
log,
'POST',
f'/api/workspaces/{workspace_id}/requests/{airlock_id}/review',
json_data={
"approval": approve,
"decisionExplanation": reason,
},
scope_id=workspace_scope,
)
output(
response,
output_format=output_format,
query=query,
default_table_query=_default_table_query_item)
@click.command(name="cancel", help="Cancel an airlock request")
@output_option()
@query_option()
@pass_workspace_airlock_context
def airlock_cancel(airlock_context: WorkspaceAirlockContext, output_format, query) -> None:
log = logging.getLogger(__name__)
workspace_id = airlock_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
airlock_id = airlock_context.airlock_id
if airlock_id is None:
raise click.UsageError('Missing airlock request ID')
client = ApiClient.get_api_client_from_config()
workspace_scope = client.get_workspace_scope(log, workspace_id)
response = client.call_api(
log,
'POST',
f'/api/workspaces/{workspace_id}/requests/{airlock_id}/cancel',
scope_id=workspace_scope,
)
output(
response,
output_format=output_format,
query=query,
default_table_query=_default_table_query_item)
airlock.add_command(airlock_show)
airlock.add_command(airlock_get_url)
airlock.add_command(METHOD_NAME)
airlock.add_command(airlock_review)
airlock.add_command(airlock_cancel)
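# Example invocations (UUIDs are placeholders; the parent `workspace` command
# group that supplies workspace_id is defined elsewhere in the tre CLI):
#
#   tre workspace <workspace-uuid> airlock-request <request-uuid> show
#   tre workspace <workspace-uuid> airlock-request <request-uuid> review --approve --reason "ok"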
| null |
1,847 |
#!/usr/bin/env python
# general imports
from numpy import *
from random import uniform
# imp general
import IMP
# our project
from IMP.isd import Scale, MolecularDynamicsMover
# unit testing framework
import IMP.test
vel_key_xyz = IMP.FloatsKey("linvel")
vel_key_nuisance = IMP.FloatKey("vel")
class TestMolecularDynamicsMover(IMP.test.TestCase):
def setUp(self):
IMP.test.TestCase.setUp(self)
IMP.set_log_level(0)
self.m = IMP.Model()
self.xyzs = []
self.nuisances = []
self.restraints = []
self.METHOD_NAME()
self.setup_mover()
def setup_xyz(self, coords, mass):
a = IMP.Particle(self.m)
IMP.core.XYZ.setup_particle(a, coords)
IMP.core.XYZ(a).set_coordinates_are_optimized(True)
IMP.atom.Mass.setup_particle(a, mass)
return a
def setup_scale(self, coords, mass):
a = IMP.Particle(self.m)
IMP.isd.Scale.setup_particle(a, coords)
IMP.isd.Scale(a).set_scale_is_optimized(True)
IMP.atom.Mass.setup_particle(a, mass)
return a
def METHOD_NAME(self):
"""setup two xyzs and two nuisances linked by a Lognormal restraint"""
a = self.setup_xyz(IMP.algebra.Vector3D((0, 0, 0)), 1.0)
b = self.setup_xyz(IMP.algebra.Vector3D((1, 1, 1)), 1.0)
si = self.setup_scale(1.0, 1.0)
ga = self.setup_scale(1.0, 1.0)
ln = IMP.isd.NOERestraint(self.m, a, b, si, ga, 1.0)
self.xyzs.append(a)
self.xyzs.append(b)
self.nuisances.append(si)
self.nuisances.append(ga)
self.restraints.append(ln)
def setup_mover(self, nsteps=10, tstep=1.0):
self.mv = IMP.isd.MolecularDynamicsMover(self.m, nsteps, tstep)
self.mv.set_was_used(True)
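        # draw initial velocities for a 300 K ensemble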
self.mv.get_md().assign_velocities(300.)
self.mv.get_md().set_scoring_function(self.restraints)
def get_nuisance_coordinates(self):
a = [i.get_value(IMP.isd.Scale.get_scale_key())
for i in self.nuisances]
b = [i.get_value(vel_key_nuisance) for i in self.nuisances]
return a + b
def get_xyz_coordinates(self):
a = [[i.get_value(fl) for fl in IMP.core.XYZ.get_xyz_keys()]
for i in self.xyzs]
b = [i.get_value(vel_key_xyz) for i in self.xyzs]
return a + b
def test_move(self):
"""test that the mover moves the particles"""
self.mv.get_md().optimize(0)
oldn = self.get_nuisance_coordinates()
oldx = self.get_xyz_coordinates()
self.mv.propose()
newn = self.get_nuisance_coordinates()
newx = self.get_xyz_coordinates()
for i, j in zip(newx, oldx):
self.assertNotAlmostEqual(i[0], j[0], delta=1e-7)
self.assertNotAlmostEqual(i[1], j[1], delta=1e-7)
self.assertNotAlmostEqual(i[2], j[2], delta=1e-7)
for i, j in zip(newn, oldn):
self.assertNotAlmostEqual(i, j, delta=1e-7)
def test_reject(self):
"""reject should revert to the first set of coordinates"""
self.mv.get_md().optimize(0)
oldn = self.get_nuisance_coordinates()
oldx = self.get_xyz_coordinates()
self.mv.propose()
self.mv.reject()
newn = self.get_nuisance_coordinates()
newx = self.get_xyz_coordinates()
for i, j in zip(newx, oldx):
self.assertAlmostEqual(i[0], j[0], delta=1e-7)
self.assertAlmostEqual(i[1], j[1], delta=1e-7)
self.assertAlmostEqual(i[2], j[2], delta=1e-7)
for i, j in zip(newn, oldn):
self.assertAlmostEqual(i, j, delta=1e-7)
def test_consistence(self):
"""rejectting the move without redrawing velocities should lead to the
same point
"""
self.mv.get_md().optimize(0)
self.mv.propose()
oldn = self.get_nuisance_coordinates()
oldx = self.get_xyz_coordinates()
self.mv.reject()
self.mv.propose()
newn = self.get_nuisance_coordinates()
newx = self.get_xyz_coordinates()
for i, j in zip(newx, oldx):
self.assertAlmostEqual(i[0], j[0], delta=1e-7)
self.assertAlmostEqual(i[1], j[1], delta=1e-7)
self.assertAlmostEqual(i[2], j[2], delta=1e-7)
for i, j in zip(newn, oldn):
self.assertAlmostEqual(i, j, delta=1e-7)
def test_consistence_2(self):
"""rejectting the move by redrawing velocities should lead to a different
point
"""
self.mv.get_md().optimize(0)
self.mv.propose()
oldn = self.get_nuisance_coordinates()
oldx = self.get_xyz_coordinates()
self.mv.reject()
self.mv.get_md().assign_velocities(300.)
self.mv.propose()
newn = self.get_nuisance_coordinates()
newx = self.get_xyz_coordinates()
for i, j in zip(newx, oldx):
self.assertNotAlmostEqual(i[0], j[0], delta=1e-7)
self.assertNotAlmostEqual(i[1], j[1], delta=1e-7)
self.assertNotAlmostEqual(i[2], j[2], delta=1e-7)
for i, j in zip(newn, oldn):
self.assertNotAlmostEqual(i, j, delta=1e-7)
def test_get_set(self):
"""test get/set the number of MD steps.
"""
self.assertEqual(self.mv.get_number_of_md_steps(), 10)
self.mv.set_number_of_md_steps(100)
self.assertEqual(self.mv.get_number_of_md_steps(), 100)
def test_n_md(self):
"""changing the length of the simulation should lead to a different
point
"""
self.mv.get_md().optimize(0)
self.mv.set_number_of_md_steps(100)
self.mv.propose()
oldn = self.get_nuisance_coordinates()
oldx = self.get_xyz_coordinates()
self.mv.reject()
self.mv.set_number_of_md_steps(10)
self.mv.propose()
newn = self.get_nuisance_coordinates()
newx = self.get_xyz_coordinates()
for i, j in zip(newx, oldx):
self.assertNotAlmostEqual(i[0], j[0], delta=1e-7)
self.assertNotAlmostEqual(i[1], j[1], delta=1e-7)
self.assertNotAlmostEqual(i[2], j[2], delta=1e-7)
for i, j in zip(newn, oldn):
self.assertNotAlmostEqual(i, j, delta=1e-7)
if __name__ == '__main__':
IMP.test.main()
| null |
1,848 |
from typing import Any, AnyStr, Callable, ContextManager, Generic, IO, Iterable, Iterator, List, Optional, Text, Type, Union
from typing_extensions import Final, Literal
import os
import sys
class _FNMatcher(Generic[AnyStr]):
pattern: AnyStr = ...
def __init__(self, pattern: AnyStr) -> None: ...
def __call__(self, path: local) -> bool: ...
class _Stat:
path: Final[local] = ...
mode: Final[int]
ino: Final[int]
dev: Final[int]
nlink: Final[int]
uid: Final[int]
gid: Final[int]
size: Final[int]
atime: Final[float]
mtime: Final[float]
ctime: Final[float]
atime_ns: Final[int]
mtime_ns: Final[int]
ctime_ns: Final[int]
if sys.version_info >= (3, 8) and sys.platform == "win32":
reparse_tag: Final[int]
blocks: Final[int]
blksize: Final[int]
rdev: Final[int]
flags: Final[int]
gen: Final[int]
birthtime: Final[int]
rsize: Final[int]
creator: Final[int]
type: Final[int]
if sys.platform != 'win32':
@property
def owner(self) -> str: ...
@property
def group(self) -> str: ...
def isdir(self) -> bool: ...
def isfile(self) -> bool: ...
def islink(self) -> bool: ...
if sys.version_info >= (3, 6):
_PathLike = os.PathLike
else:
class _PathLike(Generic[AnyStr]):
def __fspath__(self) -> AnyStr: ...
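# Anything accepted where a path is expected: str/bytes, an os.PathLike, or local.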
_PathType = Union[bytes, Text, _PathLike[str], _PathLike[bytes], local]
class local(_PathLike[str]):
class ImportMismatchError(ImportError): ...
sep: Final[str]
strpath: Final[str]
def __init__(self, path: _PathType = ..., expanduser: bool = ...) -> None: ...
def __hash__(self) -> int: ...
def __eq__(self, other: object) -> bool: ...
def __ne__(self, other: object) -> bool: ...
def __lt__(self, other: object) -> bool: ...
def __gt__(self, other: object) -> bool: ...
def __add__(self, other: object) -> local: ...
def __cmp__(self, other: object) -> int: ...
def __div__(self, other: _PathType) -> local: ...
def __truediv__(self, other: _PathType) -> local: ...
def __fspath__(self) -> str: ...
@classmethod
def get_temproot(cls) -> local: ...
@classmethod
def make_numbered_dir(
cls,
prefix: str = ...,
rootdir: Optional[local] = ...,
keep: Optional[int] = ...,
lock_timeout: int = ...,
) -> local: ...
@classmethod
def mkdtemp(cls, rootdir: Optional[local] = ...) -> local: ...
@classmethod
def sysfind(
cls,
name: _PathType,
checker: Optional[Callable[[local], bool]] = ...,
paths: Optional[Iterable[_PathType]] = ...,
) -> Optional[local]: ...
@property
def basename(self) -> str: ...
@property
def dirname(self) -> str: ...
@property
def purebasename(self) -> str: ...
@property
def ext(self) -> str: ...
def as_cwd(self) -> ContextManager[Optional[local]]: ...
def atime(self) -> float: ...
def bestrelpath(self, dest: local) -> str: ...
def chdir(self) -> local: ...
def check(
self,
*,
basename: int = ..., notbasename: int = ...,
basestarts: int = ..., notbasestarts: int = ...,
dir: int = ..., notdir: int = ...,
dotfile: int = ..., notdotfile: int = ...,
endswith: int = ..., notendswith: int = ...,
exists: int = ..., notexists: int = ...,
ext: int = ..., notext: int = ...,
file: int = ..., notfile: int = ...,
fnmatch: int = ..., notfnmatch: int = ...,
link: int = ..., notlink: int = ...,
relto: int = ..., notrelto: int = ...,
) -> bool: ...
def chmod(self, mode: int, rec: Union[int, str, Text, Callable[[local], bool]] = ...) -> None: ...
if sys.platform != 'win32':
def chown(self, user: Union[int, str], group: Union[int, str], rec: int = ...) -> None: ...
def METHOD_NAME(self, other: local) -> Optional[local]: ...
def computehash(self, hashtype: str = ..., chunksize: int = ...) -> str: ...
def copy(self, target: local, mode: bool = ..., stat: bool = ...) -> None: ...
def dirpath(self, *args: _PathType, abs: int = ...) -> local: ...
def dump(self, obj: Any, bin: Optional[int] = ...) -> None: ...
def ensure(self, *args: _PathType, dir: int = ...) -> local: ...
def ensure_dir(self, *args: _PathType) -> local: ...
def exists(self) -> bool: ...
def fnmatch(self, pattern: str) -> _FNMatcher: ...
def isdir(self) -> bool: ...
def isfile(self) -> bool: ...
def islink(self) -> bool: ...
def join(self, *args: _PathType, abs: int = ...) -> local: ...
def listdir(
self,
fil: Optional[Union[str, Text, Callable[[local], bool]]] = ...,
sort: Optional[bool] = ...,
) -> List[local]: ...
def load(self) -> Any: ...
def lstat(self) -> _Stat: ...
def mkdir(self, *args: _PathType) -> local: ...
if sys.platform != 'win32':
def mklinkto(self, oldname: Union[str, local]) -> None: ...
def mksymlinkto(self, value: local, absolute: int = ...) -> None: ...
def move(self, target: local) -> None: ...
def mtime(self) -> float: ...
def new(
self,
*,
drive: str = ...,
dirname: str = ...,
basename: str = ...,
purebasename: str = ...,
ext: str = ...,
) -> local: ...
def open(self, mode: str = ..., ensure: bool = ..., encoding: Optional[str] = ...) -> IO[Any]: ...
def parts(self, reverse: bool = ...) -> List[local]: ...
def pyimport(
self,
modname: Optional[str] = ...,
ensuresyspath: Union[bool, Literal["append", "importlib"]] = ...,
) -> Any: ...
def pypkgpath(self) -> Optional[local]: ...
def read(self, mode: str = ...) -> Union[Text, bytes]: ...
def read_binary(self) -> bytes: ...
def read_text(self, encoding: str) -> Text: ...
def readlines(self, cr: int = ...) -> List[str]: ...
if sys.platform != 'win32':
def readlink(self) -> str: ...
def realpath(self) -> local: ...
def relto(self, relpath: Union[str, local]) -> str: ...
def remove(self, rec: int = ..., ignore_errors: bool = ...) -> None: ...
def rename(self, target: _PathType) -> None: ...
def samefile(self, other: _PathType) -> bool: ...
def setmtime(self, mtime: Optional[float] = ...) -> None: ...
def size(self) -> int: ...
def stat(self, raising: bool = ...) -> _Stat: ...
def sysexec(self, *argv: Any, **popen_opts: Any) -> Text: ...
def visit(
self,
fil: Optional[Union[str, Text, Callable[[local], bool]]] = ...,
rec: Optional[Union[Literal[1, True], str, Text, Callable[[local], bool]]] = ...,
ignore: Type[Exception] = ...,
bf: bool = ...,
sort: bool = ...,
) -> Iterator[local]: ...
def write(self, data: Any, mode: str = ..., ensure: bool = ...) -> None: ...
def write_binary(self, data: bytes, ensure: bool = ...) -> None: ...
def write_text(self, data: Union[str, Text], encoding: str, ensure: bool = ...) -> None: ...
# Untyped types below here.
svnwc: Any
svnurl: Any
SvnAuth: Any
| null |
1,849 |
import glob
import sys
from threading import Thread
from time import sleep
from acq4.devices.LightSource import LightSource
from acq4.drivers.SerialDevice import SerialDevice
from acq4.util.HelpfulException import HelpfulException
from acq4.util.Mutex import Mutex
class CoolLEDLightSource(LightSource):
"""
The Cool LED family of light sources should conform to the protocol used here.
Config options
--------------
port | string
The name of the serial port to connect to (e.g. COM1, /dev/ttyS2). If the port is
set to "probe", this will scan through all the available ports in search of a device
that responds like a Cool LED device. This may produce unexpected behavior in other
devices.
"""
def __init__(self, dm, config, name):
super(CoolLEDLightSource, self).__init__(dm, config, name)
self._port = config["port"]
if self._port == "probe":
self._port = self._detectCoolLEDPort()
self._devConn = SerialDevice(port=self._port, baudrate=57600, timeout=0)
self.addSource("A", {"adjustableBrightness": True})
self.addSource("B", {"adjustableBrightness": True})
self.addSource("C", {"adjustableBrightness": True})
self._writeBuffer = ""
self._writeLock = Mutex()
self._ioThread = Thread(target=self._ioAsNeeded)
self._ioThread.start()
@staticmethod
def _detectCoolLEDPort():
if sys.platform.startswith("win"):
ports = ["COM%s" % (i + 1) for i in range(10)]
elif sys.platform.startswith("linux") or sys.platform.startswith("cygwin"):
# this excludes your current terminal "/dev/tty"
ports = glob.glob("/dev/tty[A-Za-z]*")
elif sys.platform.startswith("darwin"):
ports = glob.glob("/dev/tty.*")
else:
raise EnvironmentError("Unsupported platform")
for port in ports:
try:
conn = SerialDevice(port=port, baudrate=57600, timeout=0.1)
if conn.readline()[0:7] == b"CoolLED" or conn.readline() == 4:
conn.close()
return port
elif conn.readline() == b"":
conn.write("XVER\n".encode("utf-8"))
out = conn.read(7)
if out == b"XFW_VER":
conn.close()
return port
else:
conn.close()
except (OSError, TimeoutError):
pass
raise HelpfulException("Could not detect a usb CoolLED light source. Are the drivers installed?")
def _ioAsNeeded(self):
while True:
if len(self._writeBuffer) > 0:
with self._writeLock:
dataToWrite = self._writeBuffer
self._writeBuffer = ""
self._devConn.write(dataToWrite.encode("utf-8"))
while self._devConn.hasDataToRead():
self._handleData(self._devConn.readline().decode("utf-8"))
sleep(0.2)
def _requestStatus(self):
self._sendCommand("CSS?")
def _sendCommand(self, cmd):
with self._writeLock:
self._writeBuffer += f"{cmd}\n"
def _handleData(self, resp):
try:
self.sourceConfigs["A"]["active"] = (resp[5] == "N")
self.sourceConfigs["A"]["brightness"] = int(resp[6:9])
self.sourceConfigs["B"]["active"] = (resp[11] == "N")
self.sourceConfigs["B"]["brightness"] = int(resp[12:15])
self.sourceConfigs["C"]["active"] = (resp[17] == "N")
self.sourceConfigs["C"]["brightness"] = int(resp[18:21])
except (IndexError, ValueError):
pass
@staticmethod
def _makeSetterCommand(channel, onOrOff, brightness):
onOrOff = "N" if onOrOff else "F"
return f"CSS{channel}S{onOrOff}{brightness:03d}"
def quit(self):
self._devConn.close()
def sourceActive(self, name):
return self.sourceConfigs[name].get("active", False)
def METHOD_NAME(self, name, active):
cmd = self._makeSetterCommand(name, active, int(self.getSourceBrightness(name) * 100))
self._sendCommand(cmd)
def getSourceBrightness(self, name):
return self.sourceConfigs[name].get("brightness", 0) / 100.
def setSourceBrightness(self, name, percent):
cmd = self._makeSetterCommand(name, percent > 0, int(percent * 100))
self._sendCommand(cmd)
| null |
1,850 |
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
#
# Project:
# glideinWMS
#
# Description:
# unit test for glideinwms/lib/xmlParse.py
#
# Author:
# Dennis Box [email protected]
#
import unittest
import xml.dom.minidom
import xmlrunner
# TODO: should OrderedDict be removed? It is the one from the stdlib, but these tests are
# also testing the XML conversion. It could then be imported directly: from collections import OrderedDict
from glideinwms.lib.xmlParse import (
domel2dict,
getXMLAttributes,
getXMLElements,
is_singular_of,
OrderedDict,
xmlfile2dict,
xmlstring2dict,
)
xmlstr = """
<test date="1/2/07">
<params what="xx">
<param name="x" value="12"/>
<param name="y" value="88"/>
</params>
<files>
<file absname="/tmp/abc.txt"/>
<file absname="/tmp/w.log" mod="-rw-r--r--"/>
</files>
<temperature F="100" C="40"/>
</test>
"""
xmlstr_dict_repr = """{'date': '1/2/07', 'params': {'what': 'xx', 'x': {'value': '12'}, 'y': {'value': '88'}}, 'files': [{'absname': '/tmp/abc.txt'}, {'absname': '/tmp/w.log', 'mod': '-rw-r--r--'}], 'temperature': {'F': '100', 'C': '40'}}"""
ordered_dict_values_repr = """['1/2/07', {'what': 'xx', 'x': {'value': '12'}, 'y': {'value': '88'}}, [{'absname': '/tmp/abc.txt'}, {'absname': '/tmp/w.log', 'mod': '-rw-r--r--'}], {'F': '100', 'C': '40'}]"""
ordered_dict_items_repr = """[('date', '1/2/07'), ('params', {'what': 'xx', 'x': {'value': '12'}, 'y': {'value': '88'}}), ('files', [{'absname': '/tmp/abc.txt'}, {'absname': '/tmp/w.log', 'mod': '-rw-r--r--'}]), ('temperature', {'F': '100', 'C': '40'})]"""
expected = ""
class TestOrderedDict(unittest.TestCase):
def test___delitem__(self):
dict1 = xmlstring2dict(xmlstr, use_ord_dict=False, always_singular_list=[])
ordered_dict = OrderedDict(dict1)
od2 = ordered_dict.copy()
ordered_dict.__delitem__("temperature")
self.assertTrue("temperature" in od2)
self.assertFalse("temperature" in ordered_dict)
def test___init__(self):
dict1 = xmlstring2dict(xmlstr, use_ord_dict=False, always_singular_list=[])
ordered_dict = OrderedDict(dict1)
self.assertNotEqual(ordered_dict, None)
def test___setitem__(self):
dict1 = xmlstring2dict(xmlstr, use_ord_dict=False, always_singular_list=[])
ordered_dict = OrderedDict(dict1)
ordered_dict.__setitem__("foo", "bar")
self.assertTrue("foo" in ordered_dict)
def test_clear(self):
dict1 = xmlstring2dict(xmlstr, use_ord_dict=False, always_singular_list=[])
ordered_dict = OrderedDict(dict1)
ordered_dict.clear()
self.assertEqual("{}", ordered_dict.__repr__())
def test_copy(self):
dict1 = xmlstring2dict(xmlstr, use_ord_dict=False, always_singular_list=[])
ordered_dict = OrderedDict(dict1)
od2 = ordered_dict.copy()
self.assertEqual(od2.__repr__(), ordered_dict.__repr__())
def test_items(self):
dict1 = xmlstring2dict(xmlstr, use_ord_dict=False, always_singular_list=[])
ordered_dict = OrderedDict(dict1)
self.assertEqual(ordered_dict_items_repr, list(ordered_dict.items()).__repr__())
def test_keys(self):
dict1 = xmlstring2dict(xmlstr, use_ord_dict=False, always_singular_list=[])
ordered_dict = OrderedDict(dict1)
self.assertEqual("['date', 'params', 'files', 'temperature']", list(ordered_dict.keys()).__repr__())
def METHOD_NAME(self):
dict1 = xmlstring2dict(xmlstr, use_ord_dict=False, always_singular_list=[])
ordered_dict = OrderedDict(dict1)
self.assertEqual("('temperature', {'F': '100', 'C': '40'})", ordered_dict.popitem().__repr__())
def test_setdefault(self):
dict1 = xmlstring2dict(xmlstr, use_ord_dict=False, always_singular_list=[])
ordered_dict = OrderedDict(dict1)
failobj = "not here"
ordered_dict.setdefault("Dave", failobj)
self.assertEqual(ordered_dict.get("Dave"), failobj)
ordered_dict["Dave"] = "here"
self.assertNotEqual(ordered_dict.get("Dave"), failobj)
self.assertEqual(ordered_dict.get("Dave"), "here")
def test_update(self):
dict1 = xmlstring2dict(xmlstr, use_ord_dict=False, always_singular_list=[])
ordered_dict = OrderedDict(dict1)
upd = {"foo": "bar"}
ordered_dict.update(upd)
self.assertTrue("foo" in ordered_dict)
def test_values(self):
dict1 = xmlstring2dict(xmlstr, use_ord_dict=False, always_singular_list=[])
ordered_dict = OrderedDict(dict1)
self.assertEqual(ordered_dict_values_repr, list(ordered_dict.values()).__repr__())
class TestXmlfile2dict(unittest.TestCase):
def test_xmlfile2dict(self):
infile = "fixtures/test_lib_parse.xml"
dict1 = xmlfile2dict(infile, use_ord_dict=True, always_singular_list=[])
self.assertEqual(xmlstr_dict_repr, dict1.__repr__())
class TestXmlstring2dict(unittest.TestCase):
def test_xmlstring2dict(self):
self.assertEqual(
xmlstr_dict_repr, xmlstring2dict(xmlstr, use_ord_dict=True, always_singular_list=[]).__repr__()
)
#
# These are all private
#
class TestGetXMLElements(unittest.TestCase):
def test_get_xml_elements(self):
doc = xml.dom.minidom.parseString("<xml><foo></foo></xml>")
self.assertTrue("DOM Element: foo" in getXMLElements(doc.documentElement).__repr__())
class TestGetXMLAttributes(unittest.TestCase):
def test_get_xml_attributes(self):
doc = xml.dom.minidom.parseString("""<xml><foo><param name="x" value="12"/></foo></xml>""")
self.assertEqual("{}", getXMLAttributes(doc.documentElement, use_ord_dict=True).__repr__())
class TestIsSingularOf(unittest.TestCase):
def test_is_singular_of(self):
self.assertEqual(True, is_singular_of(mysin="dog", myplu="dogs", always_singular_list=[]))
self.assertEqual(True, is_singular_of(mysin="goose", myplu="geese", always_singular_list=["goose", "dog"]))
self.assertEqual(False, is_singular_of(mysin="moose", myplu="meese", always_singular_list=["goose", "dog"]))
self.assertEqual(True, is_singular_of(mysin="miss", myplu="misses", always_singular_list=["goose", "dog"]))
self.assertEqual(True, is_singular_of(mysin="army", myplu="armies", always_singular_list=["goose", "dog"]))
class TestDomel2dict(unittest.TestCase):
def test_domel2dict(self):
doc = xml.dom.minidom.parseString(xmlstr)
self.assertTrue(isinstance(domel2dict(doc.documentElement), dict))
if __name__ == "__main__":
unittest.main(testRunner=xmlrunner.XMLTestRunner(output="unittests-reports"))
| null |
1,851 |
import pytest
from osf.utils.workflows import DefaultStates, RequestTypes
from osf_tests.factories import (
AuthUserFactory,
NodeRequestFactory,
PreprintFactory,
PreprintProviderFactory,
PreprintRequestFactory,
ProjectFactory,
)
from osf.utils import permissions
@pytest.mark.django_db
class NodeRequestTestMixin(object):
@pytest.fixture()
def admin(self):
return AuthUserFactory()
@pytest.fixture()
def write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def requester(self):
return AuthUserFactory()
@pytest.fixture()
def noncontrib(self):
return AuthUserFactory()
@pytest.fixture()
def project(self, admin, write_contrib):
proj = ProjectFactory(creator=admin)
proj.save()
proj.add_contributor(
contributor=write_contrib,
permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
send_email='access_request',
save=True
)
return proj
@pytest.fixture()
def node_request(self, project, requester):
node_request = NodeRequestFactory(
creator=requester,
target=project,
request_type=RequestTypes.ACCESS.value,
machine_state=DefaultStates.INITIAL.value
)
node_request.run_submit(requester)
return node_request
@pytest.fixture()
def second_admin(self, project):
second_admin = AuthUserFactory()
project.add_contributor(
contributor=second_admin,
permissions=permissions.CREATOR_PERMISSIONS,
save=True
)
return second_admin
@pytest.mark.django_db
class PreprintRequestTestMixin(object):
@pytest.fixture()
def admin(self):
return AuthUserFactory()
@pytest.fixture()
def write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def noncontrib(self):
return AuthUserFactory()
@pytest.fixture()
def moderator(self):
return AuthUserFactory()
@pytest.fixture()
def requester(self):
return AuthUserFactory()
@pytest.fixture()
def pre_mod_provider(self, moderator):
ppp = PreprintProviderFactory(reviews_workflow='pre-moderation')
ppp.get_group('moderator').user_set.add(moderator)
return ppp
@pytest.fixture()
def post_mod_provider(self, moderator):
ppp = PreprintProviderFactory(reviews_workflow='post-moderation')
ppp.get_group('moderator').user_set.add(moderator)
return ppp
@pytest.fixture()
def none_mod_provider(self):
return PreprintProviderFactory(reviews_workflow=None)
@pytest.fixture()
def pre_mod_preprint(self, admin, write_contrib, pre_mod_provider):
pre = PreprintFactory(
creator=admin,
provider=pre_mod_provider,
is_published=False,
machine_state='pending'
)
pre.ever_public = True
pre.save()
pre.add_contributor(
contributor=write_contrib,
permissions=permissions.WRITE,
save=True
)
pre.is_public = True
pre.save()
return pre
@pytest.fixture()
def auto_withdrawable_pre_mod_preprint(self, admin, write_contrib, pre_mod_provider):
pre = PreprintFactory(
creator=admin,
provider=pre_mod_provider,
is_published=False,
machine_state='pending'
)
pre.save()
pre.add_contributor(
contributor=write_contrib,
permissions=permissions.WRITE,
save=True
)
return pre
@pytest.fixture()
def METHOD_NAME(self, admin, write_contrib, post_mod_provider):
post = PreprintFactory(
creator=admin,
provider=post_mod_provider,
)
post.save()
post.add_contributor(
contributor=write_contrib,
permissions=permissions.WRITE,
save=True
)
return post
@pytest.fixture()
def none_mod_preprint(self, admin, write_contrib, none_mod_provider):
preprint = PreprintFactory(
creator=admin,
provider=none_mod_provider,
)
preprint.save()
preprint.add_contributor(
contributor=write_contrib,
permissions=permissions.WRITE,
save=True
)
return preprint
@pytest.fixture()
def pre_request(self, pre_mod_preprint, admin):
request = PreprintRequestFactory(
creator=admin,
target=pre_mod_preprint,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(admin)
return request
@pytest.fixture()
def post_request(self, METHOD_NAME, admin):
request = PreprintRequestFactory(
creator=admin,
target=METHOD_NAME,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(admin)
return request
@pytest.fixture()
def none_request(self, none_mod_preprint, admin):
request = PreprintRequestFactory(
creator=admin,
target=none_mod_preprint,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(admin)
return request
@pytest.fixture()
def auto_approved_pre_request(self, auto_withdrawable_pre_mod_preprint, admin):
request = PreprintRequestFactory(
creator=admin,
target=auto_withdrawable_pre_mod_preprint,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(admin)
return request
@pytest.fixture()
def nonadmin_pre_request(self, pre_mod_preprint, requester):
request = PreprintRequestFactory(
creator=requester,
target=pre_mod_preprint,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(requester)
return request
@pytest.fixture()
def nonadmin_post_request(self, METHOD_NAME, requester):
request = PreprintRequestFactory(
creator=requester,
target=METHOD_NAME,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(requester)
return request
@pytest.fixture()
def nonadmin_none_request(self, none_mod_preprint, requester):
request = PreprintRequestFactory(
creator=requester,
target=none_mod_preprint,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(requester)
return request
@pytest.fixture()
def nonadmin_auto_approved_pre_request(self, auto_withdrawable_pre_mod_preprint, requester):
request = PreprintRequestFactory(
creator=requester,
target=auto_withdrawable_pre_mod_preprint,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(requester)
return request
| null |
1,852 |
import json
import interfaceIR
import sys
from json import JSONEncoder
def flatten_list(l):
"""
Recursively dig into a list of lists or values (or even a single value)
and pull out all the values in a list.
"""
def flatten_list_helper(l, accumulator):
if isinstance(l, list):
# it's a list, so descend into lists/values inside it
for sublist in l:
flatten_list_helper(sublist, accumulator)
else:
# we've reached a value (leaf node), so it should be returned
accumulator.append(l)
result = []
flatten_list_helper(l, result)
return result
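# Illustrative: flatten_list([1, [2, [3, 4]], 5]) returns [1, 2, 3, 4, 5],
# and a bare value such as flatten_list(7) returns [7].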
def clean_comments(comments):
"""
Flatten the unpredictable comments structure (which can be an arbitrary-depth
list of lists), and remove blank lines from the beginning and end.
"""
flat = flatten_list(comments)
comments = flatten_list([line.split("\n") for line in flat])
# often comments have blank lines at the beginning/end, so strip them:
while len(comments) > 0 and comments[0].strip() == "":
comments.pop(0)
while len(comments) > 0 and comments[-1].strip() == "":
comments.pop(-1)
return comments
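# Illustrative: clean_comments(["", ["a\nb"], "c", ""]) flattens and splits to
# ["", "a", "b", "c", ""], then strips the blank edges, returning ["a", "b", "c"].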
class MyEncoder(JSONEncoder):
def default(self, o):
o.kind = type(o).__name__
# don't serialize location if it's None
if hasattr(o, "location"):
if o.location is None:
del o.location
elif isinstance(o.location, tuple):
o.location = {
"file": o.location[0],
"line": o.location[1],
"column": o.location[2],
}
if hasattr(o, "iface"):
# no need to encode interface, as the position of the node in the tree already gives us that information.
del o.iface
if hasattr(o, "comment"):
# coalesce into a single attribute name for easier processing
o.comments = o.comment
del o.comment
if hasattr(o, "comments"):
o.comments = clean_comments(o.comments)
if isinstance(o, interfaceIR.Function):
return o.__dict__
if isinstance(o, interfaceIR.Parameter):
def convert(k, v):
if k == "direction":
if v == interfaceIR.DIR_IN:
return "in"
elif v == interfaceIR.DIR_OUT:
return "out"
elif v == interfaceIR.DIR_INOUT:
return "inout"
else:
return v
return v
return {k: convert(k, v) for k, v in o.__dict__.iteritems()}
if isinstance(o, interfaceIR.Type):
return o.__dict__
if isinstance(o, interfaceIR.Interface):
return o.__dict__
if isinstance(o, interfaceIR.NamedValue):
return o.__dict__
if isinstance(o, interfaceIR.Event):
return o.__dict__
if isinstance(o, interfaceIR.Definition):
return o.__dict__
if isinstance(o, interfaceIR.StructMember):
return o.__dict__
return super(MyEncoder, self).default(o)
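# Illustrative: a node whose location is the tuple ("api.def", 12, 3) is
# serialized with "location": {"file": "api.def", "line": 12, "column": 3},
# plus a "kind" field holding its class name (e.g. "Function").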
def METHOD_NAME(iface):
"""
Serialize the given IR.Interface as JSON.
"""
# change interfaces under 'import' into objects with their (real) name and path.
# otherwise they get serialized as nothing more than their names.
if hasattr(iface, "imports"):
def convert(k, v):
if hasattr(v, "comments"):
v.comments = clean_comments(v.comments)
# convert imported interfaces into dictionaries, otherwise they get serialized as strings
if isinstance(v, interfaceIR.Interface):
return v.__dict__
else:
return v
iface.imports = {k: convert(k, v) for k, v in iface.imports.iteritems()}
if hasattr(iface, "comments"):
iface.comments = clean_comments(iface.comments)
return json.dumps(iface.__dict__, cls=MyEncoder, indent=2)
| null |
1,853 |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
"""CLI feature tests."""
import os
import pytest
from apm.ApmCli import ApmCli
from pytest_bdd import scenario, given, when, then, parsers
from unittest.mock import patch, MagicMock
@pytest.fixture
def apm_cli(cmd_args):
return ApmCli(cmd_args)
@pytest.fixture
def cmd_args():
return []
@pytest.fixture
def parent_id_file(tmpdir):
d = tmpdir.mkdir("apmCli")
fh = d.join("parent_id.txt")
return fh
@scenario('features/cli_params.feature', 'Basic transaction')
def test_basic_transaction():
"""Basic transaction."""
pass
@scenario('features/cli_params.feature', 'Basic transaction and span')
def test_basic_transaction_and_span():
"""Basic transaction and span."""
pass
@scenario('features/cli_params.feature', 'Basic transaction with api key')
def test_basic_transaction_with_api_key():
"""Basic transaction with api key."""
pass
@scenario('features/cli_params.feature', 'Basic transaction with custom context')
def test_basic_transaction_with_custom_context():
"""Basic transaction with custom context."""
pass
@scenario('features/cli_params.feature', 'Basic transaction with a transaction result')
def test_basic_transaction_with_transaction_result():
"""Basic transaction with a transaction result."""
pass
@pytest.mark.xfail
@scenario('features/cli_params.feature', 'Missing APM config Command line parameter')
def test_missing_apm_config_command_line_parameter():
"""Missing APM config Command line parameter."""
pass
@pytest.mark.xfail
@scenario('features/cli_params.feature', 'Missing service name Command line parameter')
def test_missing_service_name_command_line_parameter():
"""Missing service name Command line parameter."""
pass
@pytest.mark.xfail
@scenario('features/cli_params.feature', 'Missing transaction Command line parameter')
def test_missing_transaction_command_line_parameter():
"""Missing transaction Command line parameter."""
pass
@pytest.mark.xfail
@scenario('features/cli_params.feature', 'Api Key and Token Command line parameters passed')
def test_api_key_and_token_command_line_parameter_passed():
"""Api Key and Token Command line parameters passed."""
pass
@scenario('features/cli_params.feature', 'Set parent transaction ID')
def test_set_parent_id():
"""Set parent transaction ID."""
pass
@scenario('features/cli_params.feature', 'Load parent transaction ID')
def test_load_parent_id():
"""Load parent transaction ID."""
pass
@scenario('features/cli_params.feature', 'Save parent transaction ID')
def test_save_parent_id():
"""Save parent transaction ID."""
pass
@given("an APM server URL")
def set_apm_url(cmd_args):
cmd_args.append('--apm-server-url')
cmd_args.append('https://apm.example.com:8200')
@given("a token")
def set_apm_token(cmd_args):
cmd_args.append('--apm-token')
cmd_args.append('token_example')
@given("a api key")
def set_apm_api_key(cmd_args):
cmd_args.append('--apm-api-key')
cmd_args.append('api_key_example')
@given("a service name")
def set_service_name(cmd_args):
cmd_args.append('--service-name')
cmd_args.append('example_svc')
@given("a transaction name")
def set_transaction_name(cmd_args):
cmd_args.append('--transaction-name')
cmd_args.append('transaction_test')
@given("a span name")
def set_span_name(cmd_args):
cmd_args.append('--span-name')
cmd_args.append('span_test')
@given("a span command")
def set_span_command(cmd_args):
cmd_args.append('--span-command')
cmd_args.append('echo hello')
@given("a span type")
def set_span_type(cmd_args):
cmd_args.append('--span-type')
cmd_args.append('span_type')
@given("a span subtype")
def set_span_subtype(cmd_args):
cmd_args.append('--span-subtype')
cmd_args.append('span_subtype')
@given("a span action")
def METHOD_NAME(cmd_args):
cmd_args.append('--span-action')
cmd_args.append('span_action')
@given("a span label")
def set_span_label(cmd_args):
cmd_args.append('--span-labels')
cmd_args.append('{"label": "foo"}')
@given("a custom context")
def set_custom_context(cmd_args):
cmd_args.append('--custom-context')
cmd_args.append('{"var": "foo"}')
@given(parsers.parse("a transaction result {result:S}"))
def set_transaction_result(cmd_args, result):
cmd_args.append('--transaction-result')
cmd_args.append(result)
@given("a file to save the parent transaction ID")
def set_save_parent_id(cmd_args, parent_id_file):
cmd_args.append('--parent-transaction-save')
filename = os.path.join(parent_id_file.dirname, parent_id_file.basename)
cmd_args.append(filename)
@given("a file to load the parent transaction ID")
def set_load_parent_id(cmd_args, parent_id_file):
cmd_args.append('--parent-transaction-load')
filename = os.path.join(parent_id_file.dirname, parent_id_file.basename)
cmd_args.append(filename)
parent_id_file.write('01-1234567890-00')
@given("a parent transaction ID")
def set_parent_id(cmd_args):
cmd_args.append('--parent-transaction')
cmd_args.append('01-1234567890-00')
@when("I launch the apm-cly.py")
def launch_cli(apm_cli):
# maybe we can use ELASTIC_APM_DISABLE_SEND instead
mock_urlopen_patcher = patch('elasticapm.transport.http.Transport.send')
mock_urlopen = mock_urlopen_patcher.start()
mock_urlopen.return_value.status = 200
mock_urlopen.return_value.read = 'body'
apm_cli.run()
@then("a transaction is reported")
def check_transaction(apm_cli):
assert apm_cli.transaction
@then("a span is reported")
def check_span(apm_cli):
assert apm_cli.span
assert apm_cli.span.name == 'span_test'
assert apm_cli.span.type == 'span_type'
assert apm_cli.span.subtype == 'span_subtype'
assert apm_cli.span.action == 'span_action'
assert apm_cli.span.labels['label'] == 'foo'
@then("a parent ID is set")
def check_parent(apm_cli):
assert apm_cli.transaction.trace_parent
@then('it fails to start')
def fails_to_start(apm_cli):
assert apm_cli is None
@then("the context is set")
def the_context_is_set(apm_cli):
assert apm_cli.transaction.context['custom']['var'] == 'foo'
@then(parsers.parse("the transaction result is {result:S}"))
def the_transaction_result_is(apm_cli, result):
assert apm_cli.transaction.result == result
@then("the file with the parent transaction ID exits")
def check_parent_id_file_exists(parent_id_file):
id = parent_id_file.read()
assert len(id) > 0
| null |
1,854 |
"""
This module manages loading/etc of Galaxy interactive tours.
"""
import logging
import os
from typing import (
List,
Union,
)
import yaml
from pydantic import parse_obj_as
from galaxy.exceptions import ObjectNotFound
from galaxy.navigation.data import load_root_component
from galaxy.util import config_directories_from_setting
from ._interface import ToursRegistry
from ._schema import TourList
log = logging.getLogger(__name__)
TOUR_EXTENSIONS = (".yml", ".yaml")
ROOT_COMPONENT = load_root_component()
def build_tours_registry(tour_directories: str):
return ToursRegistryImpl(tour_directories)
def noop_warn(str):
pass
def METHOD_NAME(contents_dict, warn=None, resolve_components=True):
warn = warn or noop_warn
# Some of this can be done on the clientside. Maybe even should?
title_default = contents_dict.get("title_default")
if "requirements" not in contents_dict:
contents_dict["requirements"] = []
for step in contents_dict["steps"]:
# Remove attributes no longer used, so that validation is not
# attempted on them.
if "backdrop" in step:
warn(f"Deprecated and dropped property backdrop found in step {step}")
step.pop("backdrop")
if "component" in step and resolve_components:
component = step.pop("component")
step["element"] = ROOT_COMPONENT.resolve_component_locator(component).locator
if "intro" in step:
step["content"] = step.pop("intro")
if "position" in step:
step["placement"] = step.pop("position")
if "element" not in step:
step["orphan"] = True
if title_default and "title" not in step:
step["title"] = title_default
def get_tour_id_from_path(tour_path: Union[str, os.PathLike]) -> str:
filename = os.path.basename(tour_path)
return os.path.splitext(filename)[0]
def load_tour_from_path(tour_path: Union[str, os.PathLike], warn=None, resolve_components=True) -> dict:
with open(tour_path) as f:
tour = yaml.safe_load(f)
METHOD_NAME(tour, warn=warn, resolve_components=resolve_components)
return tour
def is_yaml(filename: str) -> bool:
for ext in TOUR_EXTENSIONS:
if filename.endswith(ext):
return True
return False
def tour_paths(target_path: Union[str, os.PathLike]) -> List[str]:
paths = []
if os.path.isdir(target_path):
for filename in os.listdir(target_path):
if is_yaml(filename):
paths.append(str(os.path.join(target_path, filename)))
else:
paths.append(str(target_path))
return paths
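# Illustrative: for a directory holding "intro.yml", "extra.yaml" and "notes.txt",
# tour_paths() returns only the two YAML file paths; a single file path argument
# is returned as a one-element list unchanged.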
@ToursRegistry.register
class ToursRegistryImpl:
def __init__(self, tour_directories):
self.tour_directories = config_directories_from_setting(tour_directories)
self._load_tours()
def get_tours(self):
"""Return list of tours."""
tours = []
for k in self.tours.keys():
tourdata = {
"id": k,
"name": self.tours[k].get("name"),
"description": self.tours[k].get("description"),
"tags": self.tours[k].get("tags"),
"requirements": self.tours[k].get("requirements"),
}
tours.append(tourdata)
return parse_obj_as(TourList, tours)
def tour_contents(self, tour_id):
"""Return tour contents."""
# Extra format translation could happen here (like the previous intro_to_tour)
# For now just return the loaded contents.
if tour_id not in self.tours:
raise ObjectNotFound(f"tour {tour_id} not found")
return self.tours.get(tour_id)
def load_tour(self, tour_id):
"""Reload tour and return its contents."""
tour_path = self._get_path_from_tour_id(tour_id)
self._load_tour_from_path(tour_path)
return self.tours.get(tour_id)
def reload_tour(self, path):
"""Reload tour."""
# We may safely assume that the path is within the tour directory
filename = os.path.basename(path)
if is_yaml(filename):
self._load_tour_from_path(path)
def _load_tours(self):
self.tours = {}
for tour_dir in self.tour_directories:
for tour_path in tour_paths(tour_dir):
self._load_tour_from_path(tour_path)
def _load_tour_from_path(self, tour_path):
tour_id = get_tour_id_from_path(tour_path)
try:
tour = load_tour_from_path(tour_path)
except OSError:
log.exception(f"Tour '{tour_id}' could not be loaded, error reading file.")
return
except yaml.error.YAMLError:
log.exception(f"Tour '{tour_id}' could not be loaded, error within file. Please check your YAML syntax.")
return
except TypeError:
log.exception(
f"Tour '{tour_id}' could not be loaded, error within file."
" Possibly spacing related. Please check your YAML syntax."
)
return
self.tours[tour_id] = tour
log.info(f"Loaded tour '{tour_id}'")
def _get_path_from_tour_id(self, tour_id):
for tour_dir in self.tour_directories:
for ext in TOUR_EXTENSIONS:
tour_path = os.path.join(tour_dir, tour_id + ext)
if os.path.exists(tour_path):
return tour_path
| null |
1,855 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeInstanceHistoryEventsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeInstanceHistoryEvents','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_EventIds(self): # RepeatList
return self.get_query_params().get('EventId')
def set_EventIds(self, EventId): # RepeatList
for depth1 in range(len(EventId)):
self.add_query_param('EventId.' + str(depth1 + 1), EventId[depth1])
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_EventCycleStatus(self): # String
return self.get_query_params().get('EventCycleStatus')
def set_EventCycleStatus(self, EventCycleStatus): # String
self.add_query_param('EventCycleStatus', EventCycleStatus)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_ImpactLevel(self): # String
return self.get_query_params().get('ImpactLevel')
def set_ImpactLevel(self, ImpactLevel): # String
self.add_query_param('ImpactLevel', ImpactLevel)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_InstanceEventCycleStatuss(self): # RepeatList
return self.get_query_params().get('InstanceEventCycleStatus')
def set_InstanceEventCycleStatuss(self, InstanceEventCycleStatus): # RepeatList
for depth1 in range(len(InstanceEventCycleStatus)):
self.add_query_param('InstanceEventCycleStatus.' + str(depth1 + 1), InstanceEventCycleStatus[depth1])
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_EventPublishTimeEnd(self): # String
return self.get_query_params().get('EventPublishTime.End')
def set_EventPublishTimeEnd(self, EventPublishTimeEnd): # String
self.add_query_param('EventPublishTime.End', EventPublishTimeEnd)
def get_ResourceIds(self): # RepeatList
return self.get_query_params().get('ResourceId')
def set_ResourceIds(self, ResourceId): # RepeatList
for depth1 in range(len(ResourceId)):
self.add_query_param('ResourceId.' + str(depth1 + 1), ResourceId[depth1])
def get_InstanceEventTypes(self): # RepeatList
return self.get_query_params().get('InstanceEventType')
def set_InstanceEventTypes(self, InstanceEventType): # RepeatList
for depth1 in range(len(InstanceEventType)):
self.add_query_param('InstanceEventType.' + str(depth1 + 1), InstanceEventType[depth1])
def METHOD_NAME(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_NotBeforeStart(self): # String
return self.get_query_params().get('NotBefore.Start')
def set_NotBeforeStart(self, NotBeforeStart): # String
self.add_query_param('NotBefore.Start', NotBeforeStart)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ResourceType(self): # String
return self.get_query_params().get('ResourceType')
def set_ResourceType(self, ResourceType): # String
self.add_query_param('ResourceType', ResourceType)
def get_EventPublishTimeStart(self): # String
return self.get_query_params().get('EventPublishTime.Start')
def set_EventPublishTimeStart(self, EventPublishTimeStart): # String
self.add_query_param('EventPublishTime.Start', EventPublishTimeStart)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_NotBeforeEnd(self): # String
return self.get_query_params().get('NotBefore.End')
def set_NotBeforeEnd(self, NotBeforeEnd): # String
self.add_query_param('NotBefore.End', NotBeforeEnd)
def get_EventType(self): # String
return self.get_query_params().get('EventType')
def set_EventType(self, EventType): # String
self.add_query_param('EventType', EventType)
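# Hedged usage sketch (AcsClient and do_action_with_exception are standard
# aliyunsdkcore names; the credentials and region below are placeholders):
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = DescribeInstanceHistoryEventsRequest()
#   request.set_PageSize(50)
#   print(client.do_action_with_exception(request))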
| null |
1,856 |
import datetime
import decimal
import functools
import typing
import uuid
from dateutil import parser, tz
class CustomIsoparser(parser.isoparser):
def __init__(self, sep: typing.Optional[str] = None):
"""
:param sep:
A single character that separates date and time portions. If
``None``, the parser will accept any single character.
For strict ISO-8601 adherence, pass ``'T'``.
"""
if sep is not None:
if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
raise ValueError('Separator must be a single, non-numeric ' +
'ASCII character')
used_sep = sep.encode('ascii')
else:
used_sep = None
self._sep = used_sep
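# Illustrative: CustomIsoparser('T') accepts "2019-01-02T03:04:05" but rejects
# "2019-01-02 03:04:05"; with the default sep=None, any single character
# between the date and time portions is accepted.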
@staticmethod
def __get_ascii_bytes(str_in: str) -> bytes:
# If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII
# ASCII is the same in UTF-8
try:
return str_in.encode('ascii')
except UnicodeEncodeError as e:
msg = 'ISO-8601 strings should contain only ASCII characters'
raise ValueError(msg) from e
def __parse_isodate(self, dt_str: str) -> typing.Tuple[typing.Tuple[int, int, int], int]:
dt_str_ascii = self.__get_ascii_bytes(dt_str)
values = self._parse_isodate(dt_str_ascii) # type: ignore
values = typing.cast(typing.Tuple[typing.List[int], int], values)
components = typing.cast(typing.Tuple[int, int, int], tuple(values[0]))
pos = values[1]
return components, pos
def __parse_isotime(self, dt_str: str) -> typing.Tuple[int, int, int, int, typing.Optional[typing.Union[tz.tzutc, tz.tzoffset]]]:
dt_str_ascii = self.__get_ascii_bytes(dt_str)
values = self._parse_isotime(dt_str_ascii) # type: ignore
components: typing.Tuple[int, int, int, int, typing.Optional[typing.Union[tz.tzutc, tz.tzoffset]]] = tuple(values) # type: ignore
return components
def parse_isodatetime(self, dt_str: str) -> datetime.datetime:
date_components, pos = self.__parse_isodate(dt_str)
if len(dt_str) <= pos:
# len(components) <= 3
raise ValueError('Value is not a datetime')
if self._sep is None or dt_str[pos:pos + 1] == self._sep.decode('ascii'):  # _sep is stored as ASCII bytes; compare as str
hour, minute, second, microsecond, tzinfo = self.__parse_isotime(dt_str[pos + 1:])
if hour == 24:
hour = 0
components = (*date_components, hour, minute, second, microsecond, tzinfo)
return datetime.datetime(*components) + datetime.timedelta(days=1)
else:
components = (*date_components, hour, minute, second, microsecond, tzinfo)
else:
raise ValueError('String contains unknown ISO components')
return datetime.datetime(*components)
def parse_isodate_str(self, datestr: str) -> datetime.date:
components, pos = self.__parse_isodate(datestr)
if len(datestr) > pos:
raise ValueError('String contains invalid time components')
if len(components) > 3:
raise ValueError('String contains invalid time components')
return datetime.date(*components)
DEFAULT_ISOPARSER = CustomIsoparser()
@functools.lru_cache()
def as_date(arg: str) -> datetime.date:
"""
type = "string"
format = "date"
"""
return DEFAULT_ISOPARSER.parse_isodate_str(arg)
@functools.lru_cache()
def METHOD_NAME(arg: str) -> datetime.datetime:
"""
type = "string"
format = "date-time"
"""
return DEFAULT_ISOPARSER.parse_isodatetime(arg)
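# Illustrative: per the hour == 24 special case in parse_isodatetime above,
# METHOD_NAME("2007-04-05T24:00") returns datetime.datetime(2007, 4, 6, 0, 0).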
@functools.lru_cache()
def as_decimal(arg: str) -> decimal.Decimal:
"""
Applicable when storing decimals that are sent over the wire as strings
type = "string"
format = "number"
"""
return decimal.Decimal(arg)
@functools.lru_cache()
def as_uuid(arg: str) -> uuid.UUID:
"""
type = "string"
format = "uuid"
"""
return uuid.UUID(arg)
| null |
1,857 |
# -*- coding: utf-8 -*-
from PySide2 import QtWidgets, QtCore
from ...signals import signals
import logging
from activity_browser.logger import ABHandler
logger = logging.getLogger('ab_logs')
log = ABHandler.setup_with_logger(logger, __name__)
class ABTab(QtWidgets.QTabWidget):
def __init__(self, parent=None):
super(ABTab, self).__init__(parent)
self.setMovable(True)
self.tabs = dict() # keys: tab name; values: tab widget
# signals
signals.show_tab.connect(self.show_tab)
signals.hide_tab.connect(self.hide_tab)
signals.toggle_show_or_hide_tab.connect(self.toggle_tab_visibility)
signals.hide_when_empty.connect(self.hide_when_empty)
self.connect(self, QtCore.SIGNAL('currentChanged(int)'), self.current_index_changed)
def current_index_changed(self, current_index: int):
"""Optional function to accept the index of the selected tab."""
pass  # NotImplementedError is not raised here, as this function is called often and is not necessarily used.
def METHOD_NAME(self, obj, tab_name):
"""Default addTab method and add item to self.tabs
"""
self.tabs[tab_name] = obj
self.addTab(obj, tab_name)
def select_tab(self, obj):
"""Brings tab to focus."""
self.setCurrentIndex(self.indexOf(obj))
def toggle_tab_visibility(self, tab_name):
"""Show or hide a tab.
Used, e.g. for Windows-->show/hide menu."""
if tab_name in self.tabs:
if self.indexOf(self.tabs[tab_name]) != -1:
self.hide_tab(tab_name)
else:
self.show_tab(tab_name)
def hide_tab(self, tab_name, current_index=0):
"""Hides tab, but does not delete the QTabWidget itself."""
if tab_name in self.tabs:
tab = self.tabs[tab_name]
if self.indexOf(tab) != -1:
log.info("-hiding tab:", tab_name)
tab.setVisible(False)
# Only explicitly alter the tab index if we're hiding the
# current tab itself.
if self.currentIndex() == self.indexOf(tab):
self.setCurrentIndex(current_index)
self.removeTab(self.indexOf(tab))
def show_tab(self, tab_name):
"""Makes existing tab visible."""
if tab_name in self.tabs:
tab = self.tabs[tab_name]
log.info("+showing tab:", tab_name)
tab.setVisible(True)
self.addTab(tab, tab_name)
self.select_tab(tab)
def get_tab_name(self, obj):
"""Returns the name of a tab."""
tab_names = [name for name, o in self.tabs.items() if o == obj]
if len(tab_names) == 1:
return tab_names[0]
else:
log.warning("found", len(tab_names), "occurences of this object.")
def get_tab_name_from_index(self, index):
"""Return the name of a tab based on its index."""
tab_names = [self.tabText(i) for i in range(self.count()) if i == index]
if len(tab_names) == 1:
return tab_names[0]
else:
log.warning("Did not find instance of tab")
def hide_when_empty(self):
"""Show tab if it has sub-tabs (not empty) or hide if it has no sub-tabs (empty)."""
for tab_name, tab in self.tabs.items():
if hasattr(tab, "tabs"):
if not tab.tabs:
self.hide_tab(tab_name)
# else: # leads to strange behaviour of setCurrentIndex/select_tab
# self.show_tab(tab_name)
def close_tab(self, index):
"""Close tab by index."""
widget = self.widget(index)
tab_name = self.get_tab_name(widget)
if widget in self.tabs.values():
del self.tabs[tab_name]
widget.deleteLater()
self.removeTab(index)
signals.hide_when_empty.emit() # needs to be a signal as we want the super-tab to receive this...
def close_tab_by_tab_name(self, tab_name):
"""Close tab by tab name (key in self.tabs)."""
if tab_name in self.tabs:
self.close_tab(self.indexOf(self.tabs[tab_name]))
def close_all(self):
"""Close all tabs."""
open_tab_count = len(self.tabs)
for i in reversed(range(open_tab_count)):
self.close_tab(i)
| null |
1,858 |
# Copyright (c) 2023 Mira Geoscience Ltd.
#
# This file is part of geoh5py.
#
# geoh5py is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# geoh5py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with geoh5py. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
import uuid
import weakref
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, TypeVar, cast
if TYPE_CHECKING:
from .. import workspace as ws
EntityTypeT = TypeVar("EntityTypeT", bound="EntityType")
class EntityType(ABC):
_attribute_map = {"Description": "description", "ID": "uid", "Name": "name"}
def __init__(self, workspace: ws.Workspace, uid: uuid.UUID | None = None, **kwargs):
assert workspace is not None
self._workspace = weakref.ref(workspace)
assert uid is None or isinstance(uid, uuid.UUID)
self._description: str | None = "Entity"
self._name: str | None = "Entity"
self._on_file = False
self._uid: uuid.UUID = uid if uid is not None else uuid.uuid4()
for attr, item in kwargs.items():
try:
if attr in self._attribute_map:
attr = self._attribute_map[attr]
setattr(self, attr, item)
except AttributeError:
continue
@property
def METHOD_NAME(self):
"""
:obj:`dict` Correspondence map between property names used in geoh5py and
geoh5.
"""
return self._attribute_map
@property
def description(self) -> str | None:
return self._description
@description.setter
def description(self, description: str):
self._description = description
self.workspace.update_attribute(self, "attributes")
@classmethod
def find(
cls: type[EntityTypeT], workspace: ws.Workspace, type_uid: uuid.UUID
) -> EntityTypeT | None:
"""Finds in the given Workspace the EntityType with the given UUID for
this specific EntityType implementation class.
:return: EntityType of None
"""
return cast(EntityTypeT, workspace.find_type(type_uid, cls))
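# Hedged sketch: concrete subclasses typically use this to deduplicate types,
# e.g. SomeType.find(workspace, uid) or SomeType(workspace, uid=uid), where
# SomeType is a hypothetical non-abstract EntityType subclass.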
@property
def on_file(self) -> bool:
"""
:obj:`bool` Entity already present in
:obj:`~geoh5py.workspace.workspace.Workspace.h5file`.
"""
return self._on_file
@on_file.setter
def on_file(self, value: bool):
self._on_file = value
@staticmethod
@abstractmethod
def _is_abstract() -> bool:
"""Trick to prevent from instantiating abstract base class."""
return True
@property
def name(self) -> str | None:
return self._name
@name.setter
def name(self, name: str):
self._name = name
self.workspace.update_attribute(self, "attributes")
@property
def uid(self) -> uuid.UUID:
"""
:obj:`uuid.UUID` The unique identifier of an entity, either as stored
in geoh5 or generated in :func:`~uuid.UUID.uuid4` format.
"""
return self._uid
@uid.setter
def uid(self, uid: str | uuid.UUID):
if isinstance(uid, str):
uid = uuid.UUID(uid)
self._uid = uid
self.workspace.update_attribute(self, "attributes")
@property
def workspace(self) -> ws.Workspace:
"""
:obj:`~geoh5py.workspace.workspace.Workspace` registering this type.
"""
workspace = self._workspace()
# Workspace should never be null, unless this is a dangling type object,
# which means workspace has been deleted.
assert workspace is not None
return workspace
| null |
1,859 |
import bpy, os, sys, re, platform, subprocess
import numpy as np
class TLM_OIDN_Denoise:
image_array = []
image_output_destination = ""
denoised_array = []
def __init__(self, oidnProperties, img_array, dirpath):
self.oidnProperties = oidnProperties
self.image_array = img_array
self.image_output_destination = dirpath
self.check_binary()
def check_binary(self):
oidnPath = self.oidnProperties.tlm_oidn_path
if oidnPath != "":
file = oidnPath
filename, file_extension = os.path.splitext(file)
if platform.system() == 'Windows':
if(file_extension == ".exe"):
pass
else:
self.oidnProperties.tlm_oidn_path = os.path.join(self.oidnProperties.tlm_oidn_path,"oidnDenoise.exe")
else:
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Please provide OIDN path")
def denoise(self):
for image in self.image_array:
if image not in self.denoised_array:
image_path = os.path.join(self.image_output_destination, image)
#Save to pfm
loaded_image = bpy.data.images.load(image_path, check_existing=False)
width = loaded_image.size[0]
height = loaded_image.size[1]
image_output_array = np.zeros([width, height, 3], dtype="float32")
image_output_array = np.array(loaded_image.pixels)
image_output_array = image_output_array.reshape(height, width, 4)
image_output_array = np.float32(image_output_array[:,:,:3])
image_output_denoise_destination = image_path[:-4] + ".pfm"
image_output_denoise_result_destination = image_path[:-4] + "_denoised.pfm"
with open(image_output_denoise_destination, "wb") as fileWritePFM:
self.METHOD_NAME(fileWritePFM, image_output_array)
#Denoise
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Loaded image: " + str(loaded_image))
verbose = self.oidnProperties.tlm_oidn_verbose
affinity = self.oidnProperties.tlm_oidn_affinity
if verbose:
print("Denoiser search: " + bpy.path.abspath(self.oidnProperties.tlm_oidn_path))
v = "3"
else:
v = "0"
if affinity:
a = "1"
else:
a = "0"
threads = str(self.oidnProperties.tlm_oidn_threads)
maxmem = str(self.oidnProperties.tlm_oidn_maxmem)
if platform.system() == 'Windows':
oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
pipePath = [oidnPath, '-f', 'RTLightmap', '-hdr', image_output_denoise_destination, '-o', image_output_denoise_result_destination, '-verbose', v, '-threads', threads, '-affinity', a, '-maxmem', maxmem]
elif platform.system() == 'Darwin':
oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
pipePath = [oidnPath + ' -f ' + ' RTLightmap ' + ' -hdr ' + image_output_denoise_destination + ' -o ' + image_output_denoise_result_destination + ' -verbose ' + v]
else:
oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
oidnPath = oidnPath.replace(' ', '\\ ')
image_output_denoise_destination = image_output_denoise_destination.replace(' ', '\\ ')
image_output_denoise_result_destination = image_output_denoise_result_destination.replace(' ', '\\ ')
pipePath = [oidnPath + ' -f ' + ' RTLightmap ' + ' -hdr ' + image_output_denoise_destination + ' -o ' + image_output_denoise_result_destination + ' -verbose ' + v]
if not verbose:
denoisePipe = subprocess.Popen(pipePath, stdout=subprocess.PIPE, stderr=None, shell=True)
else:
denoisePipe = subprocess.Popen(pipePath, shell=True)
denoisePipe.communicate()[0]
if platform.system() != 'Windows':
image_output_denoise_result_destination = image_output_denoise_result_destination.replace('\\', '')
with open(image_output_denoise_result_destination, "rb") as f:
denoise_data, scale = self.load_pfm(f)
ndata = np.array(denoise_data)
ndata2 = np.dstack((ndata, np.ones((width,height))))
img_array = ndata2.ravel()
loaded_image.pixels = img_array
loaded_image.filepath_raw = image_output_denoise_result_destination = image_path[:-10] + "_denoised.hdr"
loaded_image.file_format = "HDR"
loaded_image.save()
self.denoised_array.append(image)
print(image_path)
def clean(self):
self.denoised_array.clear()
self.image_array.clear()
baked_image_array = []
for file in os.listdir(self.image_output_destination):
if file.endswith("_baked.hdr"):
baked_image_array.append(file)
#self.image_output_destination
#Clean temporary files here..
#...pfm
#...denoised.hdr
def load_pfm(self, file, as_flat_list=False):
#start = time()
header = file.readline().decode("utf-8").rstrip()
if header == "PF":
color = True
elif header == "Pf":
color = False
else:
raise Exception("Not a PFM file.")
dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("utf-8"))
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception("Malformed PFM header.")
scale = float(file.readline().decode("utf-8").rstrip())
if scale < 0: # little-endian
endian = "<"
scale = -scale
else:
endian = ">" # big-endian
data = np.fromfile(file, endian + "f")
shape = (height, width, 3) if color else (height, width)
if as_flat_list:
result = data
else:
result = np.reshape(data, shape)
#print("PFM import took %.3f s" % (time() - start))
return result, scale
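# Illustrative PFM header these two helpers round-trip:
#   "PF\n512 256\n-1.000000\n" -> color image, 512x256, little-endian floats
#   (a "Pf" header would mean greyscale; a positive scale means big-endian).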
def METHOD_NAME(self, file, image, scale=1):
#start = time()
if image.dtype.name != "float32":
raise Exception("Image dtype must be float32 (got %s)" % image.dtype.name)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
file.write(b"PF\n" if color else b"Pf\n")
file.write(b"%d %d\n" % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == "<" or endian == "=" and sys.byteorder == "little":
scale = -scale
file.write(b"%f\n" % scale)
image.tofile(file)
#print("PFM export took %.3f s" % (time() - start))
| null |
1,860 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class EnableCenVbrHealthCheckRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'EnableCenVbrHealthCheck')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_CenId(self): # String
return self.get_query_params().get('CenId')
def set_CenId(self, CenId): # String
self.add_query_param('CenId', CenId)
def get_HealthCheckTargetIp(self): # String
return self.get_query_params().get('HealthCheckTargetIp')
def set_HealthCheckTargetIp(self, HealthCheckTargetIp): # String
self.add_query_param('HealthCheckTargetIp', HealthCheckTargetIp)
def get_HealthyThreshold(self): # Integer
return self.get_query_params().get('HealthyThreshold')
def set_HealthyThreshold(self, HealthyThreshold): # Integer
self.add_query_param('HealthyThreshold', HealthyThreshold)
def get_VbrInstanceOwnerId(self): # Long
return self.get_query_params().get('VbrInstanceOwnerId')
def set_VbrInstanceOwnerId(self, VbrInstanceOwnerId): # Long
self.add_query_param('VbrInstanceOwnerId', VbrInstanceOwnerId)
def get_HealthCheckOnly(self): # Boolean
return self.get_query_params().get('HealthCheckOnly')
def set_HealthCheckOnly(self, HealthCheckOnly): # Boolean
self.add_query_param('HealthCheckOnly', HealthCheckOnly)
def get_VbrInstanceRegionId(self): # String
return self.get_query_params().get('VbrInstanceRegionId')
def set_VbrInstanceRegionId(self, VbrInstanceRegionId): # String
self.add_query_param('VbrInstanceRegionId', VbrInstanceRegionId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def METHOD_NAME(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_HealthCheckSourceIp(self): # String
return self.get_query_params().get('HealthCheckSourceIp')
def set_HealthCheckSourceIp(self, HealthCheckSourceIp): # String
self.add_query_param('HealthCheckSourceIp', HealthCheckSourceIp)
def get_HealthCheckInterval(self): # Integer
return self.get_query_params().get('HealthCheckInterval')
def set_HealthCheckInterval(self, HealthCheckInterval): # Integer
self.add_query_param('HealthCheckInterval', HealthCheckInterval)
def get_VbrInstanceId(self): # String
return self.get_query_params().get('VbrInstanceId')
def set_VbrInstanceId(self, VbrInstanceId): # String
self.add_query_param('VbrInstanceId', VbrInstanceId)
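# Hedged usage sketch: build the request and attach the health-check
# parameters. All ids and values below are placeholders, and sending the
# request through a configured AcsClient (credentials, region) is assumed
# to happen elsewhere.
if __name__ == "__main__":
    request = EnableCenVbrHealthCheckRequest()
    request.set_CenId("cen-xxxxxxxxxxxx")            # placeholder CEN id
    request.set_VbrInstanceId("vbr-xxxxxxxxxxxx")    # placeholder VBR id
    request.set_VbrInstanceRegionId("cn-hangzhou")   # placeholder region
    request.set_HealthCheckSourceIp("192.168.0.1")   # placeholder probe source
    request.set_HealthCheckTargetIp("192.168.0.2")   # placeholder probe target
    request.set_HealthCheckInterval(2)               # probe interval (assumed seconds)
    request.set_HealthyThreshold(8)                  # probes before marking healthy
    print(request.get_query_params())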
| null |
1,861 |
# Support for filament width sensor
#
# Copyright (C) 2019 Mustafa YILDIZ <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
ADC_REPORT_TIME = 0.500
ADC_SAMPLE_TIME = 0.001
ADC_SAMPLE_COUNT = 8
MEASUREMENT_INTERVAL_MM = 10
class FilamentWidthSensor:
def __init__(self, config):
self.printer = config.get_printer()
self.reactor = self.printer.get_reactor()
self.pin = config.get('pin')
self.nominal_filament_dia = config.getfloat(
'default_nominal_filament_diameter', above=1.0)
self.measurement_delay = config.getfloat('measurement_delay', above=0.)
self.measurement_max_difference = config.getfloat('max_difference',
above=0.)
self.max_diameter = (self.nominal_filament_dia
+ self.measurement_max_difference)
self.min_diameter = (self.nominal_filament_dia
- self.measurement_max_difference)
self.is_active = True
# filament array [position, filamentWidth]
self.filament_array = []
self.lastFilamentWidthReading = 0
# printer objects
self.toolhead = self.ppins = self.mcu_adc = None
self.printer.register_event_handler("klippy:ready", self.handle_ready)
# Start adc
self.ppins = self.printer.lookup_object('pins')
self.mcu_adc = self.ppins.setup_pin('adc', self.pin)
self.mcu_adc.setup_minmax(ADC_SAMPLE_TIME, ADC_SAMPLE_COUNT)
self.mcu_adc.setup_adc_callback(ADC_REPORT_TIME, self.adc_callback)
# extrude factor updating
self.extrude_factor_update_timer = self.reactor.register_timer(
self.extrude_factor_update_event)
# Register commands
self.gcode = self.printer.lookup_object('gcode')
self.gcode.register_command('QUERY_FILAMENT_WIDTH', self.cmd_M407)
self.gcode.register_command('RESET_FILAMENT_WIDTH_SENSOR',
self.cmd_ClearFilamentArray)
self.gcode.register_command('DISABLE_FILAMENT_WIDTH_SENSOR',
self.cmd_M406)
self.gcode.register_command('ENABLE_FILAMENT_WIDTH_SENSOR',
self.cmd_M405)
# Initialization
def handle_ready(self):
# Load printer objects
self.toolhead = self.printer.lookup_object('toolhead')
# Start extrude factor update timer
self.reactor.update_timer(self.extrude_factor_update_timer,
self.reactor.NOW)
def adc_callback(self, read_time, read_value):
# read sensor value
self.lastFilamentWidthReading = round(read_value * 5, 2)
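        # (Interpretation, not stated in the original: the raw ADC fraction is
        # scaled by 5 here, which assumes a 5 V-referenced analog sensor whose
        # output voltage maps directly to filament diameter in mm.)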
def METHOD_NAME(self, last_epos):
# Fill array
if len(self.filament_array) > 0:
# Get last reading position in array & calculate next
# reading position
next_reading_position = (self.filament_array[-1][0]
+ MEASUREMENT_INTERVAL_MM)
if next_reading_position <= (last_epos + self.measurement_delay):
self.filament_array.append([last_epos + self.measurement_delay,
self.lastFilamentWidthReading])
else:
# add first item to array
self.filament_array.append([self.measurement_delay + last_epos,
self.lastFilamentWidthReading])
def extrude_factor_update_event(self, eventtime):
# Update extrude factor
pos = self.toolhead.get_position()
last_epos = pos[3]
# Update filament array for lastFilamentWidthReading
self.METHOD_NAME(last_epos)
        # Does filament exist?
if self.lastFilamentWidthReading > 0.5:
if len(self.filament_array) > 0:
# Get first position in filament array
pending_position = self.filament_array[0][0]
if pending_position <= last_epos:
# Get first item in filament_array queue
item = self.filament_array.pop(0)
filament_width = item[1]
if ((filament_width <= self.max_diameter)
and (filament_width >= self.min_diameter)):
percentage = round(self.nominal_filament_dia**2
/ filament_width**2 * 100)
self.gcode.run_script("M221 S" + str(percentage))
else:
self.gcode.run_script("M221 S100")
else:
self.gcode.run_script("M221 S100")
self.filament_array = []
if self.is_active:
return eventtime + 1
else:
return self.reactor.NEVER
def cmd_M407(self, gcmd):
response = ""
if self.lastFilamentWidthReading > 0:
response += ("Filament dia (measured mm): "
+ str(self.lastFilamentWidthReading))
else:
response += "Filament NOT present"
gcmd.respond_info(response)
def cmd_ClearFilamentArray(self, gcmd):
self.filament_array = []
gcmd.respond_info("Filament width measurements cleared!")
# Set extrude multiplier to 100%
self.gcode.run_script_from_command("M221 S100")
def cmd_M405(self, gcmd):
response = "Filament width sensor Turned On"
if self.is_active:
response = "Filament width sensor is already On"
else:
self.is_active = True
# Start extrude factor update timer
self.reactor.update_timer(self.extrude_factor_update_timer,
self.reactor.NOW)
gcmd.respond_info(response)
def cmd_M406(self, gcmd):
response = "Filament width sensor Turned Off"
if not self.is_active:
response = "Filament width sensor is already Off"
else:
self.is_active = False
# Stop extrude factor update timer
self.reactor.update_timer(self.extrude_factor_update_timer,
self.reactor.NEVER)
# Clear filament array
self.filament_array = []
# Set extrude multiplier to 100%
self.gcode.run_script_from_command("M221 S100")
gcmd.respond_info(response)
def load_config(config):
return FilamentWidthSensor(config)
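# Worked example of the flow compensation above (illustrative numbers): with a
# nominal diameter of 1.75 mm and a measured width of 1.70 mm, the extrude
# factor becomes round(1.75**2 / 1.70**2 * 100) == 106, i.e. M221 S106, so the
# printer extrudes 6% more to compensate for the thinner filament.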
| null |
1,862 |
import os
from galaxy_test.driver import integration_util
THIS_DIR = os.path.dirname(__file__)
PANEL_VIEWS_DIR_1 = os.path.join(THIS_DIR, "panel_views_1")
class TestPanelViewsFromDirectoryIntegration(integration_util.IntegrationTestCase):
framework_tool_and_types = True
allow_tool_conf_override = False
@classmethod
def handle_galaxy_config_kwds(cls, config):
super().handle_galaxy_config_kwds(config)
config["panel_views_dir"] = PANEL_VIEWS_DIR_1
def test_section_copy(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="filter"))
index_as_list = index.json()
sections = [x for x in index_as_list if x["model_class"] == "ToolSection"]
section_names = [s["name"] for s in sections]
assert len(section_names) == 1
assert "For Tours" in section_names
def test_custom_label_order(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="my-custom"))
verify_my_custom(index)
def test_filtering_sections_by_tool_id(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="custom_2"))
index.raise_for_status()
index_as_list = index.json()
sections = [x for x in index_as_list if x["model_class"] == "ToolSection"]
assert len(sections) == 1
section = sections[0]
tools = section["elems"]
assert len(tools) == 3, len(tools)
def test_filtering_sections_by_tool_id_regex(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="custom_3"))
verify_custom_regex_filtered(index)
def test_filtering_root_by_type(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="custom_4"))
index.raise_for_status()
index_as_list = index.json()
assert len(index_as_list) == 2
# Labels are filtered out...
assert METHOD_NAME(index_as_list) == ["Tool", "Tool"]
assert element_ids(index_as_list) == ["empty_list", "count_list"]
def test_custom_section_def(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="custom_6"))
index.raise_for_status()
index_as_list = index.json()
assert len(index_as_list) == 1
assert METHOD_NAME(index_as_list) == ["ToolSection"]
section = index_as_list[0]
section_elems = section["elems"]
assert len(section_elems) == 4, METHOD_NAME(section_elems)
assert METHOD_NAME(section_elems) == ["ToolSectionLabel", "Tool", "ToolSectionLabel", "Tool"]
assert element_ids(section_elems) == ["the-start", "empty_list", "the-middle", "count_list"]
def test_section_embed(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="custom_5"))
verify_custom_embed(index)
def test_section_embed_filtering(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="custom_7"))
index.raise_for_status()
index_as_list = index.json()
assert len(index_as_list) == 1
assert METHOD_NAME(index_as_list) == ["ToolSection"]
section = index_as_list[0]
section_elems = section["elems"]
assert len(section_elems) == 5, METHOD_NAME(section_elems)
assert METHOD_NAME(section_elems) == ["Tool", "Tool", "Tool", "ToolSectionLabel", "Tool"]
elem_ids = element_ids(section_elems)
assert elem_ids[0:3] == ["multi_data_optional", "paths_as_file", "param_text_option"]
assert elem_ids[4] == "Filter1"
def test_section_reference_by_name(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="custom_8"))
verify_custom_embed(index)
def test_section_alias(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="custom_9"))
verify_custom_regex_filtered(index)
def test_expand_section_aliases(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="custom_10"))
index.raise_for_status()
index_as_list = index.json()
assert len(index_as_list) == 2
assert METHOD_NAME(index_as_list) == ["ToolSection", "ToolSection"]
def test_global_filters(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="custom_11"))
verify_custom_regex_filtered(index)
def test_global_filters_on_integrated_panel(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True, view="custom_12"))
index.raise_for_status()
index_as_list = index.json()
sections = [x for x in index_as_list if x["model_class"] == "ToolSection"]
assert len(sections) == 2
section = sections[0]
assert section["id"] == "test"
tools = section["elems"]
assert len(tools) == 2, len(tools)
class TestPanelViewsFromConfigIntegration(integration_util.IntegrationTestCase):
framework_tool_and_types = True
@classmethod
def handle_galaxy_config_kwds(cls, config):
super().handle_galaxy_config_kwds(config)
config["panel_views"] = [
{
"id": "my-custom",
"name": "My Custom",
"type": "generic",
"items": [
{
"type": "label",
"text": "The Start",
},
{
"type": "tool",
"id": "empty_list",
},
{
"type": "label",
"text": "The Middle",
},
{
"type": "tool",
"id": "count_list",
},
{
"type": "label",
"text": "The End",
},
],
}
]
config["default_panel_view"] = "my-custom"
def test_custom_label_order(self):
index = self.galaxy_interactor.get("tools", data=dict(in_panel=True))
verify_my_custom(index)
def verify_my_custom(index):
index.raise_for_status()
index_as_list = index.json()
sections = [x for x in index_as_list if x["model_class"] == "ToolSection"]
assert len(sections) == 0
assert len(index_as_list) == 5
assert METHOD_NAME(index_as_list) == ["ToolSectionLabel", "Tool", "ToolSectionLabel", "Tool", "ToolSectionLabel"]
def verify_custom_embed(index):
# custom_5 / custom_8
index.raise_for_status()
index_as_list = index.json()
assert len(index_as_list) == 1
assert METHOD_NAME(index_as_list) == ["ToolSection"]
section = index_as_list[0]
assert section["name"] == "My New Section"
assert section["id"] == "my-new-section"
section_elems = section["elems"]
assert len(section_elems) == 5, METHOD_NAME(section_elems)
assert METHOD_NAME(section_elems) == ["Tool", "Tool", "Tool", "Tool", "Tool"]
assert element_ids(section_elems) == [
"multi_data_optional",
"paths_as_file",
"param_text_option",
"column_param",
"Filter1",
]
def verify_custom_regex_filtered(index):
# custom_3 / custom_9
index.raise_for_status()
index_as_list = index.json()
sections = [x for x in index_as_list if x["model_class"] == "ToolSection"]
assert len(sections) == 1
section = sections[0]
tools = section["elems"]
assert len(tools) == 2, len(tools)
def element_ids(elements):
return [x["id"] for x in elements]
def METHOD_NAME(elements):
return [x["model_class"] for x in elements]
| null |
1,863 |
from datetime import datetime
import json
import logging
from typing import Any, Dict, List, Optional, Set
import unittest
from flask import Flask, Response
from flask.testing import FlaskClient
from flask_appbuilder import AppBuilder, SQLA
from flask_appbuilder.const import (
API_SECURITY_PASSWORD_KEY,
API_SECURITY_PROVIDER_KEY,
API_SECURITY_REFRESH_KEY,
API_SECURITY_USERNAME_KEY,
API_SECURITY_VERSION,
)
from hiro import Timeline
import jinja2
from tests.const import (
PASSWORD_ADMIN,
PASSWORD_READONLY,
USERNAME_ADMIN,
USERNAME_READONLY,
)
class FABTestCase(unittest.TestCase):
@staticmethod
def auth_client_get(client, token, uri):
return client.get(uri, headers={"Authorization": f"Bearer {token}"})
@staticmethod
def auth_client_delete(client, token, uri):
return client.delete(uri, headers={"Authorization": f"Bearer {token}"})
@staticmethod
def auth_client_put(client, token, uri, json):
return client.put(uri, json=json, headers={"Authorization": f"Bearer {token}"})
@staticmethod
def auth_client_post(client, token, uri, json):
return client.post(uri, json=json, headers={"Authorization": f"Bearer {token}"})
@staticmethod
def _login(client, username, password, refresh: bool = False):
"""
        Login helper method
        :param client: Flask test client
        :param username: username
        :param password: password
        :param refresh: if True, also request a refresh token
        :return: Flask client response class
"""
return client.post(
f"api/{API_SECURITY_VERSION}/security/login",
json={
API_SECURITY_USERNAME_KEY: username,
API_SECURITY_PASSWORD_KEY: password,
API_SECURITY_PROVIDER_KEY: "db",
API_SECURITY_REFRESH_KEY: refresh,
},
)
def login(self, client, username, password):
rv = self._login(client, username, password)
try:
return json.loads(rv.data.decode("utf-8")).get("access_token")
except Exception:
return rv
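    # Illustrative flow built on the helpers above (client is a Flask test
    # client and the user is assumed to exist, e.g. via create_default_users):
    #     token = self.login(client, USERNAME_ADMIN, PASSWORD_ADMIN)
    #     rv = self.auth_client_get(client, token, "api/v1/users/")
    # The endpoint path is a placeholder; any JWT-protected API route works.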
def browser_login(
self,
client: FlaskClient,
username: str,
password: str,
next_url: Optional[str] = None,
follow_redirects: bool = True,
) -> Response:
login_url = "/login/"
if next_url:
login_url = f"{login_url}?next={next_url}"
return client.post(
login_url,
data=dict(username=username, password=password),
follow_redirects=follow_redirects,
)
def assert_response(
self,
response: List[Dict[str, Any]],
expected_results: List[Dict[str, Any]],
exclude_cols: Optional[List[str]] = None,
):
exclude_cols = exclude_cols or []
for idx, expected_result in enumerate(expected_results):
for field_name, field_value in expected_result.items():
if field_name not in exclude_cols:
self.assertEqual(
response[idx][field_name], expected_result[field_name]
)
@staticmethod
def browser_logout(client):
return client.get("/logout/")
def create_default_users(self, appbuilder) -> None:
with Timeline(start=datetime(2020, 1, 1), scale=0).freeze():
            self.create_admin_user(appbuilder, USERNAME_ADMIN, PASSWORD_ADMIN)
        with Timeline(start=datetime(2020, 1, 1), scale=0).freeze():
            self.create_user(
                appbuilder,
USERNAME_READONLY,
PASSWORD_READONLY,
"ReadOnly",
first_name="readonly",
last_name="readonly",
email="[email protected]",
)
def create_admin_user(self, appbuilder, username, password):
self.create_user(appbuilder, username, password, "Admin")
@staticmethod
def create_user(
appbuilder,
username,
password,
role_name,
first_name="admin",
last_name="user",
email="[email protected]",
role_names=None,
):
user = appbuilder.sm.find_user(username=username)
if user:
appbuilder.session.delete(user)
appbuilder.session.commit()
roles = (
[appbuilder.sm.find_role(role_name) for role_name in role_names]
if role_names
else [appbuilder.sm.find_role(role_name)]
)
return appbuilder.sm.add_user(
username, first_name, last_name, email, roles, password
)
class BaseMVCTestCase(FABTestCase):
def METHOD_NAME(self):
self.app = Flask(__name__)
self.app.jinja_env.undefined = jinja2.StrictUndefined
self.app.config.from_object("tests.config_api")
logging.basicConfig(level=logging.ERROR)
self.db = SQLA(self.app)
self.appbuilder = AppBuilder(self.app, self.db.session)
self.create_default_users(self.appbuilder)
@property
def registered_endpoints(self) -> Set:
return {item.endpoint for item in self.app.url_map.iter_rules()}
def get_registered_view_endpoints(self, view_name) -> Set:
return {
item.endpoint
for item in self.app.url_map.iter_rules()
if item.endpoint.split(".")[0] == view_name
}
| null |
1,864 |
"""Annif backend using a SVM classifier"""
from __future__ import annotations
import os.path
from typing import TYPE_CHECKING, Any
import joblib
import numpy as np
import scipy.special
from sklearn.svm import LinearSVC
import annif.util
from annif.exception import NotInitializedException, NotSupportedException
from annif.suggestion import SubjectSuggestion, SuggestionBatch
from . import backend, mixins
if TYPE_CHECKING:
from scipy.sparse._csr import csr_matrix
from annif.corpus.document import DocumentCorpus
class SVCBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend):
"""Support vector classifier backend for Annif"""
name = "svc"
# defaults for uninitialized instances
_model = None
MODEL_FILE = "svc-model.gz"
DEFAULT_PARAMETERS = {"min_df": 1, "ngram": 1}
def _initialize_model(self) -> None:
if self._model is None:
path = os.path.join(self.datadir, self.MODEL_FILE)
self.debug("loading model from {}".format(path))
if os.path.exists(path):
self._model = joblib.load(path)
else:
raise NotInitializedException(
"model {} not found".format(path), backend_id=self.backend_id
)
def initialize(self, parallel: bool = False) -> None:
self.initialize_vectorizer()
self._initialize_model()
def _corpus_to_texts_and_classes(
self, corpus: DocumentCorpus
) -> tuple[list[str], list[int]]:
texts = []
classes = []
for doc in corpus.documents:
if len(doc.subject_set) > 1:
self.warning(
"training on a document with multiple subjects is not "
+ "supported by SVC; selecting one random subject."
)
elif not doc.subject_set:
continue # skip documents with no subjects
texts.append(doc.text)
classes.append(doc.subject_set[0])
return texts, classes
def METHOD_NAME(self, veccorpus: csr_matrix, classes: list[int]) -> None:
self.info("creating classifier")
self._model = LinearSVC(dual="auto")
self._model.fit(veccorpus, classes)
annif.util.atomic_save(
self._model, self.datadir, self.MODEL_FILE, method=joblib.dump
)
def _train(
self, corpus: DocumentCorpus, params: dict[str, Any], jobs: int = 0
) -> None:
if corpus == "cached":
raise NotSupportedException(
"SVC backend does not support reuse of cached training data."
)
if corpus.is_empty():
raise NotSupportedException("Cannot train SVC project with no documents")
texts, classes = self._corpus_to_texts_and_classes(corpus)
vecparams = {
"min_df": int(params["min_df"]),
"tokenizer": self.project.analyzer.tokenize_words,
"ngram_range": (1, int(params["ngram"])),
}
veccorpus = self.create_vectorizer(texts, vecparams)
self.METHOD_NAME(veccorpus, classes)
def _scores_to_suggestions(
self, scores: np.ndarray, params: dict[str, Any]
) -> list[SubjectSuggestion]:
results = []
limit = int(params["limit"])
for class_id in np.argsort(scores)[::-1][:limit]:
subject_id = self._model.classes_[class_id]
if subject_id is not None:
results.append(
SubjectSuggestion(subject_id=subject_id, score=scores[class_id])
)
return results
def _suggest_batch(
self, texts: list[str], params: dict[str, Any]
) -> SuggestionBatch:
vector = self.vectorizer.transform(texts)
confidences = self._model.decision_function(vector)
# convert to 0..1 score range using logistic function
scores_list = scipy.special.expit(confidences)
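        # For intuition: expit squashes a raw SVM margin into (0, 1), e.g.
        # expit(0.0) == 0.5 and expit(2.0) is roughly 0.88, so more confident
        # margins map to scores closer to 1.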
return SuggestionBatch.from_sequence(
[
[] if row.nnz == 0 else self._scores_to_suggestions(scores, params)
for scores, row in zip(scores_list, vector)
],
self.project.subjects,
)
| null |
1,865 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvs.endpoint import endpoint_data
class ModifyGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'vs', '2018-12-12', 'ModifyGroup')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_CaptureVideo(self):
return self.get_query_params().get('CaptureVideo')
def set_CaptureVideo(self,CaptureVideo):
self.add_query_param('CaptureVideo',CaptureVideo)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Enabled(self):
return self.get_query_params().get('Enabled')
def set_Enabled(self,Enabled):
self.add_query_param('Enabled',Enabled)
def get_CaptureOssPath(self):
return self.get_query_params().get('CaptureOssPath')
def set_CaptureOssPath(self,CaptureOssPath):
self.add_query_param('CaptureOssPath',CaptureOssPath)
def METHOD_NAME(self):
return self.get_query_params().get('PushDomain')
def set_PushDomain(self,PushDomain):
self.add_query_param('PushDomain',PushDomain)
def get_CaptureImage(self):
return self.get_query_params().get('CaptureImage')
def set_CaptureImage(self,CaptureImage):
self.add_query_param('CaptureImage',CaptureImage)
def get_Id(self):
return self.get_query_params().get('Id')
def set_Id(self,Id):
self.add_query_param('Id',Id)
def get_PlayDomain(self):
return self.get_query_params().get('PlayDomain')
def set_PlayDomain(self,PlayDomain):
self.add_query_param('PlayDomain',PlayDomain)
def get_OutProtocol(self):
return self.get_query_params().get('OutProtocol')
def set_OutProtocol(self,OutProtocol):
self.add_query_param('OutProtocol',OutProtocol)
def get_CaptureInterval(self):
return self.get_query_params().get('CaptureInterval')
def set_CaptureInterval(self,CaptureInterval):
self.add_query_param('CaptureInterval',CaptureInterval)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_InProtocol(self):
return self.get_query_params().get('InProtocol')
def set_InProtocol(self,InProtocol):
self.add_query_param('InProtocol',InProtocol)
def get_LazyPull(self):
return self.get_query_params().get('LazyPull')
def set_LazyPull(self,LazyPull):
self.add_query_param('LazyPull',LazyPull)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_Callback(self):
return self.get_query_params().get('Callback')
def set_Callback(self,Callback):
self.add_query_param('Callback',Callback)
def get_Region(self):
return self.get_query_params().get('Region')
def set_Region(self,Region):
self.add_query_param('Region',Region)
def get_CaptureOssBucket(self):
return self.get_query_params().get('CaptureOssBucket')
def set_CaptureOssBucket(self,CaptureOssBucket):
        self.add_query_param('CaptureOssBucket',CaptureOssBucket)
| null |
1,866 |
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Callable, Coroutine, Mapping, MutableMapping, NamedTuple, Optional, Sequence
from aiohttp import web
from ai.backend.agent.types import WebMiddleware
from ai.backend.common.logging import BraceStyleAdapter
from ai.backend.common.plugin import AbstractPlugin
log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined]
NewMetadataPluginResponse = NamedTuple(
"NewMetadataPluginResponse",
[("app", web.Application), ("global_middlewares", Sequence[WebMiddleware])],
)
InitMetadataPluginResponse = NamedTuple(
"InitMetadataPluginResponse",
[
("app", web.Application),
("global_middlewares", Sequence[WebMiddleware]),
("structure", Mapping[str, Any]),
],
)
MetadataPluginRoute = NamedTuple(
"MetadataPluginRoute",
[
("method", str),
("route", str),
("route_handler", Callable[[web.Request], Coroutine[Any, Any, web.Response]]),
("route_name", Optional[str]),
],
)
class MetadataPlugin(AbstractPlugin, metaclass=ABCMeta):
"""
Metadata plugins should create a valid aiohttp.web.Application instance. The returned app
instance will be a subapp of the root app defined by the manager, and additional user-properties
will be set as defined in ``ai.backend.gateway.server.PUBLIC_INTERFACES``.
    The init/cleanup methods of the plugin are ignored and the manager uses aiohttp's standard
application lifecycle handlers attached to the returned app instance.
"""
route_prefix: Optional[str]
@abstractmethod
async def prepare_app(self) -> NewMetadataPluginResponse:
pass
@abstractmethod
async def routes(self) -> Sequence[MetadataPluginRoute]:
pass
async def METHOD_NAME(self) -> InitMetadataPluginResponse:
app, global_middlewares = await self.prepare_app()
routes = await self.routes()
# Parse registered webapp's hierarchy to show it to user later
# e.g. structure with four routes
# (GET /hello, GET /hello/world, GET /hello/bar, POST /foo)
# will be:
# {
# '/hello': {
# '_': (/hello's handler),
# '/world': (/hello/world's handler),
# '/bar': (/hello/bar's handler),
# }
# '/foo': (/foo's handler)
# }
        # Note that a route defined at /hello will automatically be converted to /hello/_
        # upon actual webapp creation.
structure: MutableMapping[str, Any] = {}
for route in routes:
method, path, handler, name = route
# This variable will work as a 'traversal pointer' when creating structure object.
# See for loop below for usage.
structure_pointer = structure
_path = path
if not _path.startswith("/"):
_path = "/" + _path
raw_splitted = _path.split("/")
splitted = []
chunks = []
for i in range(len(raw_splitted)):
s = raw_splitted[i]
chunks.append(s)
if not (s.startswith("{") and s.endswith("}")):
splitted.append("/".join(chunks))
chunks = []
if len(chunks) > 0:
splitted.append("/".join(chunks))
# e.g. if route is /a/b/c/d:
# components will be ['a', 'b', 'c']
# resource_name will be 'd'
components, resource_name = splitted[1:-1], splitted[-1]
# traverse into subroute
for component in components:
if structure_pointer.get(component) is None:
structure_pointer[component] = {}
elif not isinstance(structure_pointer.get(component), dict):
structure_pointer[component] = {"_": structure_pointer[component]}
structure_pointer = structure_pointer[component]
if isinstance(structure_pointer.get(resource_name), dict):
structure_pointer[resource_name]["_"] = resource_name
app.router.add_route(method, path + "/_", handler, name=name)
else:
structure_pointer[resource_name] = resource_name
app.router.add_route(method, path, handler, name=name)
return InitMetadataPluginResponse(app, global_middlewares, structure)
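# Worked example of the traversal above (illustrative): registering
# GET /hello/world and then GET /hello yields
# structure == {"hello": {"world": "world", "_": "hello"}}, and the /hello
# handler is registered at /hello/_ so that /hello itself can represent the
# subtree; leaf values store the resource names.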
| null |
1,867 |
#!/usr/bin/env python3
"""
Installation script generated from a Bazel `install` target.
"""
import argparse
import collections
import filecmp
import itertools
import os
import re
import shutil
import stat
import sys
from subprocess import check_output, check_call, Popen, PIPE
import xml.etree.ElementTree as ET
prefix = None
pkg_name = None
dbg = False
gpu = False
dev = True
def shell_cmd(cmd, alert_on_failure=True):
"""Execute shell command and return (ret-code, stdout, stderr)."""
print("SHELL > {}".format(cmd))
proc = Popen(cmd, shell=True, close_fds=True, stdout=PIPE, stderr=PIPE)
ret = proc.wait()
stdout = proc.stdout.read().decode('utf-8') if proc.stdout else None
stderr = proc.stderr.read().decode('utf-8') if proc.stderr else None
if alert_on_failure and stderr and ret != 0:
sys.stderr.write('{}\n'.format(stderr))
return (ret, stdout, stderr)
def get_pkg_real_name(name, dev=False, dbg=False, gpu=False):
"""Get real package name by install parameters"""
new_name = name
if dev:
new_name += "-dev"
return new_name
def rename_package_name(dest):
"""Get packages name from file install destination."""
if not dev and not dbg and not gpu:
return dest
if dest.startswith("lib/"):
return dest
curr_pkg_name = dest.split("/")[0]
new_pkg_name = get_pkg_real_name(curr_pkg_name, dev, dbg, gpu)
    # Local build package versions are fixed to `local`
pkg_name_with_ver = new_pkg_name + "/local"
return dest.replace(curr_pkg_name, pkg_name_with_ver, 1)
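# Worked example (dev=True, dbg=gpu=False, illustrative path): a destination
# such as "planning/dag/planning.dag" is rewritten to
# "planning-dev/local/dag/planning.dag" -- the leading package component is
# renamed and pinned to the fixed "local" version, while anything under
# "lib/" passes through untouched.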
def setup_cyberfile(cyberfile_path, replace=True, dev=False, dbg=False, gpu=False):
"""Setup final cyberfile by install parameters. """
cyberfile = ET.parse(cyberfile_path)
root = cyberfile.getroot()
name = root.find("name")
old_name = name.text
name.text = get_pkg_real_name(name.text, dev, dbg, gpu)
for dep in root.findall("depend"):
if dep.get("condition"):
if not eval(dep.get("condition")):
root.remove(dep)
else:
del dep.attrib["condition"]
if replace:
cyberfile.write(cyberfile_path)
return old_name, name.text, ET.tostring(root)
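# Note on the condition handling above: a <depend condition="..."> entry is
# kept only when its condition evaluates truthy against this module's globals
# (e.g. condition="gpu" survives only when building with --gpu); the
# condition attribute itself is stripped from surviving entries.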
def setup_cyberfiles(pkg_name=None):
"""Setup final cyberfiles by install parameters. """
for d in os.listdir(prefix):
        if pkg_name is not None and d != pkg_name:
            # only handle the target package; skip the rest
            continue
cyberfile_path = prefix + d + "/local/cyberfile.xml"
if os.path.exists(cyberfile_path):
old_name, new_name, _ = setup_cyberfile(cyberfile_path, True, dev, dbg, gpu)
if os.path.exists(prefix + d + "/" + old_name + ".BUILD"):
os.rename(prefix + d + "/" + old_name + ".BUILD", prefix + d + "/" + new_name + ".BUILD")
def METHOD_NAME(cyberfile_path):
if not os.path.exists(cyberfile_path):
return None
cyberfile = ET.parse(cyberfile_path)
root = cyberfile.getroot()
src_path = root.find("src_path")
if src_path is None:
return None
pkg_type = root.find("type")
if pkg_type is None:
return None
if pkg_type.text == "module" or pkg_type.text == "module-wrapper":
        return src_path.text.replace("//", "", 1).replace("/", r"\/")
return None
def replace_config(pkg_module_dict, prefix, config_file):
"""
e.g.
1. /apollo/bazel-bin/modules/planning/libplanning_component.so ==> /opt/apollo/neo/packages/planning-dev/latest/lib/libplanning_component.so
# 2. /apollo/modules/planning/conf/planning_config_navi.pb.txt ==> /opt/apollo/neo/packages/planning-dev/latest/conf/planning_config_navi.pb.txt
# 3. /apollo/modules/planning/dag/planning.dag ==> /opt/apollo/neo/packages/planning-dev/local/dag/planning.dag
"""
for pkg_name, module_name in pkg_module_dict.items():
shell_cmd("sed -i 's/\/apollo\/bazel-bin\/{}\//{}{}\/latest\/lib\//g' {}".format(module_name, prefix.replace("/", "\/"), pkg_name, config_file))
#shell_cmd("sed -i 's/\/apollo\/{}\//{}{}\/local\//g' {}".format(module_name, prefix.replace("/", "\/"), pkg_name, config_file))
def replace_config_dir(full_c_d, prefix, pkg_module_dict):
if not os.path.exists(full_c_d):
return
for c in os.listdir(full_c_d):
c_f = full_c_d + "/" + c
if os.path.isdir(c_f):
replace_config_dir(c_f, prefix, pkg_module_dict)
elif c_f.endswith(".dag"):
replace_config(pkg_module_dict, prefix, c_f)
else:
continue
def fix_configs_in_pkg():
conf_dirs = [
"/local",
# "/local/launch",
# "/local/conf"
]
pkg_module_dict = {}
for d in os.listdir(prefix):
        pkg_name = d.replace("/", r"\/")
module_name = METHOD_NAME(prefix + d + "/local/cyberfile.xml")
if module_name is None:
continue
pkg_module_dict[pkg_name] = module_name
for d in os.listdir(prefix):
for c_d in conf_dirs:
full_c_d = prefix + d + c_d
replace_config_dir(full_c_d, prefix, pkg_module_dict)
def install_src(src, dst, filter):
dst = rename_package_name(dst)
    if not os.path.isdir(src):
        sys.stderr.write("install_src only supports directories; {} is not a directory.".format(src))
        sys.exit(-1)
dst_full = os.path.join(prefix, dst)
if not os.path.exists(dst_full):
os.makedirs(dst_full)
shell_cmd("cd {} && find . -name '{}'|xargs -i -I@@ cp -rvnfP --parents @@ {}/ > /dev/null"
.format(src, filter, dst_full))
def main(args):
global prefix
global pkg_name
global dbg
global gpu
global dev
# Set up options.
parser = argparse.ArgumentParser()
parser.add_argument('prefix', type=str, help='Install prefix')
parser.add_argument('--pkg_name', type=str, default=None,
help='Install target package name.')
parser.add_argument('--dbg', action='store_true', default=False,
help='debug package with debugging symbols.')
parser.add_argument('--gpu', action='store_true', default=False,
help='build with gpu.')
parser.add_argument('--dev', action='store_true', default=True,
help='dev package with headers.')
args = parser.parse_args(args)
# Get install prefix.
prefix = args.prefix
pkg_name = args.pkg_name
dbg = args.dbg
gpu = args.gpu
dev = True
# Transform install prefix if DESTDIR is set.
# https://www.gnu.org/prep/standards/html_node/DESTDIR.html
destdir = os.environ.get('DESTDIR')
if destdir:
prefix = destdir + prefix
# Because Bazel executes us in a strange working directory and not the
# working directory of the user's shell, enforce that the install
# location is an absolute path so that the user is not surprised.
if not os.path.isabs(prefix):
parser.error("Install prefix must be an absolute path (got '{}')\n".format(prefix))
# Execute the install actions.
<<actions>>
# Setup cyberfile
setup_cyberfiles(pkg_name)
# Fix config path
fix_configs_in_pkg()
if __name__ == "__main__":
main(sys.argv[1:])
| null |
1,868 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkretailcloud.endpoint import endpoint_data
class CreateAppRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'retailcloud', '2018-03-13', 'CreateApp')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BizTitle(self): # String
return self.get_body_params().get('BizTitle')
def set_BizTitle(self, BizTitle): # String
self.add_body_params('BizTitle', BizTitle)
def METHOD_NAME(self): # String
return self.get_body_params().get('OperatingSystem')
def set_OperatingSystem(self, OperatingSystem): # String
self.add_body_params('OperatingSystem', OperatingSystem)
def get_Description(self): # String
return self.get_body_params().get('Description')
def set_Description(self, Description): # String
self.add_body_params('Description', Description)
def get_Language(self): # String
return self.get_body_params().get('Language')
def set_Language(self, Language): # String
self.add_body_params('Language', Language)
def get_Title(self): # String
return self.get_body_params().get('Title')
def set_Title(self, Title): # String
self.add_body_params('Title', Title)
def get_GroupName(self): # String
return self.get_body_params().get('GroupName')
def set_GroupName(self, GroupName): # String
self.add_body_params('GroupName', GroupName)
def get_MiddleWareIdLists(self): # RepeatList
return self.get_body_params().get('MiddleWareIdList')
def set_MiddleWareIdLists(self, MiddleWareIdList): # RepeatList
for depth1 in range(len(MiddleWareIdList)):
self.add_body_params('MiddleWareIdList.' + str(depth1 + 1), MiddleWareIdList[depth1])
def get_StateType(self): # Integer
return self.get_body_params().get('StateType')
def set_StateType(self, StateType): # Integer
self.add_body_params('StateType', StateType)
def get_ServiceType(self): # String
return self.get_body_params().get('ServiceType')
def set_ServiceType(self, ServiceType): # String
self.add_body_params('ServiceType', ServiceType)
def get_UserRoless(self): # RepeatList
return self.get_body_params().get('UserRoles')
def set_UserRoless(self, UserRoles): # RepeatList
for depth1 in range(len(UserRoles)):
if UserRoles[depth1].get('RoleName') is not None:
self.add_body_params('UserRoles.' + str(depth1 + 1) + '.RoleName', UserRoles[depth1].get('RoleName'))
if UserRoles[depth1].get('UserType') is not None:
self.add_body_params('UserRoles.' + str(depth1 + 1) + '.UserType', UserRoles[depth1].get('UserType'))
if UserRoles[depth1].get('UserId') is not None:
self.add_body_params('UserRoles.' + str(depth1 + 1) + '.UserId', UserRoles[depth1].get('UserId'))
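    # For example (illustrative values), set_UserRoless([{'RoleName': 'Admin',
    # 'UserType': 'user', 'UserId': '1234'}]) flattens the list into the body
    # params 'UserRoles.1.RoleName', 'UserRoles.1.UserType' and
    # 'UserRoles.1.UserId'.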
def get_BizCode(self): # String
return self.get_body_params().get('BizCode')
def set_BizCode(self, BizCode): # String
self.add_body_params('BizCode', BizCode)
def get_Namespace(self): # String
return self.get_body_params().get('Namespace')
def set_Namespace(self, Namespace): # String
self.add_body_params('Namespace', Namespace)
| null |
1,869 |
#!/usr/bin/env python3
#
# Copyright (c) 2015 - 2023, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
'''
Helper functions for running power sweep experiments.
'''
import sys
import argparse
import geopmpy.agent
from experiment import launch_util
from experiment import common_args
from experiment import machine
from experiment.frequency_sweep import frequency_sweep
def setup_run_args(parser):
common_args.setup_run_args(parser)
common_args.add_run_max_turbo(parser)
common_args.add_min_frequency(parser)
common_args.add_max_frequency(parser)
common_args.add_step_frequency(parser)
common_args.add_min_uncore_frequency(parser)
common_args.add_max_uncore_frequency(parser)
common_args.add_step_uncore_frequency(parser)
def setup_uncore_frequency_bounds(mach, min_uncore_freq, max_uncore_freq,
step_uncore_freq):
sys_min = 1.2e9
sys_max = 2.7e9
sys_step = mach.frequency_step()
if min_uncore_freq is None:
min_uncore_freq = sys_min
if max_uncore_freq is None:
max_uncore_freq = sys_max
if step_uncore_freq is None:
step_uncore_freq = sys_step
if step_uncore_freq < sys_step or step_uncore_freq % sys_step != 0:
sys.stderr.write('<geopm> Warning: uncore frequency step size may be incompatible with p-states.\n')
if (max_uncore_freq - min_uncore_freq) % step_uncore_freq != 0:
sys.stderr.write('<geopm> Warning: uncore frequency range not evenly divisible by step size.\n')
if min_uncore_freq < sys_min or max_uncore_freq > sys_max:
raise RuntimeError('Uncore frequency bounds are out of range for this system')
if min_uncore_freq > max_uncore_freq:
raise RuntimeError('Uncore frequency min is greater than max')
num_step = 1 + int((max_uncore_freq - min_uncore_freq) // step_uncore_freq)
uncore_freqs = [step_uncore_freq * ss + min_uncore_freq for ss in range(num_step)]
uncore_freqs = sorted(uncore_freqs, reverse=True)
return uncore_freqs
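# Worked example of the bounds above: with the defaults min=1.2e9, max=2.7e9
# and, say, a 1e8 step, num_step == 1 + int(1.5e9 // 1e8) == 16, so the sweep
# visits 2.7 GHz down to 1.2 GHz in 0.1 GHz decrements (sorted high to low).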
def report_signals():
return ["CPU_CYCLES_THREAD@package", "CPU_CYCLES_REFERENCE@package",
"TIME@package", "CPU_ENERGY@package"]
def METHOD_NAME():
return ['MSR::UNCORE_PERF_STATUS:FREQ@package', 'MSR::UNCORE_RATIO_LIMIT:MAX_RATIO@package',
'MSR::UNCORE_RATIO_LIMIT:MIN_RATIO@package']
def launch_configs(app_conf, core_freq_range, uncore_freq_range):
agent = 'frequency_map'
targets = []
for freq in core_freq_range:
for uncore_freq in uncore_freq_range:
name = '{:.1e}c_{:.1e}u'.format(freq, uncore_freq)
options = {'FREQ_CPU_DEFAULT': freq,
'FREQ_CPU_UNCORE': uncore_freq}
agent_conf = geopmpy.agent.AgentConf('{}_agent_{}c_{}u.config'.format(agent, freq, uncore_freq), agent, options)
targets.append(launch_util.LaunchConfig(app_conf=app_conf,
agent_conf=agent_conf,
name=name))
return targets
def launch(app_conf, args, experiment_cli_args):
'''
Run the application over a range of core and uncore frequencies.
'''
mach = machine.init_output_dir(args.output_dir)
core_freq_range = frequency_sweep.setup_frequency_bounds(mach,
args.min_frequency,
args.max_frequency,
args.step_frequency,
args.run_max_turbo)
uncore_freq_range = setup_uncore_frequency_bounds(mach,
args.min_uncore_frequency,
args.max_uncore_frequency,
args.step_uncore_frequency)
targets = launch_configs(app_conf, core_freq_range, uncore_freq_range)
extra_cli_args = launch_util.geopm_signal_args(report_signals=report_signals(),
METHOD_NAME=METHOD_NAME())
extra_cli_args += list(experiment_cli_args)
launch_util.launch_all_runs(targets=targets,
num_nodes=args.node_count,
iterations=args.trial_count,
extra_cli_args=extra_cli_args,
output_dir=args.output_dir,
cool_off_time=args.cool_off_time,
enable_traces=args.enable_traces,
enable_profile_traces=args.enable_profile_traces,
init_control_path=args.init_control)
def main(app_conf, **defaults):
parser = argparse.ArgumentParser()
setup_run_args(parser)
parser.set_defaults(**defaults)
args, extra_cli_args = parser.parse_known_args()
launch(app_conf=app_conf, args=args,
experiment_cli_args=extra_cli_args)
| null |
1,870 |
from typing import (
List,
Optional,
)
from pcs import settings
from pcs.common import (
reports,
services,
)
from pcs.common.types import StringSequence
from pcs.lib.errors import LibraryError
from pcs.lib.external import CommandRunner
class _CmdExecutor(services.interfaces.ExecutorInterface):
def __init__(self, cmd_runner: CommandRunner) -> None:
self._cmd_runner = cmd_runner
def run(self, args: StringSequence) -> services.types.ExecutorResult:
stdout, stderr, retval = self._cmd_runner.run(args)
return services.types.ExecutorResult(retval, stdout, stderr)
class _NoOpDriver(services.interfaces.ServiceManagerInterface):
def __init__(self, report_processor: reports.ReportProcessor) -> None:
self._report_processor = report_processor
def _warn(
self,
service: str,
instance: Optional[str],
action: reports.types.ServiceAction,
) -> None:
self._report_processor.report(
reports.ReportItem.warning(
reports.messages.ServiceActionSkipped(
action,
service,
"Unknown init system",
instance=instance or "",
)
)
)
def start(self, service: str, instance: Optional[str] = None) -> None:
self._warn(service, instance, reports.const.SERVICE_ACTION_START)
def stop(self, service: str, instance: Optional[str] = None) -> None:
self._warn(service, instance, reports.const.SERVICE_ACTION_STOP)
def enable(self, service: str, instance: Optional[str] = None) -> None:
self._warn(service, instance, reports.const.SERVICE_ACTION_ENABLE)
def disable(self, service: str, instance: Optional[str] = None) -> None:
self._warn(service, instance, reports.const.SERVICE_ACTION_DISABLE)
def kill(self, service: str, instance: Optional[str] = None) -> None:
self._warn(service, instance, reports.const.SERVICE_ACTION_KILL)
def is_enabled(self, service: str, instance: Optional[str] = None) -> bool:
return False
def is_running(self, service: str, instance: Optional[str] = None) -> bool:
return False
def is_installed(self, service: str) -> bool:
return True
def get_available_services(self) -> List[str]:
return []
def is_current_system_supported(self) -> bool:
return True
def get_service_manager(
cmd_runner: CommandRunner,
report_processor: reports.ReportProcessor,
) -> services.interfaces.ServiceManagerInterface:
executor = _CmdExecutor(cmd_runner)
drivers: List[services.interfaces.ServiceManagerInterface] = [
services.drivers.SystemdDriver(
executor, settings.systemctl_exec, settings.systemd_unit_path
),
services.drivers.SysVInitRhelDriver(
executor, settings.service_exec, settings.chkconfig_exec
),
]
for driver in drivers:
if driver.is_current_system_supported():
return driver
report_processor.report(
reports.ReportItem.warning(
reports.messages.ServiceUnableToDetectInitSystem()
)
)
return _NoOpDriver(report_processor)
def service_exception_to_report(
exception: services.errors.ManageServiceError,
) -> reports.ReportItem:
action = None
if isinstance(exception, services.errors.DisableServiceError):
action = reports.const.SERVICE_ACTION_DISABLE
elif isinstance(exception, services.errors.EnableServiceError):
action = reports.const.SERVICE_ACTION_ENABLE
elif isinstance(exception, services.errors.StartServiceError):
action = reports.const.SERVICE_ACTION_START
elif isinstance(exception, services.errors.StopServiceError):
action = reports.const.SERVICE_ACTION_STOP
if action is None:
raise AssertionError()
return reports.ReportItem.error(
reports.messages.ServiceActionFailed(
action,
exception.service,
exception.message,
instance=exception.instance or "",
)
)
def METHOD_NAME(
service_manager: services.interfaces.ServiceManagerInterface,
) -> bool:
return isinstance(service_manager, services.drivers.SystemdDriver)
def ensure_is_systemd(
service_manager: services.interfaces.ServiceManagerInterface,
) -> None:
"""
Raise a LibraryError if the current system is not a systemd system
"""
if not METHOD_NAME(service_manager):
raise LibraryError(
reports.ReportItem.error(
reports.messages.UnsupportedOperationOnNonSystemdSystems()
)
)
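# Hedged usage sketch (constructing the CommandRunner and ReportProcessor is
# not shown; all names come from this module):
#     manager = get_service_manager(cmd_runner, report_processor)
#     ensure_is_systemd(manager)  # raises LibraryError on non-systemd hosts
#     manager.start("pacemaker")  # placeholder service name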
| null |
1,871 |
######################################################################
# BioSimSpace: Making biomolecular simulation a breeze!
#
# Copyright: 2017-2023
#
# Authors: Lester Hedges <[email protected]>
#
# BioSimSpace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BioSimSpace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BioSimSpace. If not, see <http://www.gnu.org/licenses/>.
#####################################################################
"""An angle type."""
__author__ = "Lester Hedges"
__email__ = "[email protected]"
__all__ = ["Angle"]
from sire.legacy import Units as _SireUnits
from ._type import Type as _Type
class Angle(_Type):
"""An angle type."""
# A list of the supported Sire unit names.
_sire_units = ["radian", "degree"]
# Dictionary of allowed units.
_supported_units = {"RADIAN": _SireUnits.radian, "DEGREE": _SireUnits.degree}
# Map unit abbreviations to the full name.
_abbreviations = {"R": "RADIAN", "D": "DEGREE"}
# Print format.
_print_format = {"RADIAN": "radian", "DEGREE": "degree"}
# Documentation strings.
_doc_strings = {"RADIAN": "An angle in radians.", "DEGREE": "An angle in degrees."}
    # Unit used for null types, to avoid issues when printing configargparse help.
_default_unit = "RADIAN"
# The dimension mask:
# Angle, Charge, Length, Mass, Quantity, Temperature, Time
_dimensions = (1, 0, 0, 0, 0, 0, 0)
def __init__(self, *args):
"""
Constructor.
``*args`` can be a value and unit, or a string representation
of the angle, e.g. "3 radians".
Parameters
----------
value : float
The value.
unit : str
The unit.
string : str
A string representation of the angle.
Examples
--------
        Create an object representing an angle of 3.14 radians, then
        print the angle in degrees.
        >>> import BioSimSpace as BSS
        >>> angle = BSS.Types.Angle(3.14, "R")
        >>> print(angle.degrees())
        The same as above, except passing a string representation of the
        angle to the constructor.
        >>> import BioSimSpace as BSS
        >>> angle = BSS.Types.Angle("3.14 R")
        >>> print(angle.degrees())
        The string matching is extremely flexible, so all of the following
would be valid arguments: "3.14 R", "3.14 radians", "314e-2 Radians".
"""
# Call the base class constructor.
super().__init__(*args)
def __str__(self):
"""Return a human readable string representation of the object."""
abbrev = self._print_format[self._unit]
if self._value != 1:
if abbrev[-1] != "s":
abbrev = abbrev + "s"
if abs(self._value) > 1e4 or abs(self._value) < 1e-4:
return "%.4e %s" % (self._value, abbrev)
else:
return "%5.4f %s" % (self._value, abbrev)
def radians(self):
"""
Return the angle in radians.
Returns
-------
angle : :class:`Angle <BioSimSpace.Types.Angle>`
The angle in radians.
"""
return Angle(
(self._value * self._supported_units[self._unit]).to(_SireUnits.radian),
"RADIAN",
)
def degrees(self):
"""
Return the angle in degrees.
Returns
-------
angle : :class:`Angle <BioSimSpace.Types.Angle>`
The angle in degrees.
"""
return Angle(
(self._value * self._supported_units[self._unit]).to(_SireUnits.degree),
"DEGREE",
)
def _to_default_unit(self, mag=None):
"""
Internal method to return an object of the same type in the default unit.
Parameters
----------
mag : float
The value (optional).
Returns
-------
angle : :class:`Angle <BioSimSpace.Types.Angle>`
            The angle in the default unit of radians.
"""
if mag is None:
return self.radians()
else:
return Angle(mag, "RADIAN")
def _convert_to(self, unit):
"""
Return the angle in a different unit.
Parameters
----------
unit : str
The unit to convert to.
Returns
-------
angle : :class:`Angle <BioSimSpace.Types.Angle>`
The angle in the specified unit.
"""
if unit == "RADIAN":
return self.radians()
elif unit == "DEGREE":
return self.degrees()
else:
raise ValueError(
"Supported units are: '%s'" % list(self._supported_units.keys())
)
def METHOD_NAME(self, unit):
"""Validate that the unit are supported."""
# Strip whitespace and convert to upper case.
unit = unit.replace(" ", "").upper()
# Strip any "S" characters.
unit = unit.replace("S", "")
# Strip "EGREE".
unit = unit.replace("EGREE", "")
# Strip "EG".
unit = unit.replace("EG", "")
# Strip "ADIAN".
unit = unit.replace("ADIAN", "")
# Strip "AD".
unit = unit.replace("AD", "")
# Check that the unit is supported.
if unit in self._supported_units:
return unit
elif unit in self._abbreviations:
return self._abbreviations[unit]
else:
raise ValueError(
"Supported units are: '%s'" % list(self._supported_units.keys())
)
@staticmethod
def _to_sire_format(unit):
"""
Reformat the unit string so it adheres to the Sire unit formatting.
Parameters
----------
unit : str
A string representation of the unit.
Returns
-------
sire_unit : str
The unit string in Sire compatible format.
"""
# First, handle plurals and abbreviations.
unit = unit.replace("radians", "rad")
unit = unit.replace("radian", "rad")
unit = unit.replace("rads", "rad")
# Now convert back to correct format.
unit = unit.replace("rad", "radian")
# Convert powers. (Limited selection, for now.)
unit = unit.replace("radian2", "(radian*radian)")
unit = unit.replace("radian3", "(radian*radian*radian)")
unit = unit.replace("degree2", "(degree*degree)")
unit = unit.replace("degree3", "(degree*degree*degree)")
unit = unit.replace("radian-1", "(1/(radian))")
unit = unit.replace("radian-2", "(1/(radian*radian))")
unit = unit.replace("radian-3", "(1/(radian*radian*radian))")
unit = unit.replace("degree-1", "(1/(degree))")
unit = unit.replace("degree-2", "(1/(degree*degree))")
unit = unit.replace("degree-3", "(1/(degree*degree*degree))")
return unit
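# Worked example of the reformatting above (illustrative): "rads" collapses to
# "rad" and is then expanded back to "radian", while "radian2" becomes
# "(radian*radian)", matching the unit grammar Sire expects.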
| null |
1,872 |
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test fhir_types functionality."""
from absl.testing import absltest
from proto.google.fhir.proto.r4 import fhirproto_extensions_pb2
from proto.google.fhir.proto.r4 import uscore_pb2
from proto.google.fhir.proto.r4.core import datatypes_pb2
from proto.google.fhir.proto.r4.core.resources import patient_pb2
from google.fhir.core.utils import fhir_types
class FhirTypesTest(absltest.TestCase):
"""Tests functionality provided by the fhir_types module."""
def test_is_code_with_code_returns_true(self):
"""Tests that is_code returns True when given a Code."""
self.assertTrue(fhir_types.is_code(datatypes_pb2.Code()))
def test_is_code_with_profile_of_code_returns_false(self):
"""Tests that is_code returns False when given a profile of Code."""
self.assertFalse(fhir_types.is_code(datatypes_pb2.Address.UseCode()))
def test_is_profile_of_code_with_profile_of_code_returns_true(self):
"""Tests that is_profile_of_code returns True for a profile of Code."""
self.assertTrue(
fhir_types.is_profile_of_code(datatypes_pb2.Address.UseCode()))
def test_is_profile_of_code_with_code_returns_false(self):
"""Tests that is_profile_of_code returns False for a base Code."""
self.assertFalse(fhir_types.is_profile_of_code(datatypes_pb2.Code()))
def test_is_type_or_profile_of_code_with_profile_of_code_returns_true(self):
"""Tests that is_type_or_profile_of_code returns True for a profile."""
self.assertTrue(
fhir_types.is_type_or_profile_of_code(datatypes_pb2.Address.UseCode()))
def test_is_type_or_profile_of_code_with_code_returns_true(self):
"""Tests that is_type_or_profile_of_code returns True for a base Code."""
self.assertTrue(fhir_types.is_type_or_profile_of_code(datatypes_pb2.Code()))
def test_is_type_or_profile_of_code_with_non_code_returns_false(self):
"""Tests that is_type_or_profile_of_code returns False for a non-Code."""
self.assertFalse(
fhir_types.is_type_or_profile_of_code(patient_pb2.Patient()))
def test_is_coding_with_coding_returns_true(self):
"""Tests that is_coding returns True when given a Coding instance."""
self.assertTrue(fhir_types.is_coding(datatypes_pb2.Coding()))
def test_is_coding_with_profile_of_coding_returns_false(self):
"""Tests that is_coding returns False when given a profile."""
self.assertFalse(fhir_types.is_coding(datatypes_pb2.CodingWithFixedCode()))
def test_is_profile_of_coding_with_coding_returns_true(self):
"""Tests that is_profile_of_coding returns True for a profile."""
self.assertTrue(
fhir_types.is_profile_of_coding(datatypes_pb2.CodingWithFixedCode()))
def test_is_profile_of_coding_with_coding_returns_false(self):
"""Tests that is_profile_of_coding returns False for a base Coding type."""
self.assertFalse(fhir_types.is_profile_of_coding(datatypes_pb2.Coding()))
def test_is_type_or_profile_of_coding_with_coding_returns_true(self):
"""Tests that is_type_or_profile_of_coding returns True for profile."""
self.assertTrue(
fhir_types.is_type_or_profile_of_coding(
datatypes_pb2.CodingWithFixedCode()))
def test_is_type_or_profile_of_coding_with_non_coding_returns_false(self):
"""Tests that is_type_or_profile_of_coding returns False for non-Coding."""
self.assertFalse(
fhir_types.is_type_or_profile_of_coding(patient_pb2.Patient()))
def test_is_period_with_period_returns_true(self):
"""Tests that is_period returns True when given a Period instance."""
self.assertTrue(fhir_types.is_period(datatypes_pb2.Period()))
def test_is_period_with_coding_returns_false(self):
"""Tests that is_period returns False when given a profile of Coding."""
self.assertFalse(fhir_types.is_period(datatypes_pb2.Coding()))
def test_is_date_time_with_date_time_returns_true(self):
"""Tests that is_date_time returns True when given a DateTime instance."""
self.assertTrue(fhir_types.is_date_time(datatypes_pb2.DateTime()))
def test_is_date_time_with_coding_returns_false(self):
"""Tests that is_date_time returns False when given a profile of Coding."""
self.assertFalse(fhir_types.is_date_time(datatypes_pb2.Coding()))
def test_is_boolean_with_boolean_returns_true(self):
"""Tests that is_boolean returns True when given a Boolean instance."""
self.assertTrue(fhir_types.is_boolean(datatypes_pb2.Boolean()))
def test_is_boolean_with_coding_returns_false(self):
"""Tests that is_boolean returns False when given a profile of Coding."""
self.assertFalse(fhir_types.is_boolean(datatypes_pb2.Coding()))
def test_is_string_with_string_returns_true(self):
"""Tests that is_string returns True when given a String instance."""
self.assertTrue(fhir_types.is_string(datatypes_pb2.String()))
def test_is_string_with_coding_returns_false(self):
"""Tests that is_date_time returns False when given a profile of Coding."""
self.assertFalse(fhir_types.is_string(datatypes_pb2.Coding()))
def test_is_extension_with_extension_returns_true(self):
"""Tests that is_extension returns True when given an Extension."""
self.assertTrue(fhir_types.is_extension(datatypes_pb2.Extension()))
def test_is_extension_with_date_time_returns_false(self):
"""Tests that is_extension returns False when given a DateTime."""
self.assertFalse(fhir_types.is_extension(datatypes_pb2.DateTime()))
def test_is_profile_of_extension_with_base64_binary_separator_stride_returns_true(
self,
):
"""Tests that is_profile_of_extension returns True for valid profile."""
self.assertTrue(
fhir_types.is_profile_of_extension(
fhirproto_extensions_pb2.Base64BinarySeparatorStride()))
def test_is_type_or_profile_of_extension_with_extension_returns_true(self):
"""Tests that is_type_or_profile_of_extension returns True for Extension."""
self.assertTrue(
fhir_types.is_type_or_profile_of_extension(datatypes_pb2.Extension()))
def METHOD_NAME(
self,
):
"""Tests that is_type_or_profile_of_extension returns True for profile."""
self.assertTrue(
fhir_types.is_type_or_profile_of_extension(
fhirproto_extensions_pb2.Base64BinarySeparatorStride()))
def test_is_type_or_profile_of_extensions_with_date_time_returns_false(self):
"""Tests that is_type_or_profile_of_extension returns False for DateTime."""
self.assertFalse(
fhir_types.is_type_or_profile_of_extension(datatypes_pb2.DateTime()))
def test_is_type_or_profile_of_patient_with_patient_returns_true(self):
"""Tests that IsTypeOfProfileOfPatient returns True for a Patient type."""
self.assertTrue(
fhir_types.is_type_or_profile_of_patient(patient_pb2.Patient()))
def test_is_type_or_profile_of_patient_with_coding_returns_false(self):
"""Tests that IsTypeOfProfileOfPatient returns False for a Coding type."""
self.assertFalse(
fhir_types.is_type_or_profile_of_patient(datatypes_pb2.Coding()))
def test_is_type_or_profile_of_patient_with_patient_profile_returns_true(
self,
):
"""Tests that IsTypeOfProfileOfPatient returns True for Patient profile."""
self.assertTrue(
fhir_types.is_type_or_profile_of_patient(
uscore_pb2.USCorePatientProfile()))
if __name__ == '__main__':
absltest.main()
| null |
1,873 |
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Actor.py: Contains the actor class.
import GemRB
import GUICommon
from GUIDefines import *
from ie_stats import *
#this import is primarily for the tables
import CommonTables
##############################################################################
## GLOBALS TO BE INITIALIZED ONCE
##############################################################################
dualswap = None
classcount = None
levelslots = [IE_LEVEL, IE_LEVEL2, IE_LEVEL3]
class Actor:
"""Holds information of a PC."""
def __init__ (self, pc):
"""Load up basic information."""
#setup our basic Actor
self.Reset (pc)
#setup globals if they are blank
		if dualswap is None:
self.__setup_globals ()
def __setup_globals (self):
"""Initializes all globals used for quick referencing.
Will only be called by the first Actor created."""
global classcount, dualswap
classcount = CommonTables.Classes.GetRowCount ()
dualswap = [0]*classcount
for i in range(classcount):
rowname = CommonTables.Classes.GetRowName(i)
classid = CommonTables.Classes.GetValue (rowname, "ID")
classnames = rowname.split("_")
#set the MC_WAS_ID of the first class
if len(classnames) == 2:
dualswap[classid-1] = CommonTables.Classes.GetValue (rowname, "MC_WAS_ID")
def Classes (self):
"""Returns a list with all the class IDs."""
		if self.__classes is None:
#already reversed in ClassNames
self.__classes = [CommonTables.Classes.GetValue (name, "ID", GTV_INT) for name in self.ClassNames()]
return self.__classes
def ClassNames (self):
"""Returns a list will all the class names."""
if self.__classnames == None:
self.__classnames = GUICommon.GetClassRowName (self.classid, "class").split("_")
if self.IsDualSwap():
self.__classnames.reverse()
return self.__classnames
def METHOD_NAME (self):
"""Returns the class title as a displayable string."""
		if self.__classtitle is not None:
return self.__classtitle
self.__classtitle = GemRB.GetPlayerStat (self.pc, IE_TITLE1)
self.ClassNames()
if self.__classtitle == 0:
if self.multiclass and self.isdual == 0:
self.__classtitle = CommonTables.Classes.GetValue ("_".join(self.__classnames), "CAP_REF", GTV_REF)
elif self.isdual:
# first (previous) kit or class of the dual class
self.Classes()
if self.KitIndex():
self.__classtitle = CommonTables.KitList.GetValue (self.__kitindex, 2, GTV_REF)
else:
self.__classtitle = CommonTables.Classes.GetValue (self.__classnames[1], "CAP_REF", GTV_REF)
self.__classtitle = self.__classtitle + " / " + \
CommonTables.Classes.GetValue (self.__classnames[0], "CAP_REF", GTV_REF)
else: # ordinary class or kit
if self.KitIndex():
self.__classtitle = CommonTables.KitList.GetValue (self.__kitindex, 2, GTV_REF)
else:
self.__classtitle = CommonTables.Classes.GetValue ("_".join(self.__classnames), "CAP_REF", GTV_REF)
if self.__classtitle == "*":
self.__classtitle = 0
return self.__classtitle
def IsDualSwap (self):
"""Returns true if IE_LEVEL is opposite of expectations."""
		if self.__dualswap is None:
self.__dualswap = (self.isdual & CommonTables.Classes.GetValue \
(self.ClassNames()[0], "MC_WAS_ID", GTV_INT)) > 0
return self.__dualswap
def KitIndex (self):
"""Returns the kit index in relation to kitlist.2da."""
		if self.__kitindex is not None:
return self.__kitindex
Kit = GemRB.GetPlayerStat (self.pc, IE_KIT)
self.__kitindex = 0
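		#kit values are apparently stored with flag 0x4000 set, with the
		#low 12 bits holding the kitlist.2da index (IE kit encoding)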
if Kit & 0xc000 == 0x4000:
self.__kitindex = Kit & 0xfff
# carefully looking for kit by the usability flag
# since the barbarian kit id clashes with the no-kit value
if self.__kitindex == 0 and Kit != 0x4000:
self.__kitindex = CommonTables.KitList.FindValue (6, Kit)
if self.__kitindex is None:
self.__kitindex = 0
return self.__kitindex
def LevelDiffs (self):
"""Returns the differences between the current and next classes."""
return [(next-current) for current,next in zip(self.Levels(),
self.NextLevels())]
def Levels (self):
"""Returns the current level of each class."""
		if self.__levels is None:
self.__levels = [level for slot in levelslots for level \
in [GemRB.GetPlayerStat (self.pc, slot)] if level>0]
if self.IsDualSwap():
self.__levels.reverse()
return self.__levels
def NextLevelExp (self):
"""Returns the experience required to level each class."""
#filtering the old dual class out seems unnecessary
#just be sure to use NumClasses() or isdual to check
return [CommonTables.NextLevel.GetValue (name, str(level+1)) for name,level \
in zip(self.ClassNames(), self.Levels())]
def NextLevels (self):
"""Returns the next level for each class."""
		if self.__nextlevels is not None:
return self.__nextlevels
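		#the xp pool is split evenly among the active classes
		#(a dual-class counts only its active class, see NumClasses)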
xp = GemRB.GetPlayerStat (self.pc, IE_XP) // self.NumClasses()
self.__nextlevels = []
for name, level in zip(self.ClassNames(), self.Levels() ):
nextLevel = level
#we only want the current level for the old part of a dual-class
if len(self.__nextlevels) < self.__numclasses:
for current in range(level+1, CommonTables.NextLevel.GetColumnCount () ):
if CommonTables.NextLevel.GetValue (name, str(current)) <= xp:
nextLevel = current
else:
break
self.__nextlevels.append(nextLevel)
return self.__nextlevels
def NumClasses (self):
"""Returns the number of *active* classes."""
		if self.__numclasses is None:
if self.isdual:
self.__numclasses = 1
else:
self.__numclasses = len(self.ClassNames() )
return self.__numclasses
def RaceName (self):
"""Returns the race string."""
pass
def Reset (self, pc):
"""Resets all internal variables.
		This should be called after any fundamental changes to the pc.
This includes: dualclassing, leveling."""
#accessible variables
self.pc = pc
self.classid = GemRB.GetPlayerStat (self.pc, IE_CLASS)
self.isdual = GemRB.GetPlayerStat (self.pc, IE_MC_FLAGS) & MC_WAS_ANY_CLASS
self.multiclass = CommonTables.Classes.GetValue (GUICommon.GetClassRowName (pc), "MULTI")
		#internal variables - these are only initialized on the first
#call to their respective function, and stored thereafter
self.__classes = None
self.__classnames = None
self.__classtitle = None
self.__dualswap = None
self.__kitindex = None
self.__levels = None
self.__nextlevels = None
self.__numclasses = None
| null |
1,874 |
""" signs activitypub activities """
import hashlib
from urllib.parse import urlparse
import datetime
from base64 import b64encode, b64decode
from Crypto import Random
from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15 # pylint: disable=no-name-in-module
from Crypto.Hash import SHA256
MAX_SIGNATURE_AGE = 300
def create_key_pair():
"""a new public/private key pair, used for creating new users"""
random_generator = Random.new().read
key = RSA.generate(2048, random_generator)
private_key = key.export_key().decode("utf8")
public_key = key.public_key().export_key().decode("utf8")
return private_key, public_key
def make_signature(method, sender, destination, date, **kwargs):
"""uses a private key to sign an outgoing message"""
inbox_parts = urlparse(destination)
signature_headers = [
f"(request-target): {method} {inbox_parts.path}",
f"host: {inbox_parts.netloc}",
f"date: {date}",
]
headers = "(request-target) host date"
digest = kwargs.get("digest")
if digest is not None:
signature_headers.append(f"digest: {digest}")
headers = "(request-target) host date digest"
message_to_sign = "\n".join(signature_headers)
signer = pkcs1_15.new(RSA.import_key(sender.key_pair.private_key))
signed_message = signer.sign(SHA256.new(message_to_sign.encode("utf8")))
# For legacy reasons we need to use an incorrect keyId for older Bookwyrm versions
key_id = (
f"{sender.remote_id}#main-key"
if kwargs.get("use_legacy_key")
else f"{sender.remote_id}/#main-key"
)
signature = {
"keyId": key_id,
"algorithm": "rsa-sha256",
"headers": headers,
"signature": b64encode(signed_message).decode("utf8"),
}
return ",".join(f'{k}="{v}"' for (k, v) in signature.items())
def make_digest(data):
"""creates a message digest for signing"""
return "SHA-256=" + b64encode(hashlib.sha256(data.encode("utf-8")).digest()).decode(
"utf-8"
)
def METHOD_NAME(request):
"""checks if a digest is syntactically valid and matches the message"""
algorithm, digest = request.headers["digest"].split("=", 1)
if algorithm == "SHA-256":
hash_function = hashlib.sha256
elif algorithm == "SHA-512":
hash_function = hashlib.sha512
else:
raise ValueError(f"Unsupported hash function: {algorithm}")
expected = hash_function(request.body).digest()
if b64decode(digest) != expected:
raise ValueError("Invalid HTTP Digest header")
class Signature:
"""read and validate incoming signatures"""
def __init__(self, key_id, headers, signature):
self.key_id = key_id
self.headers = headers
self.signature = signature
# pylint: disable=invalid-name
@classmethod
def parse(cls, request):
"""extract and parse a signature from an http request"""
signature_dict = {}
for pair in request.headers["Signature"].split(","):
k, v = pair.split("=", 1)
v = v.replace('"', "")
signature_dict[k] = v
try:
key_id = signature_dict["keyId"]
headers = signature_dict["headers"]
signature = b64decode(signature_dict["signature"])
        except KeyError as err:
            raise ValueError("Invalid auth header") from err
return cls(key_id, headers, signature)
def verify(self, public_key, request):
"""verify rsa signature"""
if http_date_age(request.headers["date"]) > MAX_SIGNATURE_AGE:
raise ValueError(f"Request too old: {request.headers['date']}")
public_key = RSA.import_key(public_key)
comparison_string = []
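        # rebuild exactly the string the sender signed, header by header,
        # in the order listed in the signature's "headers" field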
for signed_header_name in self.headers.split(" "):
if signed_header_name == "(request-target)":
comparison_string.append(f"(request-target): post {request.path}")
else:
if signed_header_name == "digest":
METHOD_NAME(request)
comparison_string.append(
f"{signed_header_name}: {request.headers[signed_header_name]}"
)
comparison_string = "\n".join(comparison_string)
signer = pkcs1_15.new(public_key)
digest = SHA256.new()
digest.update(comparison_string.encode())
# raises a ValueError if it fails
signer.verify(digest, self.signature)
def http_date_age(datestr):
"""age of a signature in seconds"""
parsed = datetime.datetime.strptime(datestr, "%a, %d %b %Y %H:%M:%S GMT")
delta = datetime.datetime.utcnow() - parsed
return delta.total_seconds()
| null |
1,875 |
# Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import yaml
def encode_yaml(val, default_flow_style=False, strict=False):
"""Returns val encoded as YAML.
Uses PyYAML `safe_dump` to serialize `val`. `default_flow_style`
is passed through to `safe_dump`.
    `strict` patches PyYAML to comply with the YAML standard for the
    single characters 'y' and 'n', which need to be quoted to retain
    their string type under strict YAML. This is for compatibility
    outside PyYAML.
"""
with StrictPatch(strict):
encoded = yaml.safe_dump(
val,
default_flow_style=default_flow_style,
indent=2,
)
return _strip_encoded_yaml(encoded)
def _strip_encoded_yaml(encoded):
stripped = encoded.strip()
if stripped.endswith("\n..."):
stripped = stripped[:-4]
return stripped
def decode_yaml(s):
try:
return yaml.safe_load(s)
except yaml.scanner.ScannerError as e:
raise ValueError(e) from e
def METHOD_NAME(filename):
fm_s = _yaml_front_matter_s(filename)
if not fm_s:
return {}
return yaml.safe_load(fm_s)
def _yaml_front_matter_s(filename):
lines = []
reading = False
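    # front matter is the block between the first two "---" lines; blank
    # lines before it are skipped, any other leading content means the
    # file has no front matter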
with open(filename) as f:
for line in f:
trimmed = line.rstrip()
if not trimmed.lstrip():
continue
if trimmed == "---":
if reading:
break
reading = True
elif reading:
lines.append(trimmed)
else:
break
return "\n".join(lines) if lines else None
class StrictPatch:
"""Patches `yaml` to strictly adhere to the YAML spec*.
    May be used as a no-op with `StrictPatch(False)`.
* This patch makes no guarantee of strict correctness but rather
fixes known issues with PyYAML:
- Encoding/decoding of single char boolean chars `[yYnN]`
"""
implicit_resolver_patches = [
(
"tag:yaml.org,2002:bool",
re.compile(r"^(?:y|Y|n|N)$", re.X),
list('yYnN'),
)
]
bool_value_patches = {
"y": True,
"n": False,
}
def __init__(self, strict=True):
self.strict = strict
def __enter__(self):
if not self.strict:
return
self._apply_implicit_resolver_patches()
self._apply_bool_value_patches()
def _apply_implicit_resolver_patches(self):
for tag, pattern, first in self.implicit_resolver_patches:
yaml.resolver.Resolver.add_implicit_resolver(tag, pattern, first)
def _apply_bool_value_patches(self):
for key, val in self.bool_value_patches.items():
assert key not in yaml.constructor.SafeConstructor.bool_values, key
yaml.constructor.SafeConstructor.bool_values[key] = val
def __exit__(self, *_exc):
if not self.strict:
return
self._unapply_implicit_resolver_patches()
self._unapply_bool_value_patches()
def _unapply_implicit_resolver_patches(self):
for tag, pattern, first in self.implicit_resolver_patches:
for ch in first:
resolvers = yaml.resolver.Resolver.yaml_implicit_resolvers.get(ch)
assert resolvers
assert resolvers[-1] == (tag, pattern), (resolvers, tag, pattern)
resolvers.pop()
def _unapply_bool_value_patches(self):
for key in self.bool_value_patches:
del yaml.constructor.SafeConstructor.bool_values[key]
def patch_yaml_resolver():
"""Patch yaml parsing to support Guild specific resolution rules.
- Make '+' or '-' optional in scientific notation
- Make use of decimal '.' optional in scientific notation
This patch replaces the default 'tag:yaml.org,2002:float' resolver
with an augmented set of regex patterns. Refer to
`yaml/resolver.py` for the original patterns.
"""
yaml.resolver.Resolver.add_implicit_resolver(
"tag:yaml.org,2002:float",
# The patterns below are modified from the original set in two
# ways: the first pattern makes `[-+]` optional and the second
# is a new pattern to match scientific notation that
# does not include a decimal (e.g. `1e2`).
re.compile(
r"""^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$""",
re.X,
),
list("-+0123456789."),
)
if os.getenv("NO_PATCH_YAML") != "1":
patch_yaml_resolver()
| null |
1,876 |
# Copyright 2017 Akretion (http://www.akretion.com)
# Benoît GUILLOT <[email protected]>
# Copyright 2020 Camptocamp (http://www.camptocamp.com).
# @author Simone Orsi <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import json
import os
from odoo.addons.connector_algolia.components.adapter import AlgoliaAdapter
from odoo.addons.connector_search_engine.tests.test_all import TestBindingIndexBase
from odoo.addons.shopinvader.tests.common import _install_lang_odoo
try:
from vcr_unittest import VCRMixin
except ImportError:
VCRMixin = None
class TestAlgoliaBackend(VCRMixin, TestBindingIndexBase):
@classmethod
def setUpClass(cls):
super(TestAlgoliaBackend, cls).setUpClass()
AlgoliaAdapter._build_component(cls._components_registry)
cls.backend_specific = cls.env.ref("connector_algolia.se_algolia_demo")
cls.backend = cls.backend_specific.se_backend_id
cls.backend_specific.algolia_app_id = os.environ.get(
"ALGOLIA_APP_ID", "FAKE_APP"
)
cls.backend_specific.algolia_api_key = os.environ.get(
"ALGOLIA_API_KEY", "FAKE_KEY"
)
cls.shopinvader_backend = cls.env.ref("shopinvader.backend_1")
cls.shopinvader_backend.bind_all_product()
cls.shopinvader_backend.bind_all_category()
cls.index_product = cls.env.ref("shopinvader_algolia.index_1")
cls.index_categ = cls.env.ref("shopinvader_algolia.index_2")
def _get_vcr_kwargs(self, **kwargs):
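        # record_mode "once" records a cassette on the first run and replays
        # it afterwards; the Authorization header is filtered out so the API
        # key never ends up in the cassette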
return {
"record_mode": "once",
"match_on": ["method", "path", "query"],
"filter_headers": ["Authorization"],
"decode_compressed_response": True,
}
def setUp(self):
super(TestAlgoliaBackend, self).setUp()
if self.vcr_enabled:
            # TODO we should discuss this
            # @laurent @simone @guewen
            # testing what we have in self.cassette.request
            # may not be a good idea, as the content tested is the
            # recorded content and not the request actually made.
            # This hack stores the real requests in self.requests;
            # maybe we should propose such a helper in vcr-unittest?
self.requests = []
original = self.cassette.play_response
def play_response(request):
self.requests.append(request)
return original(request)
self.cassette.play_response = play_response
def test_10_export_one_product(self):
product = self.env.ref("product.product_product_3_product_template")
si_variant = product.shopinvader_bind_ids[0].shopinvader_variant_ids[0]
si_variant.recompute_json()
si_variant.synchronize()
self.assertEqual(len(self.requests), 1)
request = self.requests[0]
self.assertEqual(request.method, "POST")
self.assertEqual(
self.parse_path(request.uri),
"/1/indexes/demo_algolia_backend_shopinvader_variant_en_US/batch",
)
request_data = json.loads(request.body.decode("utf-8"))["requests"]
self.assertEqual(len(request_data), 1)
self.assertEqual(request_data[0]["action"], "updateObject")
self.assertEqual(request_data[0]["body"], si_variant.data)
def test_20_recompute_all_products(self):
bindings = self.env["shopinvader.variant"].search([])
bindings.write({"data": {}})
self.index_product.recompute_all_binding()
for binding in bindings:
self.assertEqual(binding.data["objectID"], binding.record_id.id)
def _test_export_all_binding(self, index):
index.recompute_all_binding()
index.batch_export()
binding_nbr = self.env[index.model_id.model].search_count([])
self.assertEqual(len(self.requests), 1)
request = self.requests[0]
self.assertEqual(request.method, "POST")
self.assertEqual(
self.parse_path(request.uri), "/1/indexes/%s/batch" % index.name
)
request_data = json.loads(request.body.decode("utf-8"))["requests"]
self.assertEqual(
len(request_data), binding_nbr, "All bindings should be exported"
)
self.assertEqual(request_data[0]["action"], "updateObject")
def METHOD_NAME(self):
self._test_export_all_binding(self.index_product)
def test_30_export_all_categories(self):
self._test_export_all_binding(self.index_categ)
def test_facet_settings(self):
_install_lang_odoo(self.env, "base.lang_fr")
filter1 = self.env.ref("shopinvader.product_filter_1")
filter2 = self.env.ref("shopinvader.product_filter_2")
attr1 = filter1.variant_attribute_id
attr2 = filter2.variant_attribute_id
attr1.with_context(lang="fr_FR").name = attr1.name + " FR"
attr2.with_context(lang="fr_FR").name = attr2.name + " FR"
self.shopinvader_backend.filter_ids = filter1 + filter2
settings_en = self.env["shopinvader.variant"]._get_facetting_values(
self.backend, self.env.ref("base.lang_en")
)
settings_fr = self.env["shopinvader.variant"]._get_facetting_values(
self.backend, self.env.ref("base.lang_fr")
)
self.assertEqual(
settings_en,
[
"categories.id",
"Categories.lvl0hierarchical",
"Categories.lvl1hierarchical",
"Categories.lvl2hierarchical",
"main",
"redirect_url_key",
"url_key",
"sku",
"price.default.value",
"variant_attributes.legs",
"variant_attributes.color",
],
)
self.assertEqual(
settings_fr,
[
"categories.id",
"Categories.lvl0hierarchical",
"Categories.lvl1hierarchical",
"Categories.lvl2hierarchical",
"main",
"redirect_url_key",
"url_key",
"sku",
"price.default.value",
"variant_attributes.legs_fr",
"variant_attributes.color_fr",
],
)
| null |
1,877 |
from creme.creme_core.utils import chunktools
from ..base import CremeTestCase
class ChunkToolsTestCase(CremeTestCase):
DATA_UNIX = """04 05 99 66 54
055 6 5322 1 2
98
456456 455 12
45 156
dfdsfds
s556"""
DATA_WINDOWS = DATA_UNIX.replace('\n', '\r\n')
DATA_MAC = DATA_UNIX.replace('\n', '\r')
DATA_RANDOM_LINESEP = (
'04 05 99 66 54\r\n055 6 5322 1 2\r\r\n98\n\n '
'456456 455 12\r 45 156\rdfdsfds\r\ns556'
)
def assertFilteredEntries(self, entries):
self.assertListEqual(
['0405996654', '0556532212', '98', '45645645512', '45156', '556'],
entries
)
def assertSplitEntries(self, entries):
self.assertListEqual(
[
'04 05 99 66 54',
'055 6 5322 1 2',
'98',
' 456456 455 12',
' 45 156',
'dfdsfds',
's556'
],
entries
)
def METHOD_NAME(self, chunk_size, source=None):
source = source if source is not None else self.DATA_UNIX
for chunk in chunktools.iter_as_chunk(source, chunk_size):
yield ''.join(chunk)
@staticmethod
def filter(entry):
return ''.join(char for char in entry if char.isdigit())
def test_iter_as_slices01(self):
METHOD_NAME = [*chunktools.iter_as_slices(self.DATA_UNIX, 1000)]
self.assertEqual(1, len(METHOD_NAME))
self.assertEqual(self.DATA_UNIX, ''.join(METHOD_NAME))
def test_iter_as_slices02(self):
assert len(self.DATA_UNIX) % 5 == 0
METHOD_NAME = [*chunktools.iter_as_slices(self.DATA_UNIX, 5)]
self.assertEqual(16, len(METHOD_NAME))
for i, chunk in enumerate(METHOD_NAME):
self.assertEqual(5, len(chunk), f'Bad size for chunk {i}: {chunk}')
self.assertEqual(self.DATA_UNIX, ''.join(METHOD_NAME))
def test_iter_as_slices03(self):
data = self.DATA_UNIX + '9'
assert len(data) % 5 == 1
METHOD_NAME = [*chunktools.iter_as_slices(data, 5)]
self.assertEqual(17, len(METHOD_NAME))
for i, chunk in enumerate(METHOD_NAME[:-1]):
self.assertEqual(5, len(chunk), f'Bad size for chunk {i}: {chunk}')
self.assertEqual('9', METHOD_NAME[-1])
self.assertEqual(data, ''.join(METHOD_NAME))
def test_iter_as_chunks01(self):
chunk = self.get_alone_element(chunktools.iter_as_chunk(self.DATA_UNIX, 1000))
self.assertEqual(self.DATA_UNIX, ''.join(chunk))
def test_iter_as_chunks02(self):
assert len(self.DATA_UNIX) % 5 == 0
METHOD_NAME = [*chunktools.iter_as_chunk(self.DATA_UNIX, 5)]
self.assertEqual(16, len(METHOD_NAME))
for i, chunk in enumerate(METHOD_NAME):
self.assertEqual(5, len(chunk), f'Bad size for chunk {i}: {chunk}')
self.assertIsInstance(chunk, list)
self.assertEqual(self.DATA_UNIX, ''.join(''.join(chunk) for chunk in METHOD_NAME))
def test_iter_as_chunks03(self):
data = self.DATA_UNIX + '9'
assert len(data) % 5 == 1
METHOD_NAME = [*chunktools.iter_as_chunk(data, 5)]
self.assertEqual(17, len(METHOD_NAME))
for i, chunk in enumerate(METHOD_NAME[:-1]):
self.assertEqual(5, len(chunk), f'Bad size for chunk {i}: {chunk}')
self.assertEqual(['9'], METHOD_NAME[-1])
self.assertEqual(data, ''.join(''.join(chunk) for chunk in METHOD_NAME))
def test_iter_splitchunks_size_under_linesize(self):
"Tests small_chunks"
chunk_size = 5
entries = [
*chunktools.iter_splitchunks(
self.METHOD_NAME(chunk_size), '\n', ChunkToolsTestCase.filter
),
]
self.assertFilteredEntries(entries)
def test_iter_splitchunks_linesize_over_limit(self):
"Tests small_chunks."
chunk_size = 5
METHOD_NAME = self.METHOD_NAME(chunk_size, '0405996654\n0405996653\n0405996652')
entries = [*chunktools.iter_splitchunks(METHOD_NAME, '\n', ChunkToolsTestCase.filter, limit=10)]
self.assertListEqual(['0405996654', '0405996653', '0405996652'], entries)
METHOD_NAME = self.METHOD_NAME(chunk_size, '7777788888\n9999900000555\n1111122222')
with self.assertRaises(ValueError) as error:
[*chunktools.iter_splitchunks(METHOD_NAME, '\n', ChunkToolsTestCase.filter, limit=10)]
self.assertEqual(str(error.exception), 'line length is over %d characters' % 10)
def test_iter_splitchunks_size_1(self):
"Tests small_chunks."
self.assertFilteredEntries([
*chunktools.iter_splitchunks(
self.METHOD_NAME(chunk_size=1), '\n', ChunkToolsTestCase.filter,
)
])
def test_iter_splitchunks_size_over_linesize(self):
"Test big_chunks."
chunk_size = len(self.DATA_UNIX) / 2
self.assertFilteredEntries([
*chunktools.iter_splitchunks(
self.METHOD_NAME(chunk_size), '\n', ChunkToolsTestCase.filter,
)
])
def test_iter_splitchunks_one_chunk(self):
"Test with one chunk."
chunk_size = len(self.DATA_UNIX) * 2
self.assertFilteredEntries([
*chunktools.iter_splitchunks(
self.METHOD_NAME(chunk_size), '\n', ChunkToolsTestCase.filter,
),
])
def test_iter_splitchunks_no_filter(self):
self.assertSplitEntries([
*chunktools.iter_splitchunks(self.METHOD_NAME(5), '\n', None),
])
def test_iter_splitchunks_nbytes_key(self):
data = self.DATA_WINDOWS
self.assertFilteredEntries([
*chunktools.iter_splitchunks(
self.METHOD_NAME(5, data), '\r\n', ChunkToolsTestCase.filter,
)
])
self.assertFilteredEntries([
*chunktools.iter_splitchunks(
self.METHOD_NAME(len(data) / 2, data), '\r\n', ChunkToolsTestCase.filter,
)
])
self.assertFilteredEntries([
*chunktools.iter_splitchunks(
self.METHOD_NAME(len(data) * 2, data), '\r\n', ChunkToolsTestCase.filter,
),
])
def test_iter_splitchunks_nbytes_key_chunk_limits(self):
self.assertListEqual(
['1234', '56789012', '345', '12'],
[
*chunktools.iter_splitchunks(
['1234\r', '\n5678', '9012\r\n', '345\r\n', '12'],
'\r\n', ChunkToolsTestCase.filter,
),
]
)
| null |
1,878 |
"""
Extension to get the total derivative / gradient / Jacobian matrix.
"""
from builtins import str
from builtins import range
import mdp
import bimdp
np = mdp.numx
class NotDifferentiableException(mdp.NodeException):
"""Exception if the total derivative does not exist."""
pass
# Default implementation is needed to satisfy the "method" request.
class GradientExtensionNode(mdp.ExtensionNode, mdp.Node):
"""Base node of the extension to calculate the gradient at a certain point.
To get the gradient simply put 'method': 'gradient' into the msg dict.
The grad array is three dimensional, with shape
(len(x), self.output_dim, self.input_dim).
The matrix formed by the last two indices is also called the Jacobian
matrix.
Nodes which have no well defined total derivative should raise the
NotDifferentiableException.
"""
extension_name = "gradient"
def METHOD_NAME(self, x, grad=None):
"""Calculate the contribution to the grad for this node at point x.
The contribution is then combined with the given gradient, to get
the gradient for the original x.
This is a template function, derived classes should override _get_grad.
"""
if self.is_training():
raise mdp.TrainingException("The training is not completed yet.")
if grad is None:
grad = np.zeros((len(x), self.input_dim, self.input_dim))
diag_indices = np.arange(self.input_dim)
grad[:,diag_indices,diag_indices] = 1.0
new_grad = self._get_grad(x)
# combine the gradients
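        # (chain rule: per sample, the node's Jacobian is dotted onto the
        # Jacobian accumulated so far)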
grad = np.asarray([np.dot(new_grad[i], grad[i])
for i in range(len(new_grad))])
# update the x value for the next node
result = self._execute(x)
if isinstance(result, tuple):
x = result[0]
msg = result[1]
else:
x = result
msg = {}
msg.update({"grad": grad})
return x, msg
def _get_grad(self, x):
"""Return the grad for the given points.
Override this method.
"""
err = "Gradient not implemented for class %s." % str(self.__class__)
raise NotImplementedError(err)
def _stop_gradient(self, x, grad=None):
"""Helper method to make gradient available for stop_message."""
result = self.METHOD_NAME(x, grad)
# FIXME: Is this really correct? x should be updated!
# Could remove this once we have the new stop signature.
return result[1], 1
## Implementations for specific nodes. ##
# TODO: cache the gradient for linear nodes?
# If there was a linear base class one could integrate this?
# TODO: add at least a PCA gradient implementation
@mdp.extension_method("gradient", mdp.nodes.IdentityNode, "_get_grad")
def _identity_grad(self, x):
grad = np.zeros((len(x), self.output_dim, self.input_dim))
diag_indices = np.arange(self.input_dim)
grad[:,diag_indices,diag_indices] = 1.0
return grad
@mdp.extension_method("gradient", mdp.nodes.SFANode, "_get_grad")
def _sfa_grad(self, x):
# the gradient is constant, but have to give it for each x point
return np.repeat(self.sf.T[np.newaxis,:,:], len(x), axis=0)
@mdp.extension_method("gradient", mdp.nodes.QuadraticExpansionNode,
"_get_grad")
def _quadex_grad(self, x):
    # the expansion is:
    # [x1, x2, x3, x1x1, x1x2, x1x3, x2x2, x2x3, x3x3]
dim = self.input_dim
grad = np.zeros((len(x), self.output_dim, dim))
# constant part
diag_indices = np.arange(dim)
grad[:,diag_indices,diag_indices] = 1.0
# quadratic part
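    # d(x_i*x_j)/dx_k = x_j*delta_ik + x_i*delta_jk: the block assignment
    # below supplies the x_j term and the diagonal update adds the x_i
    # term, which doubles the entries for the squares x_i*x_i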
i_start = dim
for i in range(dim):
grad[:, i_start:i_start+dim-i, i] = x[:,i:]
diag_indices = np.arange(dim - i)
grad[:, diag_indices+i_start, diag_indices+i] += x[:,i,np.newaxis]
i_start += (dim - i)
return grad
@mdp.extension_method("gradient", mdp.nodes.SFA2Node, "_get_grad")
def _sfa2_grad(self, x):
quadex_grad = self._expnode._get_grad(x)
sfa_grad = _sfa_grad(self, x)
return np.asarray([np.dot(sfa_grad[i], quadex_grad[i])
for i in range(len(sfa_grad))])
## mdp.hinet nodes ##
@mdp.extension_method("gradient", mdp.hinet.Layer, "_get_grad")
def _layer_grad(self, x):
in_start = 0
in_stop = 0
out_start = 0
out_stop = 0
grad = None
for node in self.nodes:
out_start = out_stop
out_stop += node.output_dim
in_start = in_stop
in_stop += node.input_dim
if grad is None:
node_grad = node._get_grad(x[:, in_start:in_stop])
grad = np.zeros([node_grad.shape[0], self.output_dim,
self.input_dim],
dtype=node_grad.dtype)
# note that the gradient is block-diagonal
grad[:, out_start:out_stop, in_start:in_stop] = node_grad
else:
grad[:, out_start:out_stop, in_start:in_stop] = \
node._get_grad(x[:, in_start:in_stop])
return grad
# this is an optimized implementation, the original implementation is
# used for reference in the unittest
@mdp.extension_method("gradient", mdp.hinet.Switchboard, "_gradient")
def _switchboard_gradient(self, x, grad=None):
if grad is None:
grad = np.zeros((len(x), self.input_dim, self.input_dim))
diag_indices = np.arange(self.input_dim)
grad[:,diag_indices,diag_indices] = 1.0
## custom implementation for greater speed
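    # a switchboard only routes its inputs, so its Jacobian is a selection
    # matrix; fancy-indexing the gradient rows with self.connections
    # applies it without an explicit matrix product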
grad = grad[:, self.connections]
# update the x value for the next node
result = self._execute(x)
if isinstance(result, tuple):
x = result[0]
msg = result[1]
else:
x = result
msg = {}
msg.update({"grad": grad})
return x, msg
| null |
1,879 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class UpdateNetworkAclEntriesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'UpdateNetworkAclEntries','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def METHOD_NAME(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_EgressAclEntriess(self): # RepeatList
return self.get_query_params().get('EgressAclEntries')
def set_EgressAclEntriess(self, EgressAclEntries): # RepeatList
for depth1 in range(len(EgressAclEntries)):
if EgressAclEntries[depth1].get('NetworkAclEntryId') is not None:
self.add_query_param('EgressAclEntries.' + str(depth1 + 1) + '.NetworkAclEntryId', EgressAclEntries[depth1].get('NetworkAclEntryId'))
if EgressAclEntries[depth1].get('EntryType') is not None:
self.add_query_param('EgressAclEntries.' + str(depth1 + 1) + '.EntryType', EgressAclEntries[depth1].get('EntryType'))
if EgressAclEntries[depth1].get('NetworkAclEntryName') is not None:
self.add_query_param('EgressAclEntries.' + str(depth1 + 1) + '.NetworkAclEntryName', EgressAclEntries[depth1].get('NetworkAclEntryName'))
if EgressAclEntries[depth1].get('Policy') is not None:
self.add_query_param('EgressAclEntries.' + str(depth1 + 1) + '.Policy', EgressAclEntries[depth1].get('Policy'))
if EgressAclEntries[depth1].get('Description') is not None:
self.add_query_param('EgressAclEntries.' + str(depth1 + 1) + '.Description', EgressAclEntries[depth1].get('Description'))
if EgressAclEntries[depth1].get('Protocol') is not None:
self.add_query_param('EgressAclEntries.' + str(depth1 + 1) + '.Protocol', EgressAclEntries[depth1].get('Protocol'))
if EgressAclEntries[depth1].get('DestinationCidrIp') is not None:
self.add_query_param('EgressAclEntries.' + str(depth1 + 1) + '.DestinationCidrIp', EgressAclEntries[depth1].get('DestinationCidrIp'))
if EgressAclEntries[depth1].get('Port') is not None:
self.add_query_param('EgressAclEntries.' + str(depth1 + 1) + '.Port', EgressAclEntries[depth1].get('Port'))
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_NetworkAclId(self): # String
return self.get_query_params().get('NetworkAclId')
def set_NetworkAclId(self, NetworkAclId): # String
self.add_query_param('NetworkAclId', NetworkAclId)
def get_UpdateIngressAclEntries(self): # Boolean
return self.get_query_params().get('UpdateIngressAclEntries')
def set_UpdateIngressAclEntries(self, UpdateIngressAclEntries): # Boolean
self.add_query_param('UpdateIngressAclEntries', UpdateIngressAclEntries)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_UpdateEgressAclEntries(self): # Boolean
return self.get_query_params().get('UpdateEgressAclEntries')
def set_UpdateEgressAclEntries(self, UpdateEgressAclEntries): # Boolean
self.add_query_param('UpdateEgressAclEntries', UpdateEgressAclEntries)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_IngressAclEntriess(self): # RepeatList
return self.get_query_params().get('IngressAclEntries')
def set_IngressAclEntriess(self, IngressAclEntries): # RepeatList
for depth1 in range(len(IngressAclEntries)):
if IngressAclEntries[depth1].get('NetworkAclEntryId') is not None:
self.add_query_param('IngressAclEntries.' + str(depth1 + 1) + '.NetworkAclEntryId', IngressAclEntries[depth1].get('NetworkAclEntryId'))
if IngressAclEntries[depth1].get('EntryType') is not None:
self.add_query_param('IngressAclEntries.' + str(depth1 + 1) + '.EntryType', IngressAclEntries[depth1].get('EntryType'))
if IngressAclEntries[depth1].get('NetworkAclEntryName') is not None:
self.add_query_param('IngressAclEntries.' + str(depth1 + 1) + '.NetworkAclEntryName', IngressAclEntries[depth1].get('NetworkAclEntryName'))
if IngressAclEntries[depth1].get('Policy') is not None:
self.add_query_param('IngressAclEntries.' + str(depth1 + 1) + '.Policy', IngressAclEntries[depth1].get('Policy'))
if IngressAclEntries[depth1].get('SourceCidrIp') is not None:
self.add_query_param('IngressAclEntries.' + str(depth1 + 1) + '.SourceCidrIp', IngressAclEntries[depth1].get('SourceCidrIp'))
if IngressAclEntries[depth1].get('Description') is not None:
self.add_query_param('IngressAclEntries.' + str(depth1 + 1) + '.Description', IngressAclEntries[depth1].get('Description'))
if IngressAclEntries[depth1].get('Protocol') is not None:
self.add_query_param('IngressAclEntries.' + str(depth1 + 1) + '.Protocol', IngressAclEntries[depth1].get('Protocol'))
if IngressAclEntries[depth1].get('Port') is not None:
self.add_query_param('IngressAclEntries.' + str(depth1 + 1) + '.Port', IngressAclEntries[depth1].get('Port'))
| null |
1,880 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class DescribeDBInstancesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'DescribeDBInstances')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ConnectionString(self): # String
return self.get_query_params().get('ConnectionString')
def set_ConnectionString(self, ConnectionString): # String
self.add_query_param('ConnectionString', ConnectionString)
def get_EngineVersion(self): # String
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self, EngineVersion): # String
self.add_query_param('EngineVersion', EngineVersion)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_proxyId(self): # String
return self.get_query_params().get('proxyId')
def set_proxyId(self, proxyId): # String
self.add_query_param('proxyId', proxyId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DBInstanceType(self): # String
return self.get_query_params().get('DBInstanceType')
def set_DBInstanceType(self, DBInstanceType): # String
self.add_query_param('DBInstanceType', DBInstanceType)
def get_DBInstanceClass(self): # String
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self, DBInstanceClass): # String
self.add_query_param('DBInstanceClass', DBInstanceClass)
def get_Tags(self): # String
return self.get_query_params().get('Tags')
def set_Tags(self, Tags): # String
self.add_query_param('Tags', Tags)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults)
def get_InstanceNetworkType(self): # String
return self.get_query_params().get('InstanceNetworkType')
def set_InstanceNetworkType(self, InstanceNetworkType): # String
self.add_query_param('InstanceNetworkType', InstanceNetworkType)
def get_ConnectionMode(self): # String
return self.get_query_params().get('ConnectionMode')
def set_ConnectionMode(self, ConnectionMode): # String
self.add_query_param('ConnectionMode', ConnectionMode)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_InstanceLevel(self): # Integer
return self.get_query_params().get('InstanceLevel')
def set_InstanceLevel(self, InstanceLevel): # Integer
self.add_query_param('InstanceLevel', InstanceLevel)
def get_SearchKey(self): # String
return self.get_query_params().get('SearchKey')
def set_SearchKey(self, SearchKey): # String
self.add_query_param('SearchKey', SearchKey)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_Expired(self): # String
return self.get_query_params().get('Expired')
def set_Expired(self, Expired): # String
self.add_query_param('Expired', Expired)
def get_Engine(self): # String
return self.get_query_params().get('Engine')
def METHOD_NAME(self, Engine): # String
self.add_query_param('Engine', Engine)
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_DBInstanceStatus(self): # String
return self.get_query_params().get('DBInstanceStatus')
def set_DBInstanceStatus(self, DBInstanceStatus): # String
self.add_query_param('DBInstanceStatus', DBInstanceStatus)
def get_DBInstanceId(self): # String
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self, DBInstanceId): # String
self.add_query_param('DBInstanceId', DBInstanceId)
def get_DedicatedHostGroupId(self): # String
return self.get_query_params().get('DedicatedHostGroupId')
def set_DedicatedHostGroupId(self, DedicatedHostGroupId): # String
self.add_query_param('DedicatedHostGroupId', DedicatedHostGroupId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_DedicatedHostId(self): # String
return self.get_query_params().get('DedicatedHostId')
def set_DedicatedHostId(self, DedicatedHostId): # String
self.add_query_param('DedicatedHostId', DedicatedHostId)
def get_Filter(self): # String
return self.get_query_params().get('Filter')
def set_Filter(self, Filter): # String
self.add_query_param('Filter', Filter)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_Category(self): # String
return self.get_query_params().get('Category')
def set_Category(self, Category): # String
self.add_query_param('Category', Category)
def get_PayType(self): # String
return self.get_query_params().get('PayType')
def set_PayType(self, PayType): # String
self.add_query_param('PayType', PayType)
| null |
1,881 |
# Copyright 2018 Akretion (http://www.akretion.com)
# Copyright 2018 ACSONE SA/NV
# Sébastien BEAU <[email protected]>
# Copyright 2020 Camptocamp SA (http://www.camptocamp.com)
# Simone Orsi <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.addons.shopinvader.tests.common import UtilsMixin
from .common import StockCommonCase
class TestProductProduct(StockCommonCase, UtilsMixin):
"""Tests for product stock info."""
    def _expected_qty_by_wh(self, warehouse_recs, prod):
res = {
"global": {
"qty": prod.with_context(
warehouse=list(warehouse_recs.ids)
).qty_available
},
}
for wh in warehouse_recs:
key = self.shopinvader_backend._make_warehouse_key(wh)
res[key] = {"qty": prod.with_context(warehouse=wh.id).qty_available}
return res
def test_update_qty_from_wizard(self):
"""Updating the quantity through an inventory create a job."""
job = self.job_counter()
self._add_stock_to_product(self.product, self.loc_1, 100)
self.assertEqual(job.count_created(), 1)
def test_update_stock_on_new_product(self):
"""Recompute binding not exported yet does nothing."""
self.assertEqual(self.product.shopinvader_bind_ids.sync_state, "new")
self.product.synchronize_all_binding_stock_level()
self.assertEqual(self.product.shopinvader_bind_ids.data, {})
    def _test_update_stock_with_key(self, key_stock, sync_immediately=True):
shopinvader_product = self.product.shopinvader_bind_ids
self._refresh_json_data(shopinvader_product, backend=self.shopinvader_backend)
shopinvader_product.sync_state = "to_update"
self.assertEqual(shopinvader_product.data[key_stock], {"global": {"qty": 0.0}})
jobs = self.job_counter()
self._add_stock_to_product(self.product, self.loc_1, 100)
self.assertEqual(jobs.count_created(), 1)
shopinvader_product.invalidate_cache(["stock_data"])
with self.se_adapter_fake.mocked_calls() as calls:
self.perform_jobs(jobs)
self.assertEqual(
shopinvader_product.data[key_stock], {"global": {"qty": 100.0}}
)
        if sync_immediately:
self.assertEqual(len(calls), 1)
call = calls[0]
self.assertEqual(call["method"], "index")
self.assertEqual(len(call["args"]), 1)
self.assertEqual(call["args"][0][key_stock], {"global": {"qty": 100.0}})
self.assertEqual(shopinvader_product.sync_state, "done")
else:
self.assertEqual(len(calls), 0)
self.assertEqual(shopinvader_product.sync_state, "to_update")
def test_update_stock(self):
"""Recompute product should update binding and export it."""
self._test_update_stock_with_key("stock")
def test_update_stock_differed(self):
"""Recompute product should update binding and not export it."""
self.shopinvader_backend.synchronize_stock = "in_batch"
self._test_update_stock_with_key("stock", sync_immediatly=False)
def test_update_stock_with_special_key(self):
"""Recompute product should update binding using custom key by user."""
export_line = self.env.ref(
"shopinvader_product_stock." "ir_exp_shopinvader_variant_stock_data"
)
export_line.target = "stock_data:custom_stock"
self._test_update_stock_with_key("custom_stock")
def test_update_stock_without_target(self):
"""Recompute product should update binding using the name as key."""
export_line = self.env.ref(
"shopinvader_product_stock." "ir_exp_shopinvader_variant_stock_data"
)
export_line.target = None
self._test_update_stock_with_key("stock_data")
def test_update_stock_without_key(self):
"""Recompute product should update binding without export line."""
export_line = self.env.ref(
"shopinvader_product_stock." "ir_exp_shopinvader_variant_stock_data"
)
export_line.unlink()
shopinvader_product = self.product.shopinvader_bind_ids
self._refresh_json_data(shopinvader_product, backend=self.shopinvader_backend)
shopinvader_product.sync_state = "to_update"
self.assertNotIn("stock", shopinvader_product.data)
jobs = self.job_counter()
self._add_stock_to_product(self.product, self.loc_1, 100)
self.assertEqual(jobs.count_created(), 1)
self.perform_jobs(jobs)
self.assertNotIn("stock", shopinvader_product.data)
def METHOD_NAME(self):
warehouses = self.warehouse_1 + self.warehouse_2
self.shopinvader_backend.write({"warehouse_ids": [(6, 0, warehouses.ids)]})
shopinvader_product = self.product.shopinvader_bind_ids
self._refresh_json_data(shopinvader_product, backend=self.shopinvader_backend)
shopinvader_product.sync_state = "to_update"
        expected = self._expected_qty_by_wh(warehouses, self.product)
self.assertEqual(shopinvader_product.data["stock"], expected)
jobs = self.job_counter()
self._add_stock_to_product(self.product, self.loc_1, 100)
self._add_stock_to_product(self.product, self.loc_2, 200)
shopinvader_product.invalidate_cache(["stock_data"])
self.assertEqual(jobs.count_created(), 1)
with self.se_adapter_fake.mocked_calls():
self.perform_jobs(jobs)
        expected = self._expected_qty_by_wh(warehouses, self.product)
self.assertEqual(shopinvader_product.data["stock"], expected)
| null |
1,882 |
from gettext import gettext as _
from django.db import transaction
from rest_framework import serializers
from rest_framework.exceptions import ValidationError as DRFValidationError
from pulpcore.app import models
from pulpcore.app.serializers import (
DetailIdentityField,
DetailRelatedField,
DomainUniqueValidator,
ModelSerializer,
)
from pulpcore.app.util import get_url
class AlternateContentSourcePathField(serializers.ListField):
"""Serializer field for AlternateContentSource."""
child = serializers.CharField()
def to_representation(self, paths):
"""
A serializer field for AlternateContentSourcePath models.
Args:
            paths: the related AlternateContentSourcePath records to represent
Returns:
A list of paths related to AlternateContentSource
"""
return [acs_path.path for acs_path in paths.all()]
class AlternateContentSourceSerializer(ModelSerializer):
"""
Serializer for the AlternateContentSource.
"""
pulp_href = DetailIdentityField(view_name_pattern=r"acs(-.*/.*)-detail")
name = serializers.CharField(
help_text=_("Name of Alternate Content Source."),
required=True,
validators=[DomainUniqueValidator(queryset=models.AlternateContentSource.objects.all())],
)
last_refreshed = serializers.DateTimeField(
help_text=_("Date of last refresh of AlternateContentSource."),
allow_null=True,
required=False,
)
remote = DetailRelatedField(
help_text=_("The remote to provide alternate content source."),
view_name_pattern=r"remotes(-.*/.*)-detail",
queryset=models.Remote.objects.all(),
required=True,
)
paths = AlternateContentSourcePathField(
help_text=_(
"List of paths that will be appended to the Remote url when searching for content."
),
required=False,
)
def validate_remote(self, remote):
if remote.policy != "on_demand":
raise serializers.ValidationError(
_("Remote used with alternate content source must have the 'on_demand' policy.")
)
if type(remote) not in self.Meta.model.REMOTE_TYPES:
raise serializers.ValidationError(
detail=_("Type for Remote '{}' does not match ACS type.").format(remote.name)
)
return remote
@transaction.atomic
def create(self, validated_data):
"""Create Alternate Content Source and its path if specified."""
paths = validated_data.pop("paths", [])
acs = super().create(validated_data)
try:
self._update_paths(acs, paths)
except DRFValidationError as exc:
acs.delete()
raise exc
return acs
def _update_paths(self, acs, paths):
"""Update Alternate Content Source paths."""
existing_paths = {
acs_path.path
for acs_path in models.AlternateContentSourcePath.objects.filter(
alternate_content_source=acs.pk
)
}
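        # "paths is None" means the field was omitted from the payload, so
        # the existing paths are left untouched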
if paths is None:
to_remove = set()
to_add = set()
else:
to_remove = existing_paths - set(paths)
to_add = set(paths) - existing_paths
if to_remove:
models.AlternateContentSourcePath.objects.filter(path__in=to_remove).delete()
if to_add:
for acs_path in to_add:
new_path = {
"alternate_content_source": get_url(acs),
"path": acs_path,
}
acs_path_serializer = AlternateContentSourcePathSerializer(data=new_path)
acs_path_serializer.is_valid(raise_exception=True)
acs_path_serializer.save()
        # if an ACS has no paths, create an empty one so the base path of the ACS remote is used
if not models.AlternateContentSourcePath.objects.filter(
alternate_content_source=acs.pk
).exists():
empty_path_serializer_data = {"alternate_content_source": get_url(acs), "path": ""}
empty_path_serializer = AlternateContentSourcePathSerializer(
data=empty_path_serializer_data
)
empty_path_serializer.is_valid(raise_exception=True)
empty_path_serializer.save()
def METHOD_NAME(self, instance, validated_data):
"""Update an Alternate Content Source."""
instance.name = validated_data.get("name", instance.name)
instance.remote = validated_data.get("remote", instance.remote)
paths = validated_data.get("paths")
with transaction.atomic():
self._update_paths(instance, paths)
instance.save()
return instance
class Meta:
model = models.AlternateContentSource
fields = ModelSerializer.Meta.fields + (
"pulp_href",
"name",
"last_refreshed",
"paths",
"remote",
)
class AlternateContentSourcePathSerializer(ModelSerializer):
"""
Serializer for the AlternateContentSourcePath.
"""
alternate_content_source = DetailRelatedField(
view_name_pattern=r"acs(-.*/.*)-detail",
queryset=models.AlternateContentSource.objects.all(),
required=True,
)
path = serializers.CharField(help_text=_("Path for ACS."), allow_blank=True, required=False)
repository = DetailRelatedField(
view_name_pattern=r"repository(-.*/.*)-detail",
queryset=models.Repository.objects.all(),
required=False,
allow_null=True,
)
class Meta:
model = models.AlternateContentSourcePath
fields = ModelSerializer.Meta.fields + ("alternate_content_source", "path", "repository")
| null |
1,883 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class CreateFlowLogRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateFlowLog','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_ResourceId(self): # String
return self.get_query_params().get('ResourceId')
def set_ResourceId(self, ResourceId): # String
self.add_query_param('ResourceId', ResourceId)
def get_ProjectName(self): # String
return self.get_query_params().get('ProjectName')
def set_ProjectName(self, ProjectName): # String
self.add_query_param('ProjectName', ProjectName)
def get_LogStoreName(self): # String
return self.get_query_params().get('LogStoreName')
def set_LogStoreName(self, LogStoreName): # String
self.add_query_param('LogStoreName', LogStoreName)
	def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_TrafficPaths(self): # RepeatList
return self.get_query_params().get('TrafficPath')
def set_TrafficPaths(self, TrafficPath): # RepeatList
for depth1 in range(len(TrafficPath)):
self.add_query_param('TrafficPath.' + str(depth1 + 1), TrafficPath[depth1])
def get_AggregationInterval(self): # Integer
return self.get_query_params().get('AggregationInterval')
def set_AggregationInterval(self, AggregationInterval): # Integer
self.add_query_param('AggregationInterval', AggregationInterval)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ResourceType(self): # String
return self.get_query_params().get('ResourceType')
def set_ResourceType(self, ResourceType): # String
self.add_query_param('ResourceType', ResourceType)
def get_TrafficType(self): # String
return self.get_query_params().get('TrafficType')
def set_TrafficType(self, TrafficType): # String
self.add_query_param('TrafficType', TrafficType)
def get_FlowLogName(self): # String
return self.get_query_params().get('FlowLogName')
def set_FlowLogName(self, FlowLogName): # String
self.add_query_param('FlowLogName', FlowLogName)
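# Illustrative usage (assumption: this sketch is not part of the original SDK file).
# set_Tags flattens a list of dicts into 1-based indexed query parameters:
#
#   req = CreateFlowLogRequest()
#   req.set_Tags([{'Key': 'env', 'Value': 'prod'}])
#   # adds query params Tag.1.Key=env and Tag.1.Value=prod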
| null |
1,884 |
import shutil
import numpy as np
import pandas as pd
import pytest
from pharmpy.internals.fs.cwd import chdir
from pharmpy.modeling import read_model
from pharmpy.tools import fit, run_modelsearch
from pharmpy.workflows import ModelDatabase
def test_exhaustive(tmp_path, model_count, start_model):
with chdir(tmp_path):
res = run_modelsearch(
'ABSORPTION(ZO);PERIPHERALS(1)',
'exhaustive',
results=start_model.modelfit_results,
model=start_model,
)
assert len(res.summary_tool) == 4
assert len(res.summary_models) == 4
assert len(res.models) == 3
assert all(model.modelfit_results for model in res.models)
assert not all(np.isnan(model.modelfit_results.ofv) for model in res.models)
rundir = tmp_path / 'modelsearch_dir1'
assert rundir.is_dir()
assert model_count(rundir) == 3
assert (rundir / 'results.json').exists()
assert (rundir / 'results.csv').exists()
assert (rundir / 'metadata.json').exists()
@pytest.mark.parametrize(
'search_space, no_of_models, last_model_parent_name, model_with_error, ref',
[
(
'ABSORPTION(ZO);PERIPHERALS(1)',
4,
'modelsearch_run2',
'modelsearch_run3',
('modelsearch_run2', ['PERIPHERALS(1)', 'VP1 = ']),
),
(
'ABSORPTION([ZO,SEQ-ZO-FO]);PERIPHERALS(1)',
7,
'modelsearch_run3',
'modelsearch_run5',
('modelsearch_run3', ['PERIPHERALS(1)', 'VP1 = ']),
),
],
)
def test_exhaustive_stepwise_basic(
tmp_path,
model_count,
start_model,
search_space,
no_of_models,
last_model_parent_name,
model_with_error,
ref,
):
with chdir(tmp_path):
res = run_modelsearch(
search_space,
'exhaustive_stepwise',
results=start_model.modelfit_results,
model=start_model,
)
assert len(res.summary_tool) == no_of_models + 1
assert len(res.summary_models) == no_of_models + 1
assert len(res.models) == no_of_models
assert res.models[-1].modelfit_results
assert res.models[0].parent_model == 'mox2'
assert res.models[-1].parent_model == last_model_parent_name
if last_model_parent_name != 'mox2':
last_model_features = res.summary_tool.loc[res.models[-1].name]['description']
parent_model_features = res.summary_tool.loc[last_model_parent_name]['description']
assert last_model_features[: len(parent_model_features)] == parent_model_features
if model_with_error:
assert model_with_error in res.summary_errors.index.get_level_values('model')
summary_tool_sorted_by_dbic = res.summary_tool.sort_values(by=['dbic'], ascending=False)
summary_tool_sorted_by_bic = res.summary_tool.sort_values(by=['bic'])
summary_tool_sorted_by_rank = res.summary_tool.sort_values(by=['rank'])
pd.testing.assert_frame_equal(summary_tool_sorted_by_dbic, summary_tool_sorted_by_rank)
pd.testing.assert_frame_equal(summary_tool_sorted_by_dbic, summary_tool_sorted_by_bic)
rundir = tmp_path / 'modelsearch_dir1'
assert rundir.is_dir()
assert model_count(rundir) == no_of_models
assert (rundir / 'results.json').exists()
assert (rundir / 'results.csv').exists()
assert (rundir / 'metadata.json').exists()
db: ModelDatabase = res.tool_database.model_database
model_name, code_ref = ref
path = db.retrieve_file(model_name, f'{model_name}.mod')
with open(path, 'r') as fh:
model_code = fh.read()
assert all(code in model_code for code in code_ref)
@pytest.mark.parametrize(
'search_space, iiv_strategy, no_of_models, no_of_added_etas',
[
('ABSORPTION(ZO);PERIPHERALS(1)', 'add_diagonal', 4, 2),
('ABSORPTION(ZO);PERIPHERALS(1)', 'fullblock', 4, 2),
('PERIPHERALS(1);LAGTIME()', 'absorption_delay', 4, 1),
],
)
def test_exhaustive_stepwise_iiv_strategies(
tmp_path,
model_count,
start_model,
search_space,
iiv_strategy,
no_of_models,
no_of_added_etas,
):
with chdir(tmp_path):
res = run_modelsearch(
search_space,
'exhaustive_stepwise',
iiv_strategy=iiv_strategy,
results=start_model.modelfit_results,
model=start_model,
)
assert len(res.summary_tool) == no_of_models + 1
assert len(res.summary_models) == no_of_models + 1
assert len(res.models) == no_of_models
model_last = res.models[no_of_models - 1]
assert (
len(model_last.random_variables.etas.names)
- len(start_model.random_variables.etas.names)
== no_of_added_etas
)
assert model_last.modelfit_results
rundir = tmp_path / 'modelsearch_dir1'
assert rundir.is_dir()
assert model_count(rundir) == no_of_models
assert (rundir / 'results.json').exists()
assert (rundir / 'results.csv').exists()
assert (rundir / 'metadata.json').exists()
# def test_exhaustive_stepwise_start_model_not_fitted(tmp_path, model_count, start_model):
# with chdir(tmp_path):
# start_model = start_model.copy()
# start_model.name = 'start_model_copy'
# start_model.modelfit_results = None
# search_space = 'ABSORPTION(ZO);PERIPHERALS(1)'
# with pytest.warns(UserWarning, match='Could not update'):
# res = run_modelsearch(
# search_space,
# 'exhaustive_stepwise',
# results=start_model.modelfit_results,
# model=start_model,
# )
# assert len(res.summary_tool) == 5
# assert len(res.summary_models) == 5
# assert res.summary_tool['dbic'].isnull().values.all()
# assert len(res.models) == 4
# rundir = tmp_path / 'modelsearch_dir1'
# assert rundir.is_dir()
# assert model_count(rundir) == 4
def test_exhaustive_stepwise_peripheral_upper_limit(tmp_path, start_model):
with chdir(tmp_path):
res = run_modelsearch(
'PERIPHERALS(1)',
'exhaustive_stepwise',
results=start_model.modelfit_results,
model=start_model,
)
assert ',999999) ; POP_QP1' in res.models[0].model_code
assert ',999999) ; POP_VP1' in res.models[0].model_code
def test_summary_individuals(tmp_path, testdata):
with chdir(tmp_path):
shutil.copy2(testdata / 'nonmem' / 'pheno_real.mod', tmp_path)
shutil.copy2(testdata / 'nonmem' / 'pheno.dta', tmp_path)
m = read_model('pheno_real.mod')
start_res = fit(m)
m = m.replace(modelfit_results=start_res)
res = run_modelsearch(
model=m,
results=m.modelfit_results,
search_space='ABSORPTION(ZO);PERIPHERALS([1, 2])',
algorithm='reduced_stepwise',
)
summary = res.summary_individuals
columns = (
'description',
'parent_model',
'outlier_count',
'ofv',
'dofv_vs_parent',
'predicted_dofv',
'predicted_residual',
)
assert summary is not None
assert tuple(summary.columns) == columns
for column in columns:
# Cannot check that all are non-na because some model runs fail
assert summary[column].notna().any()
assert summary['dofv_vs_parent'].equals(
summary.apply(
lambda row: summary.loc[(row['parent_model'], row.name[1])]['ofv'] - row['ofv'],
axis=1,
)
)
| null |
1,885 |
'''
Copyright (C) 2017-2023 Bryant Moscon - [email protected]
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
from collections import defaultdict
from cryptofeed.symbols import Symbol
import logging
from typing import Dict, Tuple
import zlib
from decimal import Decimal
from yapic import json
from cryptofeed.connection import AsyncConnection, RestEndpoint, Routes, WebsocketEndpoint
from cryptofeed.defines import BUY, FUTURES, HUOBI_DM, L2_BOOK, SELL, TRADES
from cryptofeed.feed import Feed
from cryptofeed.types import OrderBook, Trade
LOG = logging.getLogger('feedhandler')
class HuobiDM(Feed):
id = HUOBI_DM
websocket_endpoints = [WebsocketEndpoint('wss://www.hbdm.com/ws')]
rest_endpoints = [RestEndpoint('https://www.hbdm.com', routes=Routes('/api/v1/contract_contract_info'))]
websocket_channels = {
L2_BOOK: 'depth.step0',
TRADES: 'trade.detail',
}
@classmethod
def timestamp_normalize(cls, ts: float) -> float:
return ts / 1000.0
@classmethod
def _parse_symbol_data(cls, data: dict) -> Tuple[Dict, Dict]:
ret = {}
info = defaultdict(dict)
for e in data['data']:
# Pricing is all in USD, see https://huobiglobal.zendesk.com/hc/en-us/articles/360000113102-Introduction-of-Huobi-Futures
s = Symbol(e['symbol'], 'USD', type=FUTURES, expiry_date=e['contract_code'].replace(e['symbol'], ''))
ret[s.normalized] = e['contract_code']
info['tick_size'][s.normalized] = e['price_tick']
info['instrument_type'][s.normalized] = FUTURES
return ret, info
def __reset(self):
self._l2_book = {}
    async def _book(self, msg: dict, timestamp: float):
"""
{
'ch':'market.BTC_CW.depth.step0',
'ts':1565857755564,
'tick':{
'mrid':14848858327,
'id':1565857755,
'bids':[
[ Decimal('9829.99'), 1], ...
]
'asks':[
[ 9830, 625], ...
]
},
'ts':1565857755552,
'version':1565857755,
'ch':'market.BTC_CW.depth.step0'
}
"""
pair = self.exchange_symbol_to_std_symbol(msg['ch'].split('.')[1])
data = msg['tick']
        # When Huobi delists pairs, empty updates are still sent:
# {'ch': 'market.AKRO-USD.depth.step0', 'ts': 1606951241196, 'tick': {'mrid': 50651100044, 'id': 1606951241, 'ts': 1606951241195, 'version': 1606951241, 'ch': 'market.AKRO-USD.depth.step0'}}
# {'ch': 'market.AKRO-USD.depth.step0', 'ts': 1606951242297, 'tick': {'mrid': 50651100044, 'id': 1606951242, 'ts': 1606951242295, 'version': 1606951242, 'ch': 'market.AKRO-USD.depth.step0'}}
if 'bids' in data and 'asks' in data:
if pair not in self._l2_book:
self._l2_book[pair] = OrderBook(self.id, pair, max_depth=self.max_depth)
self._l2_book[pair].book.bids = {Decimal(price): Decimal(amount) for price, amount in data['bids']}
self._l2_book[pair].book.asks = {Decimal(price): Decimal(amount) for price, amount in data['asks']}
await self.book_callback(L2_BOOK, self._l2_book[pair], timestamp, timestamp=self.timestamp_normalize(msg['ts']), raw=msg)
async def _trade(self, msg: dict, timestamp: float):
"""
{
'ch': 'market.btcusd.trade.detail',
'ts': 1549773923965,
'tick': {
'id': 100065340982,
'ts': 1549757127140,
'data': [{'id': '10006534098224147003732', 'amount': Decimal('0.0777'), 'price': Decimal('3669.69'), 'direction': 'buy', 'ts': 1549757127140}]}
}
"""
for trade in msg['tick']['data']:
t = Trade(
self.id,
self.exchange_symbol_to_std_symbol(msg['ch'].split('.')[1]),
BUY if trade['direction'] == 'buy' else SELL,
Decimal(trade['amount']),
Decimal(trade['price']),
self.timestamp_normalize(trade['ts']),
id=str(trade['id']),
raw=trade
)
await self.callback(TRADES, t, timestamp)
async def message_handler(self, msg: str, conn, timestamp: float):
# unzip message
msg = zlib.decompress(msg, 16 + zlib.MAX_WBITS)
msg = json.loads(msg, parse_float=Decimal)
        # Huobi sends a ping every 5 seconds and will disconnect us if we do not respond to it
if 'ping' in msg:
await conn.write(json.dumps({'pong': msg['ping']}))
elif 'status' in msg and msg['status'] == 'ok':
return
elif 'ch' in msg:
if 'trade' in msg['ch']:
await self._trade(msg, timestamp)
elif 'depth' in msg['ch']:
                await self._book(msg, timestamp)
else:
LOG.warning("%s: Invalid message type %s", self.id, msg)
else:
LOG.warning("%s: Invalid message type %s", self.id, msg)
async def subscribe(self, conn: AsyncConnection):
self.__reset()
client_id = 0
for chan, symbols in conn.subscription.items():
for symbol in symbols:
client_id += 1
await conn.write(json.dumps(
{
"sub": f"market.{symbol}.{chan}",
"id": str(client_id)
}
))
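# Usage sketch (assumption: the FeedHandler wiring, callback and symbol below
# are illustrative and not part of this file):
#
#   from cryptofeed import FeedHandler
#
#   async def book_cb(book, receipt_ts):
#       print(book.symbol, len(book.book.bids))
#
#   fh = FeedHandler()
#   fh.add_feed(HuobiDM(symbols=['BTC-USD-PERP'], channels=[L2_BOOK],
#                       callbacks={L2_BOOK: book_cb}))
#   fh.run()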
| null |
1,886 |
import argparse
import struct
import sys
try:
from elftools.elf.elffile import ELFFile
except ImportError:
print('pytelftools missing, install to run this script', file=sys.stderr)
print('https://github.com/eliben/pyelftools#installing', file=sys.stderr)
sys.exit(1)
class Colors:
RED = '\033[91m'
BLUE = '\033[94m'
GREEN = '\033[92m'
END = '\033[0m'
CORE = 0x20
param_type_to_str_dict = {
0x0 | 0x0 << 2 | 0x1 << 3: 'PARAM_UINT8',
0x0 | 0x0 << 2 | 0x0 << 3: 'PARAM_INT8',
    0x1 | 0x0 << 2 | 0x1 << 3: 'PARAM_UINT16',
0x1 | 0x0 << 2 | 0x0 << 3: 'PARAM_INT16',
0x2 | 0x0 << 2 | 0x1 << 3: 'PARAM_UINT32',
0x2 | 0x0 << 2 | 0x0 << 3: 'PARAM_INT32',
0x2 | 0x1 << 2 | 0x0 << 3: 'PARAM_FLOAT'
}
def param_type_to_str(t: int) -> str:
extra = str()
if t & (1 << 5): # PARAM_CORE set
extra = ' | PARAM_CORE'
if t & (1 << 6): # PARAM_RONLY set
extra += ' | PARAM_RONLY'
int_type = t & ~(1 << 5 | 1 << 6)
return '{:12}{}'.format(param_type_to_str_dict[int_type], extra)
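# Worked example (added for illustration; the value is an assumption):
#   t = 0x2 | 0x1 << 3 | 1 << 5   # PARAM_UINT32 with the PARAM_CORE bit set (0x2A)
#   param_type_to_str(t)          # -> 'PARAM_UINT32 | PARAM_CORE'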
log_type_to_str_dict = {
0x1: 'LOG_UINT8',
0x2: 'LOG_INT8',
    0x3: 'LOG_UINT16',
0x4: 'LOG_INT16',
0x5: 'LOG_UINT32',
0x6: 'LOG_INT32',
0x7: 'LOG_FLOAT',
0x8: 'LOG_FP16'
}
def log_type_to_str(t: int) -> str:
extra = str()
if t & (1 << 5): # LOG_CORE set
extra = ' | LOG_CORE'
if t & (1 << 6): # BY_FUNCTION set
extra += ' | BY_FUNCTION'
int_type = t & ~(1 << 5 | 1 << 6)
return '{:12}{}'.format(log_type_to_str_dict[int_type], extra)
def process_file(filename, list_params: bool, list_logs: bool, core: bool):
with open(filename, 'rb') as f:
parameters = check_structs(f, 'param', core)
if list_params:
for key in sorted(parameters.keys()):
t = parameters[key]
print('{:25}\t{}'.format(key, param_type_to_str(t)))
logs = check_structs(f, 'log', core)
if list_logs:
for key in sorted(logs.keys()):
t = logs[key]
                print('{:25}\t{}'.format(key, log_type_to_str(t)))
n_logs = Colors.GREEN + str(len(logs.keys())) + Colors.END
n_params = Colors.BLUE + str(len(parameters.keys())) + Colors.END
print('{} parameters and {} log vars in elf'.format(n_params, n_logs))
def get_offset_of(elf, addr):
for seg in elf.iter_segments():
if seg.header['p_type'] != 'PT_LOAD':
continue
# If the symbol is inside the range of a LOADed segment, calculate the
# file offset by subtracting the virtual start address and adding the
# file offset of the loaded section(s)
if addr >= seg['p_vaddr'] and addr < seg['p_vaddr'] + seg['p_filesz']:
return addr - seg['p_vaddr'] + seg['p_offset']
return None
def get_offset_of_symbol(elf, name):
section = elf.get_section_by_name('.symtab')
sym = section.get_symbol_by_name(name)[0]
if not sym:
print('symbol %s not found' % name, file=sys.stderr)
sys.exit(1)
return get_offset_of(elf, sym['st_value'])
def check_structs(stream, what: str, core: bool) -> dict:
elf = ELFFile(stream)
offset = get_offset_of_symbol(elf, '_{}_start'.format(what))
stop_offset = get_offset_of_symbol(elf, '_{}_stop'.format(what))
name_type_dict = {}
name_maxlen = 25
group_bit = 0x1 << 7
start_bit = 0x1
if what == 'log':
struct_len = 12
else:
struct_len = 20
while offset < stop_offset:
elf.stream.seek(offset)
#
# Parsing log or param, first unpack the struct:
# struct [log_s] {
# uint8_t type;
# char * name;
# void * address;
# };
#
# struct [param_s] {
# uint8_t type;
# uint8_t extended_type;
# char * name;
# void * address;
# void * callback;
# void * getter;
# };
#
# We want the type and the name.
#
buffer = elf.stream.read(struct_len)
if what == 'log':
t, addr = struct.unpack('@Bxxxixxxx', buffer)
else:
t, addr = struct.unpack('@Bxxxixxxxxxxxxxxx', buffer)
#
# Next, convert address of name to offset in elf
#
addr = get_offset_of(elf, addr)
#
# And read the name from that offset
#
elf.stream.seek(addr)
name = ''.join(iter(lambda: stream.read(1).decode('ascii'), '\x00'))
#
# Check if this is start of a group
#
if t & group_bit != 0 and t & start_bit != 0:
current_group = name
elif t & group_bit == 0:
name = '%s.%s' % (current_group, name)
if name in name_type_dict:
print('%sDuplicate parameter detected!%s (%s)' %
(Colors.RED, Colors.END, name), file=sys.stderr)
sys.exit(1)
else:
#
# If core only is specified we check if the core flag is set
#
if not core or (t & CORE) != 0:
name_type_dict[name] = t
if len(name) > name_maxlen:
print('%sName too long!%s (%s > %d)' %
(Colors.RED, Colors.END, name, name_maxlen),
file=sys.stderr)
sys.exit(1)
                # Parameter and log names must not contain spaces, as they are mapped
                # to topics in ROS, which do not support spaces.
if ' ' in name:
print(f'{Colors.RED}Name contains space(s){Colors.END} ("{name}")', file=sys.stderr)
sys.exit(1)
offset += struct_len
return name_type_dict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--list-params', action='store_true')
parser.add_argument('--list-logs', action='store_true')
parser.add_argument('--core', action='store_true')
parser.add_argument('filename', nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.filename:
process_file(args.filename[0], args.list_params, args.list_logs, args.core)
else:
sys.exit(1)
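# Example invocation (assumption: script and elf file names are illustrative):
#   python3 elf_sanity.py --list-params --core cf2.elf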
| null |
1,887 |
################################################################################
# Creme is a free/open-source Customer Relationship Management software
# Copyright (C) 2014-2023 Hybird
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
from __future__ import annotations
from collections import OrderedDict
from dateutil.relativedelta import relativedelta
from dateutil.rrule import (
DAILY,
HOURLY,
MINUTELY,
MONTHLY,
WEEKLY,
YEARLY,
rrule,
)
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext
class DatePeriod:
name: str = 'base_date_period' # Override
verbose_name = 'Date period' # Override
def __str__(self):
return str(self.verbose_name)
def __eq__(self, other_dp):
try:
other_td = other_dp.as_timedelta()
# except:
except AttributeError:
return False
return self.as_timedelta() == other_td
def __ne__(self, other_dp):
return not self == other_dp
def as_timedelta(self) -> relativedelta:
raise NotImplementedError
def _value_as_dict(self):
"Period as a jsonifiable dictionary."
raise NotImplementedError
def as_rrule(self) -> rrule:
"Period as a dateutil recurrent rule."
raise NotImplementedError
def as_dict(self) -> dict:
"Period as a jsonifiable dictionary."
d = {'type': self.name}
d.update(self._value_as_dict())
return d
class SimpleValueDatePeriod(DatePeriod):
frequency: int # = ... TO BE DEFINED (see MINUTELY etc...)
def __init__(self, value):
self._value = value
def __str__(self):
value = self._value
return self._ngettext(self._value).format(number=value)
def _ngettext(self, value):
raise NotImplementedError
def as_rrule(self, **kwargs):
return rrule(self.frequency, interval=self._value, **kwargs)
def as_timedelta(self):
return relativedelta(**{self.name: self._value})
def _value_as_dict(self):
return {'value': self._value}
class MinutesPeriod(SimpleValueDatePeriod):
name = 'minutes'
verbose_name = _('Minute(s)')
frequency = MINUTELY
def _ngettext(self, value):
return ngettext('{number} minute', '{number} minutes', value)
class HoursPeriod(SimpleValueDatePeriod):
name = 'hours'
verbose_name = _('Hour(s)')
frequency = HOURLY
def _ngettext(self, value):
return ngettext('{number} hour', '{number} hours', value)
class DaysPeriod(SimpleValueDatePeriod):
name = 'days'
verbose_name = _('Day(s)')
frequency = DAILY
def _ngettext(self, value):
return ngettext('{number} day', '{number} days', value)
class WeeksPeriod(SimpleValueDatePeriod):
name = 'weeks'
verbose_name = _('Week(s)')
frequency = WEEKLY
def _ngettext(self, value):
return ngettext('{number} week', '{number} weeks', value)
class MonthsPeriod(SimpleValueDatePeriod):
name = 'months'
verbose_name = _('Month(s)')
frequency = MONTHLY
def _ngettext(self, value):
return ngettext('{number} month', '{number} months', value)
class YearsPeriod(SimpleValueDatePeriod):
name = 'years'
verbose_name = _('Year(s)')
frequency = YEARLY
def _ngettext(self, value):
return ngettext('{number} year', '{number} years', value)
class DatePeriodRegistry:
class RegistrationError(Exception):
pass
def __init__(self, *periods: type[DatePeriod]):
self._periods: dict[str, type[DatePeriod]] = OrderedDict()
        self.register(*periods)
def choices(self, choices=None):
"""Yield tuples which can be used to build the DatePeriodField formfield.
@param choices List of names or None, used to filter the registry elements.
If None provided, return all the elements.
@yield The tuples (name, period_klass.verbose_name) of registry elements.
"""
is_allowed = (
(lambda name: True)
if choices is None else
(lambda name: name in choices)
)
for name, period_klass in self._periods.items():
if is_allowed(name):
yield name, period_klass.verbose_name
def get_period(self, name: str, *args) -> DatePeriod | None:
klass = self._periods.get(name)
if not klass:
return None
return klass(*args)
def deserialize(self, dict_value: dict) -> DatePeriod | None:
return self.get_period(dict_value['type'], dict_value['value'])
    def register(self, *periods: type[DatePeriod]):
periods_map = self._periods
for period in periods:
name = period.name
if name in periods_map:
raise self.RegistrationError(
f"Duplicate date period's id or period registered twice: {name}"
)
periods_map[name] = period
date_period_registry = DatePeriodRegistry(
MinutesPeriod, HoursPeriod, DaysPeriod,
WeeksPeriod, MonthsPeriod, YearsPeriod,
)
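# Usage sketch (assumption: illustrative, not part of the original module):
#   period = date_period_registry.get_period('weeks', 2)
#   period.as_timedelta()  # relativedelta(weeks=+2)
#   period.as_dict()       # {'type': 'weeks', 'value': 2}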
| null |
1,888 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class StoreMaterialTemporarilyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Trademark', '2018-07-24', 'StoreMaterialTemporarily','trademark')
def get_ContactEmail(self):
return self.get_query_params().get('ContactEmail')
def set_ContactEmail(self,ContactEmail):
self.add_query_param('ContactEmail',ContactEmail)
def get_ContactAddress(self):
return self.get_query_params().get('ContactAddress')
def set_ContactAddress(self,ContactAddress):
self.add_query_param('ContactAddress',ContactAddress)
def get_EAddress(self):
return self.get_query_params().get('EAddress')
def set_EAddress(self,EAddress):
self.add_query_param('EAddress',EAddress)
	def get_Country(self):
return self.get_query_params().get('Country')
def set_Country(self,Country):
self.add_query_param('Country',Country)
def get_LegalNoticeOssKey(self):
return self.get_query_params().get('LegalNoticeOssKey')
def set_LegalNoticeOssKey(self,LegalNoticeOssKey):
self.add_query_param('LegalNoticeOssKey',LegalNoticeOssKey)
def get_Address(self):
return self.get_query_params().get('Address')
def set_Address(self,Address):
self.add_query_param('Address',Address)
def get_Town(self):
return self.get_query_params().get('Town')
def set_Town(self,Town):
self.add_query_param('Town',Town)
def get_ContactNumber(self):
return self.get_query_params().get('ContactNumber')
def set_ContactNumber(self,ContactNumber):
self.add_query_param('ContactNumber',ContactNumber)
def get_City(self):
return self.get_query_params().get('City')
def set_City(self,City):
self.add_query_param('City',City)
def get_IdCardOssKey(self):
return self.get_query_params().get('IdCardOssKey')
def set_IdCardOssKey(self,IdCardOssKey):
self.add_query_param('IdCardOssKey',IdCardOssKey)
def get_Type(self):
return self.get_query_params().get('Type')
def set_Type(self,Type):
self.add_query_param('Type',Type)
def get_ContactName(self):
return self.get_query_params().get('ContactName')
def set_ContactName(self,ContactName):
self.add_query_param('ContactName',ContactName)
def get_PassportOssKey(self):
return self.get_query_params().get('PassportOssKey')
def set_PassportOssKey(self,PassportOssKey):
self.add_query_param('PassportOssKey',PassportOssKey)
def get_ContactZipcode(self):
return self.get_query_params().get('ContactZipcode')
def set_ContactZipcode(self,ContactZipcode):
self.add_query_param('ContactZipcode',ContactZipcode)
def get_EName(self):
return self.get_query_params().get('EName')
def set_EName(self,EName):
self.add_query_param('EName',EName)
def get_Province(self):
return self.get_query_params().get('Province')
def set_Province(self,Province):
self.add_query_param('Province',Province)
def get_BusinessLicenceOssKey(self):
return self.get_query_params().get('BusinessLicenceOssKey')
def set_BusinessLicenceOssKey(self,BusinessLicenceOssKey):
self.add_query_param('BusinessLicenceOssKey',BusinessLicenceOssKey)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_CardNumber(self):
return self.get_query_params().get('CardNumber')
def set_CardNumber(self,CardNumber):
self.add_query_param('CardNumber',CardNumber)
def get_Region(self):
return self.get_query_params().get('Region')
def set_Region(self,Region):
self.add_query_param('Region',Region)
def get_LoaOssKey(self):
return self.get_query_params().get('LoaOssKey')
def set_LoaOssKey(self,LoaOssKey):
		self.add_query_param('LoaOssKey',LoaOssKey)
| null |
1,889 |
#!/usr/bin/env python
from __future__ import print_function
import re
import sys
import tempfile
try:
from rpy2.rpy_classic import (
BASIC_CONVERSION,
NO_CONVERSION,
r,
RException,
set_default_mode,
)
except ImportError:
# RPy isn't maintained, and doesn't work with R>3.0, use it as a fallback
from rpy import (
BASIC_CONVERSION,
NO_CONVERSION,
r,
RException,
set_default_mode,
)
def stop_err(msg):
sys.stderr.write(msg)
sys.exit(1)
def S3_METHODS(all="key"):
Group_Math = [
"abs",
"sign",
"sqrt",
"floor",
"ceiling",
"trunc",
"round",
"signif",
"exp",
"log",
"cos",
"sin",
"tan",
"acos",
"asin",
"atan",
"cosh",
"sinh",
"tanh",
"acosh",
"asinh",
"atanh",
"lgamma",
"gamma",
"gammaCody",
"digamma",
"trigamma",
"cumsum",
"cumprod",
"cummax",
"cummin",
"c",
]
Group_Ops = [
"+",
"-",
"*",
"/",
"^",
"%%",
"%/%",
"&",
"|",
"!",
"==",
"!=",
"<",
"<=",
">=",
">",
"(",
")",
"~",
",",
]
if all == "key":
return {"Math": Group_Math, "Ops": Group_Ops}
def main():
try:
datafile = sys.argv[1]
outfile_name = sys.argv[2]
expression = sys.argv[3]
except Exception:
METHOD_NAME("Usage: python gsummary.py input_file ouput_file expression")
math_allowed = S3_METHODS()["Math"]
ops_allowed = S3_METHODS()["Ops"]
# Check for invalid expressions
for word in re.compile("[a-zA-Z]+").findall(expression):
if word and word not in math_allowed:
METHOD_NAME("Invalid expression '%s': term '%s' is not recognized or allowed" % (expression, word))
symbols = set()
for symbol in re.compile(r"[^a-z0-9\s]+").findall(expression):
if symbol and symbol not in ops_allowed:
METHOD_NAME("Invalid expression '%s': operator '%s' is not recognized or allowed" % (expression, symbol))
else:
symbols.add(symbol)
if len(symbols) == 1 and "," in symbols:
        # User may have entered a comma-separated list of r_data_frame columns
        stop_err("Invalid columns '%s': this tool requires a single column or expression" % expression)
# Find all column references in the expression
cols = []
for col in re.compile("c[0-9]+").findall(expression):
try:
cols.append(int(col[1:]) - 1)
except Exception:
pass
tmp_file = tempfile.NamedTemporaryFile("w+")
# Write the R header row to the temporary file
hdr_str = "\t".join("c%s" % str(col + 1) for col in cols)
tmp_file.write("%s\n" % hdr_str)
skipped_lines = 0
first_invalid_line = 0
i = 0
for i, line in enumerate(open(datafile)):
line = line.rstrip("\r\n")
if line and not line.startswith("#"):
valid = True
fields = line.split("\t")
# Write the R data row to the temporary file
for col in cols:
try:
float(fields[col])
except Exception:
skipped_lines += 1
if not first_invalid_line:
first_invalid_line = i + 1
valid = False
break
if valid:
data_str = "\t".join(fields[col] for col in cols)
tmp_file.write("%s\n" % data_str)
tmp_file.flush()
if skipped_lines == i + 1:
        stop_err(
            "Invalid column, or column data values are invalid for computation. See tool tips and syntax for data requirements."
        )
else:
# summary function and return labels
set_default_mode(NO_CONVERSION)
summary_func = r(
"function( x ) { c( sum=sum( as.numeric( x ), na.rm=T ), mean=mean( as.numeric( x ), na.rm=T ), stdev=sd( as.numeric( x ), na.rm=T ), quantile( as.numeric( x ), na.rm=TRUE ) ) }"
)
headings = ["sum", "mean", "stdev", "0%", "25%", "50%", "75%", "100%"]
headings_str = "\t".join(headings)
r_data_frame = r.read_table(tmp_file.name, header=True, sep="\t")
outfile = open(outfile_name, "w")
for col in re.compile("c[0-9]+").findall(expression):
r.assign(col, r["$"](r_data_frame, col))
try:
summary = summary_func(r(expression))
except RException as s:
outfile.close()
METHOD_NAME("Computation resulted in the following error: %s" % str(s))
summary = summary.as_py(BASIC_CONVERSION)
outfile.write("#%s\n" % headings_str)
if type(summary) is dict:
# using rpy
outfile.write("%s\n" % "\t".join("%g" % summary[k] for k in headings))
else:
# using rpy2
outfile.write("%s\n" % "\t".join("%g" % k for k in summary))
outfile.close()
if skipped_lines:
print(
"Skipped %d invalid lines beginning with line #%d. See tool tips for data requirements."
% (skipped_lines, first_invalid_line)
)
if __name__ == "__main__":
main()
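# Example (assumption: file names are illustrative):
#   python gsummary.py input.tsv output.tsv 'log(c1) + c2'
# writes the sum, mean, stdev and quantiles of the expression to output.tsv.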
| null |
1,890 |
from typing import (
Any,
List,
)
from dacite import DaciteError
from pcs.cli.common.errors import CmdLineInputError
from pcs.cli.common.parse_args import InputModifiers
from pcs.cli.reports.output import error
from pcs.common.dr import (
DrConfigDto,
DrConfigSiteDto,
DrSiteStatusDto,
)
from pcs.common.interface import dto
from pcs.common.reports import codes as report_codes
from pcs.common.str_tools import indent
from pcs.common.types import StringSequence
def config(
lib: Any,
argv: StringSequence,
modifiers: InputModifiers,
) -> None:
"""
Options: None
"""
modifiers.ensure_only_supported()
if argv:
raise CmdLineInputError()
config_raw = lib.dr.get_config()
try:
config_dto = dto.from_dict(DrConfigDto, config_raw)
except (
KeyError,
TypeError,
ValueError,
DaciteError,
dto.PayloadConversionError,
) as e:
raise error(
"Unable to communicate with pcsd, received response:\n"
f"{config_raw}"
) from e
lines = ["Local site:"]
lines.extend(indent(_config_site_lines(config_dto.local_site)))
for site_dto in config_dto.remote_site_list:
lines.append("Remote site:")
lines.extend(indent(_config_site_lines(site_dto)))
print("\n".join(lines))
def _config_site_lines(site_dto: DrConfigSiteDto) -> List[str]:
lines = [f"Role: {site_dto.site_role.capitalize()}"]
if site_dto.node_list:
lines.append("Nodes:")
lines.extend(indent(sorted([node.name for node in site_dto.node_list])))
return lines
def set_recovery_site(
lib: Any,
argv: StringSequence,
modifiers: InputModifiers,
) -> None:
"""
Options:
* --request-timeout - HTTP timeout for node authorization check
"""
modifiers.ensure_only_supported("--request-timeout")
if len(argv) != 1:
raise CmdLineInputError()
lib.dr.set_recovery_site(argv[0])
def status(
lib: Any,
argv: StringSequence,
modifiers: InputModifiers,
) -> None:
"""
Options:
* --full - show full details, node attributes and failcount
* --hide-inactive - hide inactive resources
* --request-timeout - HTTP timeout for node authorization check
"""
modifiers.ensure_only_supported(
"--full",
"--hide-inactive",
"--request-timeout",
)
if argv:
raise CmdLineInputError()
status_list_raw = lib.dr.status_all_sites_plaintext(
hide_inactive_resources=modifiers.get("--hide-inactive"),
verbose=modifiers.get("--full"),
)
try:
status_list = [
dto.from_dict(DrSiteStatusDto, status_raw)
for status_raw in status_list_raw
]
except (
KeyError,
TypeError,
ValueError,
DaciteError,
dto.PayloadConversionError,
) as e:
raise error(
"Unable to communicate with pcsd, received response:\n"
f"{status_list_raw}"
) from e
has_errors = False
plaintext_parts = []
for site_status in status_list:
plaintext_parts.append(
"--- {local_remote} cluster - {role} site ---".format(
local_remote=("Local" if site_status.local_site else "Remote"),
role=site_status.site_role.capitalize(),
)
)
if site_status.status_successfully_obtained:
plaintext_parts.append(site_status.status_plaintext.strip())
plaintext_parts.extend(["", ""])
else:
has_errors = True
plaintext_parts.extend(
["Error: Unable to get status of the cluster from any node", ""]
)
print("\n".join(plaintext_parts).strip())
if has_errors:
raise error("Unable to get status of all sites")
def destroy(
lib: Any,
argv: StringSequence,
modifiers: InputModifiers,
) -> None:
"""
Options:
* --skip-offline - skip unreachable nodes (including missing auth token)
* --request-timeout - HTTP timeout for node authorization check
"""
modifiers.ensure_only_supported("--skip-offline", "--request-timeout")
if argv:
raise CmdLineInputError()
force_flags = []
if modifiers.get("--skip-offline"):
force_flags.append(report_codes.SKIP_OFFLINE_NODES)
    lib.dr.destroy(force_flags=force_flags)
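# These handlers back the `pcs dr <subcommand>` CLI (assumption: the exact
# command-to-handler mapping is defined outside this module).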
| null |
1,891 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkga.endpoint import endpoint_data
class UpdateEndpointGroupsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ga', '2019-11-20', 'UpdateEndpointGroups','gaplus')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
	def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_EndpointGroupConfigurationss(self): # RepeatList
return self.get_query_params().get('EndpointGroupConfigurations')
def set_EndpointGroupConfigurationss(self, EndpointGroupConfigurations): # RepeatList
for depth1 in range(len(EndpointGroupConfigurations)):
if EndpointGroupConfigurations[depth1].get('EndpointGroupName') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.EndpointGroupName', EndpointGroupConfigurations[depth1].get('EndpointGroupName'))
if EndpointGroupConfigurations[depth1].get('EndpointGroupDescription') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.EndpointGroupDescription', EndpointGroupConfigurations[depth1].get('EndpointGroupDescription'))
if EndpointGroupConfigurations[depth1].get('TrafficPercentage') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.TrafficPercentage', EndpointGroupConfigurations[depth1].get('TrafficPercentage'))
if EndpointGroupConfigurations[depth1].get('HealthCheckEnabled') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.HealthCheckEnabled', EndpointGroupConfigurations[depth1].get('HealthCheckEnabled'))
if EndpointGroupConfigurations[depth1].get('HealthCheckIntervalSeconds') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.HealthCheckIntervalSeconds', EndpointGroupConfigurations[depth1].get('HealthCheckIntervalSeconds'))
if EndpointGroupConfigurations[depth1].get('HealthCheckPath') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.HealthCheckPath', EndpointGroupConfigurations[depth1].get('HealthCheckPath'))
if EndpointGroupConfigurations[depth1].get('HealthCheckPort') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.HealthCheckPort', EndpointGroupConfigurations[depth1].get('HealthCheckPort'))
if EndpointGroupConfigurations[depth1].get('HealthCheckProtocol') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.HealthCheckProtocol', EndpointGroupConfigurations[depth1].get('HealthCheckProtocol'))
if EndpointGroupConfigurations[depth1].get('ThresholdCount') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.ThresholdCount', EndpointGroupConfigurations[depth1].get('ThresholdCount'))
if EndpointGroupConfigurations[depth1].get('EndpointConfigurations') is not None:
for depth2 in range(len(EndpointGroupConfigurations[depth1].get('EndpointConfigurations'))):
if EndpointGroupConfigurations[depth1].get('EndpointConfigurations')[depth2].get('Type') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.EndpointConfigurations.' + str(depth2 + 1) + '.Type', EndpointGroupConfigurations[depth1].get('EndpointConfigurations')[depth2].get('Type'))
if EndpointGroupConfigurations[depth1].get('EndpointConfigurations')[depth2].get('Weight') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.EndpointConfigurations.' + str(depth2 + 1) + '.Weight', EndpointGroupConfigurations[depth1].get('EndpointConfigurations')[depth2].get('Weight'))
if EndpointGroupConfigurations[depth1].get('EndpointConfigurations')[depth2].get('Endpoint') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.EndpointConfigurations.' + str(depth2 + 1) + '.Endpoint', EndpointGroupConfigurations[depth1].get('EndpointConfigurations')[depth2].get('Endpoint'))
if EndpointGroupConfigurations[depth1].get('EndpointRequestProtocol') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.EndpointRequestProtocol', EndpointGroupConfigurations[depth1].get('EndpointRequestProtocol'))
if EndpointGroupConfigurations[depth1].get('PortOverrides') is not None:
for depth2 in range(len(EndpointGroupConfigurations[depth1].get('PortOverrides'))):
if EndpointGroupConfigurations[depth1].get('PortOverrides')[depth2].get('ListenerPort') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.PortOverrides.' + str(depth2 + 1) + '.ListenerPort', EndpointGroupConfigurations[depth1].get('PortOverrides')[depth2].get('ListenerPort'))
if EndpointGroupConfigurations[depth1].get('PortOverrides')[depth2].get('EndpointPort') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.PortOverrides.' + str(depth2 + 1) + '.EndpointPort', EndpointGroupConfigurations[depth1].get('PortOverrides')[depth2].get('EndpointPort'))
if EndpointGroupConfigurations[depth1].get('EnableClientIPPreservationToa') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.EnableClientIPPreservationToa', EndpointGroupConfigurations[depth1].get('EnableClientIPPreservationToa'))
if EndpointGroupConfigurations[depth1].get('EnableClientIPPreservationProxyProtocol') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.EnableClientIPPreservationProxyProtocol', EndpointGroupConfigurations[depth1].get('EnableClientIPPreservationProxyProtocol'))
if EndpointGroupConfigurations[depth1].get('EndpointGroupId') is not None:
self.add_query_param('EndpointGroupConfigurations.' + str(depth1 + 1) + '.EndpointGroupId', EndpointGroupConfigurations[depth1].get('EndpointGroupId'))
def get_ListenerId(self): # String
return self.get_query_params().get('ListenerId')
def set_ListenerId(self, ListenerId): # String
self.add_query_param('ListenerId', ListenerId)
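# Illustrative usage (assumption: this sketch is not part of the original SDK file).
# Nested repeat lists are flattened into dotted, 1-based query parameters:
#
#   req = UpdateEndpointGroupsRequest()
#   req.set_EndpointGroupConfigurationss([{
#       'EndpointGroupName': 'eg-1',
#       'EndpointConfigurations': [{'Type': 'Ip', 'Weight': 255, 'Endpoint': '1.2.3.4'}],
#   }])
#   # adds: EndpointGroupConfigurations.1.EndpointGroupName=eg-1
#   #       EndpointGroupConfigurations.1.EndpointConfigurations.1.Type=Ip, ...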
| null |
1,892 |
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# GUILOAD.py - Load window
###################################################
import GemRB
import LoadScreen
from GameCheck import MAX_PARTY_SIZE
from GUIDefines import *
LoadWindow = 0
TextAreaControl = 0
Games = ()
ScrollBar = 0
def OnLoad ():
global LoadWindow, TextAreaControl, Games, ScrollBar
GemRB.SetToken ("SaveDir", "mpsave") # iwd2 is always using 'mpsave'
LoadWindow = GemRB.LoadWindow (0, "GUILOAD")
CancelButton=LoadWindow.GetControl (22)
CancelButton.SetText (13727)
CancelButton.OnPress (LoadWindow.Close)
CancelButton.MakeEscape()
for i in range (5):
Button = LoadWindow.GetControl (55+i)
Button.SetText (15590)
Button.OnPress (LoadGamePress)
Button.SetState (IE_GUI_BUTTON_DISABLED)
Button.SetValue (i)
Button = LoadWindow.GetControl (60+i)
Button.SetText (13957)
Button.OnPress (DeleteGamePress)
Button.SetState (IE_GUI_BUTTON_DISABLED)
Button.SetValue (i)
#area previews
Button = LoadWindow.GetControl (1+i)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE|IE_GUI_BUTTON_PICTURE,OP_SET)
#PC portraits
for j in range (min(6, MAX_PARTY_SIZE)):
Button = LoadWindow.GetControl (25 + i*min(6, MAX_PARTY_SIZE) + j)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE|IE_GUI_BUTTON_PICTURE,OP_SET)
Button.SetSize (21, 21)
ScrollBar=LoadWindow.GetControl (23)
ScrollBar.OnChange (ScrollBarPress)
Games=GemRB.GetSaveGames()
TopIndex = max (0, len(Games) - 5)
ScrollBar.SetVarAssoc ("TopIndex", TopIndex, 0, TopIndex)
ScrollBarPress ()
LoadWindow.SetEventProxy(ScrollBar)
LoadWindow.Focus()
return
def ScrollBarPress ():
#draw load game portraits
Pos = GemRB.GetVar ("TopIndex")
for i in range (5):
ActPos = Pos + i
Button1 = LoadWindow.GetControl (55+i)
Button2 = LoadWindow.GetControl (60+i)
ScreenShotButton = LoadWindow.GetControl (1 + i)
if ActPos<len(Games):
Button1.SetState (IE_GUI_BUTTON_ENABLED)
Button2.SetState (IE_GUI_BUTTON_ENABLED)
ScreenShotButton.SetPicture (Games[ActPos].GetPreview())
Slotname = Games[ActPos].GetName()
GameDate = Games[ActPos].GetGameDate()
SaveDate = Games[ActPos].GetDate()
else:
Button1.SetState (IE_GUI_BUTTON_DISABLED)
Button2.SetState (IE_GUI_BUTTON_DISABLED)
ScreenShotButton.SetPicture (None)
Slotname = ""
GameDate = ""
SaveDate = ""
Label = LoadWindow.GetControl (0x10000005+i)
Label.SetText (Slotname)
Label = LoadWindow.GetControl (0x1000000a+i)
Label.SetText (GameDate)
Label = LoadWindow.GetControl (0x1000000f+i)
Label.SetText (SaveDate)
for j in range (min(6, MAX_PARTY_SIZE)):
Button=LoadWindow.GetControl (25 + i*min(6, MAX_PARTY_SIZE) + j)
if ActPos<len(Games):
Button.SetPicture (Games[ActPos].GetPortrait(j))
else:
Button.SetPicture (None)
return
def LoadGamePress (btn):
if LoadWindow:
LoadWindow.Close ()
Pos = GemRB.GetVar ("TopIndex") + btn.Value
LoadScreen.StartLoadScreen()
GemRB.LoadGame(Games[Pos]) #loads and enters savegame
GemRB.EnterGame ()
return
def DeleteGameConfirm (btn):
global Games
TopIndex = GemRB.GetVar ("TopIndex")
Pos = TopIndex + btn.Value
GemRB.DeleteSaveGame(Games[Pos])
del Games[Pos]
if TopIndex > 0:
TopIndex = TopIndex - 1
ScrollBar.SetVarAssoc ("TopIndex", TopIndex, 0, max (0, len(Games) - 5))
ScrollBarPress ()
if ConfirmWindow:
ConfirmWindow.Close ()
LoadWindow.Focus()
return
def DeleteGameCancel ():
if ConfirmWindow:
ConfirmWindow.Close ()
LoadWindow.Focus()
return
def DeleteGamePress (btn):
global ConfirmWindow
ConfirmWindow=GemRB.LoadWindow (1)
ConfirmWindow.SetFlags (WF_ALPHA_CHANNEL, OP_OR)
Text=ConfirmWindow.GetControl (0)
Text.SetText (15305)
DeleteButton=ConfirmWindow.GetControl (1)
DeleteButton.SetText (13957)
	DeleteButton.OnPress (DeleteGameConfirm)
DeleteButton.SetValue (btn.Value)
CancelButton=ConfirmWindow.GetControl (2)
CancelButton.SetText (13727)
CancelButton.OnPress (DeleteGameCancel)
CancelButton.MakeEscape()
ConfirmWindow.ShowModal (MODAL_SHADOW_GRAY)
return
| null |
1,893 |
import asyncio
import gzip
import logging
import subprocess
from pathlib import Path
from typing import Any, BinaryIO, Mapping, Optional, Tuple, cast
import pkg_resources
from aiodocker.docker import Docker
from aiodocker.exceptions import DockerError
from ai.backend.common.logging import BraceStyleAdapter
from ..exception import InitializationError
from ..utils import closing_async, get_arch_name, update_nested_dict
log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined]
class PersistentServiceContainer:
def __init__(
self,
image_ref: str,
container_config: Mapping[str, Any],
*,
        name: Optional[str] = None,
) -> None:
self.image_ref = image_ref
arch = get_arch_name()
default_container_name = image_ref.split(":")[0].rsplit("/", maxsplit=1)[-1]
if name is None:
self.container_name = default_container_name
else:
self.container_name = name
self.container_config = container_config
self.img_version = int(
Path(
pkg_resources.resource_filename(
"ai.backend.agent.docker",
f"{default_container_name}.version.txt",
)
).read_text()
)
self.img_path = Path(
pkg_resources.resource_filename(
"ai.backend.agent.docker",
f"{default_container_name}.img.{arch}.tar.gz",
)
)
async def get_container_version_and_status(self) -> Tuple[int, bool]:
async with closing_async(Docker()) as docker:
try:
c = docker.containers.container(self.container_name)
await c.show()
except DockerError as e:
if e.status == 404:
return 0, False
else:
raise
if c["Config"].get("Labels", {}).get("ai.backend.system", "0") != "1":
raise RuntimeError(
f"An existing container named \"{c['Name'].lstrip('/')}\" is not a system container"
" spawned by Backend.AI. Please check and remove it."
)
return (
int(c["Config"].get("Labels", {}).get("ai.backend.version", "0")),
c["State"]["Status"].lower() == "running",
)
    async def get_image_version(self) -> int:
async with closing_async(Docker()) as docker:
try:
img = await docker.images.inspect(self.image_ref)
except DockerError as e:
if e.status == 404:
return 0
else:
raise
return int((img["Config"].get("Labels") or {}).get("ai.backend.version", "0"))
async def ensure_running_latest(self) -> None:
        image_version = await self.get_image_version()
if image_version == 0:
log.info("PersistentServiceContainer({}): installing...", self.image_ref)
await self.install_latest()
elif image_version < self.img_version:
log.info(
"PersistentServiceContainer({}): upgrading (v{} -> v{})",
self.image_ref,
image_version,
self.img_version,
)
await self.install_latest()
container_version, is_running = await self.get_container_version_and_status()
if container_version == 0 or image_version != container_version or not is_running:
log.info("PersistentServiceContainer({}): recreating...", self.image_ref)
await self.recreate()
if not is_running:
log.info("PersistentServiceContainer({}): starting...", self.image_ref)
await self.start()
async def install_latest(self) -> None:
with gzip.open(self.img_path, "rb") as reader:
proc = await asyncio.create_subprocess_exec(
*["docker", "load"],
stdin=cast(BinaryIO, reader),
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
)
if await proc.wait() != 0:
stderr = b"(unavailable)"
if proc.stderr is not None:
stderr = await proc.stderr.read()
raise RuntimeError(
"loading the image has failed!",
self.image_ref,
proc.returncode,
stderr,
)
async def recreate(self) -> None:
async with closing_async(Docker()) as docker:
try:
c = docker.containers.container(self.container_name)
await c.stop()
await c.delete(force=True)
except DockerError as e:
if e.status == 409 and "is not running" in e.message:
pass
elif e.status == 404:
pass
else:
raise
container_config = {
"Image": self.image_ref,
"Tty": True,
"Privileged": False,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"HostConfig": {
"Init": True,
"RestartPolicy": {
"Name": "unless-stopped", # make it persistent
"MaximumRetryCount": 0,
},
},
}
update_nested_dict(container_config, self.container_config)
try:
await docker.containers.create(config=container_config, name=self.container_name)
except DockerError as e:
err_msg = e.args[1].get("message", "")
if (
e.args[0] == 400
and "bind source path does not exist" in err_msg
and "/tmp/backend.ai/ipc" in err_msg
):
raise InitializationError(
f"Could not create persistent service container '{self.container_name}'"
" because it cannot access /tmp/backend.ai/ipc directory. This may"
" occur when Docker is installed with Snap or the agent is configured"
" to use a private tmp directory. To resolve, explicitly configure the"
" 'ipc-base-path' option in agent.toml to indicate a directory under"
" $HOME or a non-virtualized directory.",
)
else:
raise
async def start(self) -> None:
async with closing_async(Docker()) as docker:
c = docker.containers.container(self.container_name)
await c.start()
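# Usage sketch (assumption: the image name and container config are illustrative):
#   container = PersistentServiceContainer(
#       "backendai-socket-relay:latest",
#       {"HostConfig": {"NetworkMode": "host"}},
#       name="backendai-socket-relay",
#   )
#   await container.ensure_running_latest()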
| null |
1,894 |
import logging
import os
from mercurial import (
hg,
ui,
)
from whoosh.writing import AsyncWriter
import tool_shed.webapp.model.mapping as ts_mapping
from galaxy.tool_util.loader_directory import load_tool_elements_from_path
from galaxy.tools.search import get_or_create_index
from galaxy.util import (
directory_hash_id,
ExecutionTimer,
pretty_print_time_interval,
unicodify,
)
from tool_shed.util.hgweb_config import hgweb_config_manager
from tool_shed.webapp import model
from tool_shed.webapp.search.repo_search import schema as repo_schema
from tool_shed.webapp.search.tool_search import schema as tool_schema
log = logging.getLogger(__name__)
def _get_or_create_index(whoosh_index_dir):
tool_index_dir = os.path.join(whoosh_index_dir, "tools")
if not os.path.exists(whoosh_index_dir):
os.makedirs(whoosh_index_dir)
if not os.path.exists(tool_index_dir):
os.makedirs(tool_index_dir)
return get_or_create_index(whoosh_index_dir, repo_schema), get_or_create_index(tool_index_dir, tool_schema)
def build_index(whoosh_index_dir, file_path, hgweb_config_dir, dburi, **kwargs):
"""
Build two search indexes simultaneously
One is for repositories and the other for tools.
Returns a tuple with number of repos and tools that were indexed.
"""
model = ts_mapping.init(dburi, engine_options={}, create_tables=False)
sa_session = model.session
repo_index, tool_index = _get_or_create_index(whoosh_index_dir)
repo_index_writer = AsyncWriter(repo_index)
tool_index_writer = AsyncWriter(tool_index)
repos_indexed = 0
tools_indexed = 0
execution_timer = ExecutionTimer()
with repo_index.searcher() as searcher:
for repo in get_repos(sa_session, file_path, hgweb_config_dir, **kwargs):
tools_list = repo.pop("tools_list")
repo_id = repo["id"]
indexed_document = searcher.document(id=repo_id)
if indexed_document:
if indexed_document["full_last_updated"] == repo.get("full_last_updated"):
# We're done, since we sorted repos by update time
break
else:
# Got an update, delete the previous document
repo_index_writer.delete_by_term("id", repo_id)
repo_index_writer.add_document(**repo)
# Tools get their own index
tool_index_writer.delete_by_term("repo_id", repo_id)
for tool in tools_list:
tool_contents = tool.copy()
tool_contents["repo_owner_username"] = repo.get("repo_owner_username")
tool_contents["repo_name"] = repo.get("name")
tool_contents["repo_id"] = repo_id
tool_index_writer.add_document(**tool_contents)
tools_indexed += 1
repos_indexed += 1
tool_index_writer.commit()
repo_index_writer.commit()
log.info("Indexed repos: %s, tools: %s", repos_indexed, tools_indexed)
log.info("Toolbox index finished %s", execution_timer)
return repos_indexed, tools_indexed
def get_repos(sa_session, file_path, hgweb_config_dir, **kwargs):
"""
Load repos from DB and included tools from .xml configs.
"""
hgwcm = hgweb_config_manager
hgwcm.hgweb_config_dir = hgweb_config_dir
# Do not index deleted, deprecated, or "tool_dependency_definition" type repositories.
q = (
sa_session.query(model.Repository)
.filter_by(deleted=False)
.filter_by(deprecated=False)
.order_by(model.Repository.update_time.desc())
)
q = q.filter(model.Repository.type != "tool_dependency_definition")
for repo in q:
category_names = []
for rca in sa_session.query(model.RepositoryCategoryAssociation).filter(
model.RepositoryCategoryAssociation.repository_id == repo.id
):
for category in sa_session.query(model.Category).filter(model.Category.id == rca.category.id):
category_names.append(category.name.lower())
categories = (",").join(category_names)
repo_id = repo.id
name = repo.name
description = repo.description
long_description = repo.long_description
homepage_url = repo.homepage_url
remote_repository_url = repo.remote_repository_url
times_downloaded = repo.times_downloaded or 0
repo_owner_username = ""
if repo.user_id is not None:
user = sa_session.query(model.User).filter(model.User.id == repo.user_id).one()
repo_owner_username = user.username.lower()
last_updated = pretty_print_time_interval(repo.update_time)
full_last_updated = repo.update_time.strftime("%Y-%m-%d %I:%M %p")
# Load all changesets of the repo for lineage.
repo_path = os.path.join(
hgweb_config_dir, hgwcm.get_entry(os.path.join("repos", repo.user.username, repo.name))
)
hg_repo = hg.repository(ui.ui(), repo_path.encode("utf-8"))
lineage = []
for changeset in hg_repo.changelog:
lineage.append(f"{unicodify(changeset)}:{unicodify(hg_repo[changeset])}")
repo_lineage = str(lineage)
# Parse all the tools within repo for a separate index.
tools_list = []
path = os.path.join(file_path, *directory_hash_id(repo.id))
path = os.path.join(path, "repo_%d" % repo.id)
if os.path.exists(path):
            tools_list.extend(load_one_dir(path))
for root, dirs, _files in os.walk(path):
if ".hg" in dirs:
dirs.remove(".hg")
for dirname in dirs:
                    tools_in_dir = load_one_dir(os.path.join(root, dirname))
tools_list.extend(tools_in_dir)
yield (
dict(
id=unicodify(repo_id),
name=unicodify(name),
description=unicodify(description),
long_description=unicodify(long_description),
homepage_url=unicodify(homepage_url),
remote_repository_url=unicodify(remote_repository_url),
repo_owner_username=unicodify(repo_owner_username),
times_downloaded=unicodify(times_downloaded),
approved=unicodify("no"),
last_updated=unicodify(last_updated),
full_last_updated=unicodify(full_last_updated),
tools_list=tools_list,
repo_lineage=unicodify(repo_lineage),
categories=unicodify(categories),
)
)
def debug_handler(path, exc_info):
"""
By default the underlying tool parsing logs warnings for each exception.
    This is very chatty, hence this method changes it to debug level.
"""
log.debug(f"Failed to load tool with path {path}.", exc_info=exc_info)
def load_one_dir(path):
tools_in_dir = []
tool_elems = load_tool_elements_from_path(path, load_exception_handler=debug_handler)
if tool_elems:
for elem in tool_elems:
root = elem[1].getroot()
if root.tag == "tool":
tool = {}
if root.find("help") is not None:
tool.update(dict(help=unicodify(root.find("help").text)))
if root.find("description") is not None:
tool.update(dict(description=unicodify(root.find("description").text)))
tool.update(
dict(
id=unicodify(root.attrib.get("id")),
name=unicodify(root.attrib.get("name")),
version=unicodify(root.attrib.get("version")),
)
)
tools_in_dir.append(tool)
return tools_in_dir
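# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal way to drive build_index() from a script. Every path and the
# database URI below are hypothetical placeholders; a real Tool Shed install
# would supply its own configured locations.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    n_repos, n_tools = build_index(
        whoosh_index_dir="/srv/toolshed/whoosh_index",  # hypothetical path
        file_path="/srv/toolshed/files",                # hypothetical path
        hgweb_config_dir="/srv/toolshed/hgweb",         # hypothetical path
        dburi="sqlite:///toolshed.sqlite",              # hypothetical DB URI
    )
    print(f"Indexed {n_repos} repositories and {n_tools} tools.")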
| null |
1,895 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class CreateRouterInterfaceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateRouterInterface','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AccessPointId(self): # String
return self.get_query_params().get('AccessPointId')
def set_AccessPointId(self, AccessPointId): # String
self.add_query_param('AccessPointId', AccessPointId)
def get_OppositeAccessPointId(self): # String
return self.get_query_params().get('OppositeAccessPointId')
	def set_OppositeAccessPointId(self, OppositeAccessPointId): # String
self.add_query_param('OppositeAccessPointId', OppositeAccessPointId)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Spec(self): # String
return self.get_query_params().get('Spec')
def set_Spec(self, Spec): # String
self.add_query_param('Spec', Spec)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Period(self): # Integer
return self.get_query_params().get('Period')
def set_Period(self, Period): # Integer
self.add_query_param('Period', Period)
def get_OppositeRegionId(self): # String
return self.get_query_params().get('OppositeRegionId')
def set_OppositeRegionId(self, OppositeRegionId): # String
self.add_query_param('OppositeRegionId', OppositeRegionId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_OppositeInterfaceOwnerId(self): # String
return self.get_query_params().get('OppositeInterfaceOwnerId')
def set_OppositeInterfaceOwnerId(self, OppositeInterfaceOwnerId): # String
self.add_query_param('OppositeInterfaceOwnerId', OppositeInterfaceOwnerId)
def get_Tagss(self): # RepeatList
return self.get_query_params().get('Tags')
def set_Tagss(self, Tags): # RepeatList
for depth1 in range(len(Tags)):
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
def get_AutoRenew(self): # Boolean
return self.get_query_params().get('AutoRenew')
def set_AutoRenew(self, AutoRenew): # Boolean
self.add_query_param('AutoRenew', AutoRenew)
def get_OppositeRouterType(self): # String
return self.get_query_params().get('OppositeRouterType')
def set_OppositeRouterType(self, OppositeRouterType): # String
self.add_query_param('OppositeRouterType', OppositeRouterType)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_PricingCycle(self): # String
return self.get_query_params().get('PricingCycle')
def set_PricingCycle(self, PricingCycle): # String
self.add_query_param('PricingCycle', PricingCycle)
def get_OppositeRouterId(self): # String
return self.get_query_params().get('OppositeRouterId')
def set_OppositeRouterId(self, OppositeRouterId): # String
self.add_query_param('OppositeRouterId', OppositeRouterId)
def get_Role(self): # String
return self.get_query_params().get('Role')
def set_Role(self, Role): # String
self.add_query_param('Role', Role)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_HealthCheckTargetIp(self): # String
return self.get_query_params().get('HealthCheckTargetIp')
def set_HealthCheckTargetIp(self, HealthCheckTargetIp): # String
self.add_query_param('HealthCheckTargetIp', HealthCheckTargetIp)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_FastLinkMode(self): # Boolean
return self.get_query_params().get('FastLinkMode')
def set_FastLinkMode(self, FastLinkMode): # Boolean
self.add_query_param('FastLinkMode', FastLinkMode)
def get_OppositeInterfaceId(self): # String
return self.get_query_params().get('OppositeInterfaceId')
def set_OppositeInterfaceId(self, OppositeInterfaceId): # String
self.add_query_param('OppositeInterfaceId', OppositeInterfaceId)
def get_InstanceChargeType(self): # String
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self, InstanceChargeType): # String
self.add_query_param('InstanceChargeType', InstanceChargeType)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_RouterType(self): # String
return self.get_query_params().get('RouterType')
def set_RouterType(self, RouterType): # String
self.add_query_param('RouterType', RouterType)
def get_HealthCheckSourceIp(self): # String
return self.get_query_params().get('HealthCheckSourceIp')
def set_HealthCheckSourceIp(self, HealthCheckSourceIp): # String
self.add_query_param('HealthCheckSourceIp', HealthCheckSourceIp)
def get_RouterId(self): # String
return self.get_query_params().get('RouterId')
def set_RouterId(self, RouterId): # String
self.add_query_param('RouterId', RouterId)
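# --- Usage sketch (illustrative addition, not part of the generated SDK file) ---
# The standard aliyun-python-sdk-core pattern for sending this request; the
# credentials, region, and parameter values are placeholders, and the call
# only succeeds against a real account.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = CreateRouterInterfaceRequest()
    request.set_RouterType('VRouter')            # type of the local router
    request.set_Role('InitiatingSide')           # this side initiates the connection
    request.set_Spec('Large.2')                  # bandwidth specification (placeholder)
    request.set_OppositeRegionId('cn-shanghai')  # region of the peer interface
    response = client.do_action_with_exception(request)
    print(str(response, encoding='utf-8'))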
| null |
1,896 |
######################################################################
# BioSimSpace: Making biomolecular simulation a breeze!
#
# Copyright: 2017-2023
#
# Authors: Lester Hedges <[email protected]>
#
# BioSimSpace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BioSimSpace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BioSimSpace. If not, see <http://www.gnu.org/licenses/>.
#####################################################################
"""Functionality for generating box parameters."""
__author__ = "Lester Hedges"
__email__ = "[email protected]"
__all__ = [
"boxTypes",
"generateBoxParameters",
"cubic",
"rhombicDodecahedronSquare",
"rhombicDodecahedronHexagon",
"truncatedOctahedron",
]
from sire.legacy.Maths import Vector as _Vector
from sire.legacy.Vol import TriclinicBox as _TriclinicBox
from ..Types import Angle as _Angle
from ..Types import Length as _Length
def generateBoxParameters(box_type, image_distance):
"""
Generate parameters for the named box type with specified image distance.
Parameters
----------
box_type : str
The name of the box type. Run BioSimSpace.Box.boxTypes() to get a
list of the supported boxes.
image_distance : :class:`Length <BioSimSpace.Types.Length>`
The image distance.
Returns
-------
box : [:class:`Length <BioSimSpace.Types.Length>`]
The box vector magnitudes.
angles : [:class:`Angle <BioSimSpace.Types.Angle>`]
The box vector angles: yz, xz, and xy.
"""
if not isinstance(box_type, str):
raise TypeError("'box_type' must be of type 'str'")
else:
# Strip whitespace and convert to lower case.
box_type = box_type.replace(" ", "").lower()
if box_type not in _box_types_lower:
raise ValueError("Supported box types are: %s" % boxTypes())
return _box_types_dict[box_type](image_distance)
def cubic(image_distance):
"""
Generate parameters for a cubic box.
Parameters
----------
image_distance : :class:`Length <BioSimSpace.Types.Length>`
The image distance.
Returns
-------
box : [:class:`Length <BioSimSpace.Types.Length>`]
The box vector magnitudes.
angles : [:class:`Angle <BioSimSpace.Types.Angle>`]
The box vector angles: yz, xz, and xy.
"""
# Validate arguments.
if not isinstance(image_distance, _Length):
raise TypeError("'image_distance' must be of type 'BioSimSpace.Types.Length'.")
if image_distance.value() <= 0:
raise ValueError("'image_distance' must be greater than zero.")
box = 3 * [image_distance]
angles = 3 * [_Angle(90, "degrees")]
return box, angles
def rhombicDodecahedronSquare(image_distance):
"""
Generate parameters for a square rhombic dodecahedron.
Parameters
----------
image_distance : :class:`Length <BioSimSpace.Types.Length>`
The image distance.
Returns
-------
box : [:class:`Length <BioSimSpace.Types.Length>`]
The box vector magnitudes.
angles : [:class:`Angle <BioSimSpace.Types.Angle>`]
The box vector angles: yz, xz, and xy.
"""
# Validate arguments.
if not isinstance(image_distance, _Length):
raise TypeError("'image_distance' must be of type 'BioSimSpace.Types.Length'.")
if image_distance.value() <= 0:
raise ValueError("'image_distance' must be greater than zero.")
# Create the triclinic box.
    triclinic_box = _TriclinicBox.rhombicDodecahedronSquare(
image_distance.angstroms().value()
)
return _get_box_parameters(triclinic_box)
def rhombicDodecahedronHexagon(image_distance):
"""
Generate parameters for a hexagonal rhombic dodecahedron.
Parameters
----------
image_distance : :class:`Length <BioSimSpace.Types.Length>`
The image distance.
Returns
-------
box : [:class:`Length <BioSimSpace.Types.Length>`]
The box vector magnitudes.
angles : [:class:`Angle <BioSimSpace.Types.Angle>`]
The box vector angles: yz, xz, and xy.
"""
# Validate arguments.
if not isinstance(image_distance, _Length):
raise TypeError("'image_distance' must be of type 'BioSimSpace.Types.Length'.")
if image_distance.value() <= 0:
raise ValueError("'image_distance' must be greater than zero.")
# Create the triclinic box.
triclinic_box = _TriclinicBox.rhombicDodecahedronHexagon(
image_distance.angstroms().value()
)
return _get_box_parameters(triclinic_box)
def truncatedOctahedron(image_distance):
"""
Generate parameters for a truncated octahedron.
Parameters
----------
image_distance : :class:`Length <BioSimSpace.Types.Length>`
The image distance.
Returns
-------
box : [:class:`Length <BioSimSpace.Types.Length>`]
The box vector magnitudes.
angles : [:class:`Angle <BioSimSpace.Types.Angle>`]
The box vector angles: yz, xz, and xy.
"""
# Validate arguments.
if not isinstance(image_distance, _Length):
raise TypeError("'image_distance' must be of type 'BioSimSpace.Types.Length'.")
if image_distance.value() <= 0:
raise ValueError("'image_distance' must be greater than zero.")
# Create the triclinic box.
triclinic_box = _TriclinicBox.truncatedOctahedron(
image_distance.angstroms().value()
)
return _get_box_parameters(triclinic_box)
def _get_box_parameters(triclinic_box):
"""
Internal helper function to get parameters for the passed triclinic box.
Parameters
----------
    triclinic_box : :class:`TriclinicBox <Sire.Vol.TriclinicBox>`
        The triclinic box.
    Returns
    -------
    box : [:class:`Length <BioSimSpace.Types.Length>`]
        The box vector magnitudes.
    angles : [:class:`Angle <BioSimSpace.Types.Angle>`]
        The box vector angles: yz, xz, and xy.
    """
box = [
_Length(triclinic_box.vector0().magnitude(), "angstrom"),
_Length(triclinic_box.vector1().magnitude(), "angstrom"),
_Length(triclinic_box.vector2().magnitude(), "angstrom"),
]
angles = [
_Angle(
_Vector.angle(triclinic_box.vector1(), triclinic_box.vector2()).value(),
"radians",
).degrees(),
_Angle(
_Vector.angle(triclinic_box.vector0(), triclinic_box.vector2()).value(),
"radians",
).degrees(),
_Angle(
_Vector.angle(triclinic_box.vector0(), triclinic_box.vector1()).value(),
"radians",
).degrees(),
]
return box, angles
# Create a list of the box type names.
# This needs to come after all of the box functions.
_box_types = [] # List of box types (actual names).
_box_types_lower = [] # List of lower case names.
_box_types_dict = {} # Mapping between lower case names and functions.
import sys as _sys
_namespace = _sys.modules[__name__]
for _var in dir():
if _var[0] != "_" and _var[0].upper() != "G":
_box_types.append(_var)
_box_types_lower.append(_var.lower())
_box_types_dict[_var.lower()] = getattr(_namespace, _var)
del _namespace
del _sys
del _var
def boxTypes():
"""
Return a list of the supported box types.
Returns
-------
    box_types : [str]
A list of the supported box types.
"""
return _box_types
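# --- Usage sketch (illustrative addition, not part of the original module) ---
# Shown commented out because this module uses relative imports and is not
# meant to run standalone. It assumes the conventional top-level import name
# BioSimSpace; "truncated octahedron" is normalised to "truncatedoctahedron"
# by generateBoxParameters before dispatch to truncatedOctahedron().
#
# import BioSimSpace as BSS
# box, angles = BSS.Box.generateBoxParameters(
#     "truncated octahedron", 10 * BSS.Units.Length.nanometer
# )
# print(box)     # three Length magnitudes
# print(angles)  # three Angle values: yz, xz, xy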
| null |
1,897 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateElasticityAssuranceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateElasticityAssurance','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_StartTime(self): # String
return self.get_query_params().get('StartTime')
def set_StartTime(self, StartTime): # String
self.add_query_param('StartTime', StartTime)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
	def get_PrivatePoolOptionsMatchCriteria(self): # String
return self.get_query_params().get('PrivatePoolOptions.MatchCriteria')
def set_PrivatePoolOptionsMatchCriteria(self, PrivatePoolOptionsMatchCriteria): # String
self.add_query_param('PrivatePoolOptions.MatchCriteria', PrivatePoolOptionsMatchCriteria)
def get_InstanceTypes(self): # RepeatList
return self.get_query_params().get('InstanceType')
def set_InstanceTypes(self, InstanceType): # RepeatList
for depth1 in range(len(InstanceType)):
self.add_query_param('InstanceType.' + str(depth1 + 1), InstanceType[depth1])
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_Period(self): # Integer
return self.get_query_params().get('Period')
def set_Period(self, Period): # Integer
self.add_query_param('Period', Period)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_PrivatePoolOptionsName(self): # String
return self.get_query_params().get('PrivatePoolOptions.Name')
def set_PrivatePoolOptionsName(self, PrivatePoolOptionsName): # String
self.add_query_param('PrivatePoolOptions.Name', PrivatePoolOptionsName)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_AssuranceTimes(self): # String
return self.get_query_params().get('AssuranceTimes')
def set_AssuranceTimes(self, AssuranceTimes): # String
self.add_query_param('AssuranceTimes', AssuranceTimes)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceCpuCoreCount(self): # Integer
return self.get_query_params().get('InstanceCpuCoreCount')
def set_InstanceCpuCoreCount(self, InstanceCpuCoreCount): # Integer
self.add_query_param('InstanceCpuCoreCount', InstanceCpuCoreCount)
def get_PeriodUnit(self): # String
return self.get_query_params().get('PeriodUnit')
def set_PeriodUnit(self, PeriodUnit): # String
self.add_query_param('PeriodUnit', PeriodUnit)
def get_ZoneIds(self): # RepeatList
return self.get_query_params().get('ZoneId')
def set_ZoneIds(self, ZoneId): # RepeatList
for depth1 in range(len(ZoneId)):
self.add_query_param('ZoneId.' + str(depth1 + 1), ZoneId[depth1])
def get_InstanceAmount(self): # Integer
return self.get_query_params().get('InstanceAmount')
def set_InstanceAmount(self, InstanceAmount): # Integer
self.add_query_param('InstanceAmount', InstanceAmount)
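# --- Usage sketch (illustrative addition, not part of the generated SDK file) ---
# Demonstrates how the RepeatList setters expand Python lists into numbered
# query parameters (InstanceType.1, ZoneId.1, Tag.1.Key, ...). All values are
# placeholders; no network call is made here.
if __name__ == "__main__":
    request = CreateElasticityAssuranceRequest()
    request.set_InstanceTypes(['ecs.g7.large'])            # -> InstanceType.1
    request.set_ZoneIds(['cn-hangzhou-h'])                 # -> ZoneId.1
    request.set_InstanceAmount(2)
    request.set_Tags([{'Key': 'team', 'Value': 'infra'}])  # -> Tag.1.Key / Tag.1.Value
    print(request.get_query_params())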
| null |
1,898 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class AuthorizeSecurityGroupEgressRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'AuthorizeSecurityGroupEgress','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_NicType(self): # String
return self.get_query_params().get('NicType')
def set_NicType(self, NicType): # String
self.add_query_param('NicType', NicType)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_SourcePortRange(self): # String
return self.get_query_params().get('SourcePortRange')
def set_SourcePortRange(self, SourcePortRange): # String
self.add_query_param('SourcePortRange', SourcePortRange)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_DestPrefixListId(self): # String
return self.get_query_params().get('DestPrefixListId')
def set_DestPrefixListId(self, DestPrefixListId): # String
self.add_query_param('DestPrefixListId', DestPrefixListId)
def get_SecurityGroupId(self): # String
return self.get_query_params().get('SecurityGroupId')
def set_SecurityGroupId(self, SecurityGroupId): # String
self.add_query_param('SecurityGroupId', SecurityGroupId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_Permissions(self): # Array
return self.get_query_params().get('Permissions')
def set_Permissions(self, Permissions): # Array
for index1, value1 in enumerate(Permissions):
if value1.get('Policy') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.Policy', value1.get('Policy'))
if value1.get('Priority') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.Priority', value1.get('Priority'))
if value1.get('IpProtocol') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.IpProtocol', value1.get('IpProtocol'))
if value1.get('DestCidrIp') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.DestCidrIp', value1.get('DestCidrIp'))
if value1.get('Ipv6DestCidrIp') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.Ipv6DestCidrIp', value1.get('Ipv6DestCidrIp'))
if value1.get('DestGroupId') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.DestGroupId', value1.get('DestGroupId'))
if value1.get('DestPrefixListId') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.DestPrefixListId', value1.get('DestPrefixListId'))
if value1.get('PortRange') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.PortRange', value1.get('PortRange'))
if value1.get('SourceCidrIp') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.SourceCidrIp', value1.get('SourceCidrIp'))
if value1.get('Ipv6SourceCidrIp') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.Ipv6SourceCidrIp', value1.get('Ipv6SourceCidrIp'))
if value1.get('SourcePortRange') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.SourcePortRange', value1.get('SourcePortRange'))
if value1.get('DestGroupOwnerAccount') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.DestGroupOwnerAccount', value1.get('DestGroupOwnerAccount'))
if value1.get('DestGroupOwnerId') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.DestGroupOwnerId', value1.get('DestGroupOwnerId'))
if value1.get('NicType') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.NicType', value1.get('NicType'))
if value1.get('Description') is not None:
self.add_query_param('Permissions.' + str(index1 + 1) + '.Description', value1.get('Description'))
def get_Policy(self): # String
return self.get_query_params().get('Policy')
def set_Policy(self, Policy): # String
self.add_query_param('Policy', Policy)
def get_Ipv6DestCidrIp(self): # String
return self.get_query_params().get('Ipv6DestCidrIp')
def set_Ipv6DestCidrIp(self, Ipv6DestCidrIp): # String
self.add_query_param('Ipv6DestCidrIp', Ipv6DestCidrIp)
def get_Ipv6SourceCidrIp(self): # String
return self.get_query_params().get('Ipv6SourceCidrIp')
def set_Ipv6SourceCidrIp(self, Ipv6SourceCidrIp): # String
self.add_query_param('Ipv6SourceCidrIp', Ipv6SourceCidrIp)
	def get_PortRange(self): # String
return self.get_query_params().get('PortRange')
def set_PortRange(self, PortRange): # String
self.add_query_param('PortRange', PortRange)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_IpProtocol(self): # String
return self.get_query_params().get('IpProtocol')
def set_IpProtocol(self, IpProtocol): # String
self.add_query_param('IpProtocol', IpProtocol)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_SourceCidrIp(self): # String
return self.get_query_params().get('SourceCidrIp')
def set_SourceCidrIp(self, SourceCidrIp): # String
self.add_query_param('SourceCidrIp', SourceCidrIp)
def get_DestGroupId(self): # String
return self.get_query_params().get('DestGroupId')
def set_DestGroupId(self, DestGroupId): # String
self.add_query_param('DestGroupId', DestGroupId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_Priority(self): # String
return self.get_query_params().get('Priority')
def set_Priority(self, Priority): # String
self.add_query_param('Priority', Priority)
def get_DestGroupOwnerAccount(self): # String
return self.get_query_params().get('DestGroupOwnerAccount')
def set_DestGroupOwnerAccount(self, DestGroupOwnerAccount): # String
self.add_query_param('DestGroupOwnerAccount', DestGroupOwnerAccount)
def get_DestCidrIp(self): # String
return self.get_query_params().get('DestCidrIp')
def set_DestCidrIp(self, DestCidrIp): # String
self.add_query_param('DestCidrIp', DestCidrIp)
def get_DestGroupOwnerId(self): # Long
return self.get_query_params().get('DestGroupOwnerId')
def set_DestGroupOwnerId(self, DestGroupOwnerId): # Long
self.add_query_param('DestGroupOwnerId', DestGroupOwnerId)
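# --- Usage sketch (illustrative addition, not part of the generated SDK file) ---
# Shows how set_Permissions flattens a list of rule dicts into the numbered
# Permissions.N.* query parameters. All values are placeholders; no network
# call is made here.
if __name__ == "__main__":
    request = AuthorizeSecurityGroupEgressRequest()
    request.set_SecurityGroupId('sg-xxxxxxxx')  # placeholder security group id
    request.set_Permissions([
        {
            'Policy': 'accept',
            'IpProtocol': 'tcp',
            'PortRange': '443/443',
            'DestCidrIp': '0.0.0.0/0',
            'Description': 'allow outbound HTTPS',
        }
    ])
    print(request.get_query_params())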
| null |
1,899 |
import re
from typing import (
Optional,
Union,
)
from lxml.etree import XMLSyntaxError
from galaxy.tool_util.verify import asserts
from galaxy.util import (
asbool,
parse_xml_string,
unicodify,
)
def assert_is_valid_xml(output: str) -> None:
"""Simple assertion that just verifies the specified output
is valid XML."""
try:
parse_xml_string(output)
except XMLSyntaxError as e:
raise AssertionError(f"Expected valid XML, but could not parse output. {unicodify(e)}")
def assert_has_element_with_path(output: str, path: str, negate: Union[bool, str] = False) -> None:
"""Asserts the specified output has at least one XML element with a
path matching the specified path argument. Valid paths are the
    simplified subsets of XPath implemented by lxml.etree; see
    https://lxml.de/xpathxslt.html for more information."""
assert_xml_element(output, path, negate=negate)
def assert_has_n_elements_with_path(
output: str,
path: str,
n: Optional[Union[int, str]] = None,
delta: Union[int, str] = 0,
min: Optional[Union[int, str]] = None,
max: Optional[Union[int, str]] = None,
negate: Union[bool, str] = False,
) -> None:
"""Asserts the specified output has exactly n elements matching the
path specified."""
assert_xml_element(output, path, n=n, delta=delta, min=min, max=max, negate=negate)
def assert_element_text_matches(output: str, path: str, expression: str, negate: Union[bool, str] = False) -> None:
"""Asserts the text of the first element matching the specified
path matches the specified regular expression."""
sub = {"tag": "has_text_matching", "attributes": {"expression": expression, "negate": negate}}
assert_xml_element(output, path, asserts.verify_assertions, [sub])
def assert_element_text_is(output: str, path: str, text: str, negate: Union[bool, str] = False) -> None:
"""Asserts the text of the first element matching the specified
path matches exactly the specified text."""
assert_element_text_matches(output, path, re.escape(text) + "$", negate=negate)
def assert_attribute_matches(
    output: str, path: str, attribute: str, expression: str, negate: Union[bool, str] = False
) -> None:
"""Asserts the specified attribute of the first element matching
the specified path matches the specified regular expression."""
sub = {"tag": "has_text_matching", "attributes": {"expression": expression, "negate": negate}}
assert_xml_element(output, path, asserts.verify_assertions, [sub], attribute=attribute)
def assert_attribute_is(output: str, path: str, attribute: str, text: str, negate: Union[bool, str] = False) -> None:
"""Asserts the specified attribute of the first element matching
the specified path matches exactly the specified text."""
assert_attribute_matches(output, path, attribute, re.escape(text) + "$", negate=negate)
def assert_element_text(
output: str, path: str, verify_assertions_function, children, negate: Union[bool, str] = False
) -> None:
"""Recursively checks the specified assertions against the text of
the first element matching the specified path."""
assert_xml_element(output, path, verify_assertions_function, children, negate=negate)
def assert_xml_element(
output: str,
path: str,
verify_assertions_function=None,
children=None,
attribute: Optional[str] = None,
all: Union[bool, str] = False,
n: Optional[Union[int, str]] = None,
delta: Union[int, str] = 0,
min: Optional[Union[int, str]] = None,
max: Optional[Union[int, str]] = None,
negate: Union[bool, str] = False,
) -> None:
"""
    Check if path occurs in the xml. If n and delta or min and max are given,
    the number of occurrences is also checked.
    If there are any sub-assertions then check them against
    - the element's text if attribute is None
    - the content of the attribute
    If all is True then the sub-assertions are checked for all occurrences.
"""
children = children or []
all = asbool(all)
# assert that path is in output (the specified number of times)
xml = parse_xml_string(output)
asserts._util._assert_presence_number(
xml,
path,
n,
delta,
min,
max,
negate,
lambda x, p: x.find(p) is not None,
lambda x, p: len(x.findall(p)),
"{expected} path '{text}' in xml",
"{expected} {n}+-{delta} occurrences of path '{text}' in xml",
"{expected} that the number of occurences of path '{text}' in xml is in [{min}:{max}]",
)
# check sub-assertions
if len(children) == 0 or verify_assertions_function is None:
return
for occ in xml.findall(path):
if attribute is None or attribute == "":
content = occ.text
else:
content = occ.attrib[attribute]
try:
verify_assertions_function(content, children)
except AssertionError as e:
if attribute is not None and attribute != "":
raise AssertionError(f"Attribute '{attribute}' on element with path '{path}': {str(e)}")
else:
raise AssertionError(f"Text of element with path '{path}': {str(e)}")
if not all:
break
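# --- Usage sketch (illustrative addition, not part of the original module) ---
# Minimal self-checks against an inline XML snippet; the document is made up,
# but the assertion helpers are used exactly as defined above.
if __name__ == "__main__":
    doc = "<collection><sample id='a'/><sample id='b'/></collection>"
    assert_is_valid_xml(doc)
    assert_has_element_with_path(doc, "sample")
    assert_has_n_elements_with_path(doc, "sample", n=2)
    assert_attribute_is(doc, "sample", "id", "a")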
| null |