id (int64, 0-6k) | code (stringlengths 4k-8k) | code_compressed (null)
---|---|---|
1,700 |
#!/usr/bin/python
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
import feedvalidator
import unittest, types, os, sys, glob, re
from feedvalidator.logging import Message,SelfDoesntMatchLocation,MissingSelf
from feedvalidator import compatibility
from feedvalidator.formatter.application_test import Formatter
class TestCase(unittest.TestCase):
def failIfNoMessage(self, theList=[]):
filterFunc = compatibility.AA
events = filterFunc(theList)
output = Formatter(events)
for e in events:
if not output.format(e):
raise self.failureException('could not construct message for %s' % e)
def failUnlessContainsInstanceOf(self, theClass, params, theList, msg=None):
"""Fail if there are no instances of theClass in theList with given params"""
self.failIfNoMessage(theList)
failure=(msg or 'no %s instances in %s' % (theClass.__name__, repr(theList)))
for item in theList:
if issubclass(item.__class__, theClass):
if not params: return
for k, v in list(params.items()):
if str(item.params[k]) != v:
failure=("%s.%s value was %s, expected %s" %
(theClass.__name__, k, item.params[k], v))
break
else:
return
raise self.failureException(failure)
def failIfContainsInstanceOf(self, theClass, params, theList, msg=None):
"""Fail if there are instances of theClass in theList with given params"""
self.failIfNoMessage(theList)
for item in theList:
if theClass==Message and isinstance(item,SelfDoesntMatchLocation):
continue
if theClass==Message and isinstance(item,MissingSelf):
continue
if issubclass(item.__class__, theClass):
if not params:
raise self.failureException(msg or 'unexpected %s' % (item.__class__.__name__))
allmatch = 1
for k, v in list(params.items()):
if item.params[k] != v:
allmatch = 0
if allmatch:
raise self.failureException("unexpected %s.%s with a value of %s" % \
(theClass.__name__, k, v))
desc_re = re.compile(r"<!--\s*Description:\s*(.*?)\s*Expect:\s*(!?)(\w*)(?:{(.*?)})?\s*-->")
validome_re = re.compile(r"<!--\s*Description:\s*(.*?)\s*Message:\s*(!?)(\w*).*?\s*-->", re.S)
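# Illustrative note (added commentary, not part of the original suite):
# desc_re splits a test-case header comment such as
#   <!-- Description: channel must include title Expect: MissingTitle{element:title} -->
# into the groups ('channel must include title', '', 'MissingTitle',
# 'element:title'); a leading '!' before the exception name turns the test
# into a "must not occur" assertion.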
def getDescription(xmlfile):
"""Extract description and exception from XML file
The deal here is that each test case is an XML file which contains
not only a possibly invalid RSS feed but also the description of the
test, i.e. the exception that we would expect the RSS validator to
raise (or not) when it validates the feed. The expected exception and
the human-readable description are placed into an XML comment like this:
<!--
Description: channel must include title
Expect: MissingTitle
-->
"""
with open(xmlfile, encoding='utf-8', errors='replace') as stream:
xmldoc = stream.read()
search_results = desc_re.search(xmldoc)
if search_results:
description, cond, excName, plist = list(search_results.groups())
else:
search_results = validome_re.search(xmldoc)
if search_results:
plist = ''
description, cond, excName = list(search_results.groups())
excName = excName.capitalize()
if excName=='Valid': cond,excName = '!', 'Message'
else:
raise RuntimeError("can't parse %s" % xmlfile)
if cond == "":
method = TestCase.failUnlessContainsInstanceOf
else:
method = TestCase.failIfContainsInstanceOf
params = {}
if plist:
for entry in plist.split(','):
name,value = entry.lstrip().split(':',1)
params[name] = value
exc = getattr(feedvalidator, excName)
description = xmlfile + ": " + description
return method, description, params, exc
def buildTestCase(xmlfile, xmlBase, description, method, exc, params):
"""factory to create functions which validate `xmlfile`
the returned function asserts that validating `xmlfile` (an XML file)
will return a list of exceptions that include an instance of
`exc` (an Exception class)
"""
def func(self, xmlfile=xmlfile, exc=exc, params=params):
with open(xmlfile, 'rb') as stream:
xmldoc = stream.read()
loggedEvents = feedvalidator.validateString(xmldoc, fallback='US-ASCII', base=xmlBase)['loggedEvents']
method(self, exc, params, loggedEvents)
func.__doc__ = description
return func
def METHOD_NAME():
curdir = os.path.dirname(os.path.abspath(__file__))
basedir = os.path.split(curdir)[0]
for xmlfile in sys.argv[1:] or (glob.glob(os.path.join(basedir, 'testcases', '**', '**', '*.xml')) + glob.glob(os.path.join(basedir, 'testcases', 'opml', '**', '*.opml'))):
method, description, params, exc = getDescription(xmlfile)
xmlBase = os.path.abspath(xmlfile).replace(basedir,"http://www.feedvalidator.org")
testName = 'test_' + xmlBase.replace(os.path.sep, "/")
testFunc = buildTestCase(xmlfile, xmlBase, description, method, exc, params)
instanceMethod = types.MethodType(testFunc, TestCase)
setattr(TestCase, testName, instanceMethod)
return unittest.TestLoader().loadTestsFromTestCase(TestCase)
if __name__ == '__main__':
suite = METHOD_NAME()
unittest.main(argv=sys.argv[:1])
| null |
1,701 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmarthosting.endpoint import endpoint_data
class ListManagedHostsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'SmartHosting', '2020-08-01', 'ListManagedHosts','smarthosting')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_HostType(self):
return self.get_query_params().get('HostType')
def set_HostType(self,HostType):
self.add_query_param('HostType',HostType)
def get_Mode(self):
return self.get_query_params().get('Mode')
def set_Mode(self,Mode):
self.add_query_param('Mode',Mode)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_NextToken(self):
return self.get_query_params().get('NextToken')
def set_NextToken(self,NextToken):
self.add_query_param('NextToken',NextToken)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self, Tags):
for depth1 in range(len(Tags)):
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_ManagedHostIds(self):
return self.get_query_params().get('ManagedHostIds')
def set_ManagedHostIds(self, ManagedHostIds):
for depth1 in range(len(ManagedHostIds)):
if ManagedHostIds[depth1] is not None:
self.add_query_param('ManagedHostId.' + str(depth1 + 1) , ManagedHostIds[depth1])
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def METHOD_NAME(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_ManagedPrivateSpaceId(self):
return self.get_query_params().get('ManagedPrivateSpaceId')
def set_ManagedPrivateSpaceId(self,ManagedPrivateSpaceId):
self.add_query_param('ManagedPrivateSpaceId',ManagedPrivateSpaceId)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
def get_MaxResults(self):
return self.get_query_params().get('MaxResults')
def set_MaxResults(self,MaxResults):
self.add_query_param('MaxResults',MaxResults)
def get_ManagedHostName(self):
return self.get_query_params().get('ManagedHostName')
def set_ManagedHostName(self,ManagedHostName):
self.add_query_param('ManagedHostName',ManagedHostName)
def get_Status(self):
return self.get_query_params().get('Status')
def set_Status(self,Status):
self.add_query_param('Status',Status)
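# Minimal usage sketch (illustrative; the credentials, region, and client
# setup below are assumptions, not part of this generated file):
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = ListManagedHostsRequest()
#   request.set_MaxResults(20)
#   response = client.do_action_with_exception(request)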
| null |
1,702 |
# Copyright 2017-2022 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import logging
import requests
from autoscaler.exception import HeapsterElasticHTTPError
from autoscaler.model import Node
class ElasticSearchClient:
def __init__(self, protocol, host, port):
self._elastic_protocol = protocol
self._elastic_host = host
self._elastic_port = port
self._headers = {
'Content-Type': 'application/json'
}
def get_utilization(self, node: Node, from_timestamp: float, to_timestamp: float):
return self.METHOD_NAME(node, from_timestamp, to_timestamp), \
self._get_memory_utilization(node, from_timestamp, to_timestamp)
def METHOD_NAME(self, node, from_timestamp, to_timestamp):
return self._extract_cpu_utilization(self._request(self._request_cpu_utilization(
node_name=node.name, from_timestamp=from_timestamp, to_timestamp=to_timestamp)))
def _request_cpu_utilization(self, node_name: str, from_timestamp: float, to_timestamp: float):
return self._request_utilization(node_name, 'CpuMetricsTimestamp', from_timestamp, to_timestamp, {
"avg_cpu_utilization": {
"avg": {
"field": "Metrics.cpu/node_utilization.value"
}
},
"avg_cpu_capacity": {
"avg": {
"field": "Metrics.cpu/node_capacity.value"
}
}
})
def _extract_cpu_utilization(self, response):
capacity = response.get('aggregations', {}).get('avg_cpu_capacity', {}).get('value') or 0
capacity = capacity / 1000
utilization = response.get('aggregations', {}).get('avg_cpu_utilization', {}).get('value') or 0
utilization = utilization * capacity
return int(float(utilization) / float(capacity) * 100) if capacity else None
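# Worked example (illustrative numbers): with avg_cpu_capacity = 4000
# millicores (4 cores) and avg_cpu_utilization = 0.5, utilization becomes
# 0.5 * 4 = 2 used cores and the method returns int(2 / 4 * 100) = 50.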
def _get_memory_utilization(self, node, from_timestamp, to_timestamp):
return self._extract_memory_utilization(self._request(self._request_memory_utilization(
node_name=node.name, from_timestamp=from_timestamp, to_timestamp=to_timestamp)))
def _request_memory_utilization(self, node_name, from_timestamp, to_timestamp):
return self._request_utilization(node_name, 'MemoryMetricsTimestamp', from_timestamp, to_timestamp, {
"avg_memory_utilization": {
"avg": {
"field": "Metrics.memory/working_set.value"
}
},
"avg_memory_capacity": {
"avg": {
"field": "Metrics.memory/node_capacity.value"
}
}
})
def _extract_memory_utilization(self, response):
utilization = response.get('aggregations', {}).get('avg_memory_utilization', {}).get('value') or 0
capacity = response.get('aggregations', {}).get('avg_memory_capacity', {}).get('value') or 0
return int(float(utilization) / float(capacity) * 100) if capacity else None
def _request_utilization(self, node_name, metric_timestamp_name, from_timestamp, to_timestamp, aggregations):
from_timestamp = int(from_timestamp) * 1000
to_timestamp = int(to_timestamp) * 1000
return json.dumps({
"size": 0,
"query": {
"bool": {
"filter": [
{
"terms": {
"MetricsTags.nodename.raw": [
node_name
],
"boost": 1
}
},
{
"term": {
"MetricsTags.type": {
"value": "node",
"boost": 1
}
}
},
{
"range": {
metric_timestamp_name: {
"from": from_timestamp,
"to": to_timestamp,
"include_lower": True,
"include_upper": True,
"boost": 1
}
}
}
],
"disable_coord": False,
"adjust_pure_negative": True,
"boost": 1
}
},
"aggregations": aggregations
})
def _request(self, data):
indices = 'heapster-' + datetime.datetime.now().strftime('%Y.%m.%d')
url = f'{self._elastic_protocol}://{self._elastic_host}:{self._elastic_port}/{indices}/_search'
response = requests.request('GET', url, headers=self._headers, data=data)
if response.status_code != 200:
logging.warning('Unexpected response HTTP status code %s.', response.status_code)
raise HeapsterElasticHTTPError(response.status_code)
return response.json() or {}
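# Minimal usage sketch (illustrative; the endpoint values and timestamps are
# placeholder assumptions, and `node` is an autoscaler.model.Node instance):
#
#   client = ElasticSearchClient('http', 'elasticsearch.local', 9200)
#   cpu_percent, memory_percent = client.get_utilization(
#       node, from_timestamp=1672527600.0, to_timestamp=1672531200.0)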
| null |
1,703 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdknlb.endpoint import endpoint_data
class CreateLoadBalancerRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Nlb', '2022-04-30', 'CreateLoadBalancer','nlb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClientToken(self): # String
return self.get_body_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_body_params('ClientToken', ClientToken)
def get_ModificationProtectionConfig(self): # Struct
return self.get_body_params().get('ModificationProtectionConfig')
def set_ModificationProtectionConfig(self, ModificationProtectionConfig): # Struct
if ModificationProtectionConfig.get('Status') is not None:
self.add_body_params('ModificationProtectionConfig.Status', ModificationProtectionConfig.get('Status'))
if ModificationProtectionConfig.get('Reason') is not None:
self.add_body_params('ModificationProtectionConfig.Reason', ModificationProtectionConfig.get('Reason'))
def get_LoadBalancerBillingConfig(self): # Struct
return self.get_body_params().get('LoadBalancerBillingConfig')
def set_LoadBalancerBillingConfig(self, LoadBalancerBillingConfig): # Struct
if LoadBalancerBillingConfig.get('PayType') is not None:
self.add_body_params('LoadBalancerBillingConfig.PayType', LoadBalancerBillingConfig.get('PayType'))
def get_DeletionProtectionConfig(self): # Struct
return self.get_body_params().get('DeletionProtectionConfig')
def set_DeletionProtectionConfig(self, DeletionProtectionConfig): # Struct
if DeletionProtectionConfig.get('Enabled') is not None:
self.add_body_params('DeletionProtectionConfig.Enabled', DeletionProtectionConfig.get('Enabled'))
if DeletionProtectionConfig.get('Reason') is not None:
self.add_body_params('DeletionProtectionConfig.Reason', DeletionProtectionConfig.get('Reason'))
def get_AddressIpVersion(self): # String
return self.get_body_params().get('AddressIpVersion')
def set_AddressIpVersion(self, AddressIpVersion): # String
self.add_body_params('AddressIpVersion', AddressIpVersion)
def get_ResourceGroupId(self): # String
return self.get_body_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_body_params('ResourceGroupId', ResourceGroupId)
def get_LoadBalancerName(self): # String
return self.get_body_params().get('LoadBalancerName')
def METHOD_NAME(self, LoadBalancerName): # String
self.add_body_params('LoadBalancerName', LoadBalancerName)
def get_AddressType(self): # String
return self.get_body_params().get('AddressType')
def set_AddressType(self, AddressType): # String
self.add_body_params('AddressType', AddressType)
def get_Tags(self): # RepeatList
return self.get_body_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_body_params('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_body_params('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_BandwidthPackageId(self): # String
return self.get_body_params().get('BandwidthPackageId')
def set_BandwidthPackageId(self, BandwidthPackageId): # String
self.add_body_params('BandwidthPackageId', BandwidthPackageId)
def get_DryRun(self): # Boolean
return self.get_body_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_body_params('DryRun', DryRun)
def get_ZoneMappings(self): # Array
return self.get_body_params().get('ZoneMappings')
def set_ZoneMappings(self, ZoneMappings): # Array
for index1, value1 in enumerate(ZoneMappings):
if value1.get('VSwitchId') is not None:
self.add_body_params('ZoneMappings.' + str(index1 + 1) + '.VSwitchId', value1.get('VSwitchId'))
if value1.get('ZoneId') is not None:
self.add_body_params('ZoneMappings.' + str(index1 + 1) + '.ZoneId', value1.get('ZoneId'))
if value1.get('PrivateIPv4Address') is not None:
self.add_body_params('ZoneMappings.' + str(index1 + 1) + '.PrivateIPv4Address', value1.get('PrivateIPv4Address'))
if value1.get('AllocationId') is not None:
self.add_body_params('ZoneMappings.' + str(index1 + 1) + '.AllocationId', value1.get('AllocationId'))
def get_LoadBalancerType(self): # String
return self.get_body_params().get('LoadBalancerType')
def set_LoadBalancerType(self, LoadBalancerType): # String
self.add_body_params('LoadBalancerType', LoadBalancerType)
def get_VpcId(self): # String
return self.get_body_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_body_params('VpcId', VpcId)
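# Minimal usage sketch (illustrative; every identifier below is a placeholder
# assumption, not a real resource):
#
#   request = CreateLoadBalancerRequest()
#   request.set_VpcId('vpc-xxxxxxxx')
#   request.set_AddressType('Internet')
#   request.set_ZoneMappings([
#       {'ZoneId': 'cn-hangzhou-a', 'VSwitchId': 'vsw-aaaaaaaa'},
#       {'ZoneId': 'cn-hangzhou-b', 'VSwitchId': 'vsw-bbbbbbbb'},
#   ])
#   # set_ZoneMappings flattens each dict into indexed body parameters such
#   # as 'ZoneMappings.1.ZoneId' and 'ZoneMappings.1.VSwitchId'.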
| null |
1,704 |
# **************************************************************************
# *
# * Authors: J.M. De la Rosa Trevin ([email protected])
# * Oier Lauzirika Zarrabeitia ([email protected])
# *
# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address '[email protected]'
# *
# **************************************************************************
"""
This module implements the wrappers around
the visualization program.
"""
from pyworkflow.viewer import (ProtocolViewer, DESKTOP_TKINTER, WEB_DJANGO)
from pyworkflow.protocol.params import Form, Line, LabelParam, IntParam
from pyworkflow.protocol.params import GE
from pwem.viewers.showj import *
from pwem.viewers import TableView, ObjectView
from pwem.objects import SetOfClasses
from xmipp3.protocols.protocol_consensus_classes import XmippProtConsensusClasses
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial
import scipy.cluster
class XmippConsensusClassesViewer(ProtocolViewer):
""" Visualization of results from the consensus classes 3D protocol
"""
_label = 'viewer consensus classes'
_targets = [XmippProtConsensusClasses]
_environments = [DESKTOP_TKINTER, WEB_DJANGO]
def __init__(self, **kwargs):
ProtocolViewer.__init__(self, **kwargs)
def _defineParams(self, form: Form):
form.addSection(label='Classes')
form.addParam('visualizeClasses', IntParam,
validators=[GE(1)], default=1,
label='Classes' )
form.addSection(label='Graphs')
form.addParam('visualizeDendrogram', LabelParam,
label='Dendrogram' )
form.addParam('visualizeCostFunction', LabelParam,
label='Cost function' )
def _getVisualizeDict(self):
return {
'visualizeClasses': self._visualizeClasses,
'visualizeDendrogram': self._visualizeDendrogram,
'visualizeCostFunction': self._visualizeCostFunction,
}
# --------------------------- UTILS functions ------------------------------
def _getLinkageMatrix(self) -> np.ndarray:
return self.protocol._readLinkageMatrix()
def METHOD_NAME(self) -> dict:
return self.protocol._readElbows()
def _getMergedIntersections(self, size) -> SetOfClasses:
return self.protocol._obtainMergedIntersections(size)
def _visualizeClasses(self, param=None):
count = self.visualizeClasses.get()
classes = self._getMergedIntersections(count)
return self._showSetOfClasses3D(classes)
def _visualizeDendrogram(self, param=None):
linkage = self._getLinkageMatrix()
elbows = self.METHOD_NAME()
labels = np.arange(1, len(linkage)+2)
y = linkage[:,2]
fig, ax = plt.subplots()
scipy.cluster.hierarchy.dendrogram(linkage, ax=ax, labels=labels)
# Plot the elbows
for key, value in elbows.items():
index = len(y) - value
label = key + ': ' + str(value)
ax.axhline((y[index] + y[index+1])/2, label=label, color='black', linestyle='--')
ax.legend()
ax.set_ylabel('cost')
ax.set_xlabel('classId')
ax.set_title('Dendrogram')
return [fig]
def _visualizeCostFunction(self, param=None):
linkage = self._getLinkageMatrix()
elbows = self.METHOD_NAME()
y = linkage[:,2]
x = np.arange(len(y), 0, -1)
fig, ax = plt.subplots()
ax.plot(x, y)
# Plot the elbows
for key, value in elbows.items():
index = len(x) - value
label = key + ': ' + str(value)
ax.scatter([x[index]], [y[index]], label=label)
ax.legend()
ax.set_ylabel('cost')
ax.set_xlabel('class count')
ax.set_title('Cost function')
return [fig]
def _showSetOfClasses3D(self, classes):
labels = 'enabled id _size _representative._filename _xmipp_classIntersectionSizePValue _xmipp_classIntersectionRelativeSizePValue'
labelRender = '_representative._filename'
return [ObjectView( self._project, classes.strId(), classes.getFileName(),
viewParams={ORDER: labels,
VISIBLE: labels,
RENDER: labelRender,
SORT_BY: '_size desc',
MODE: MODE_MD})]
| null |
1,705 |
"""
This class is very experimental, probably not up to date, and needs to be refurbished.
If it works, you can watch replays with it.
"""
# pylint: disable=W0201,W0212
from __future__ import annotations
from typing import TYPE_CHECKING, List, Union
from sc2.bot_ai_internal import BotAIInternal
from sc2.data import Alert, Result
from sc2.game_data import GameData
from sc2.ids.ability_id import AbilityId
from sc2.ids.upgrade_id import UpgradeId
from sc2.position import Point2
from sc2.unit import Unit
from sc2.units import Units
if TYPE_CHECKING:
from sc2.METHOD_NAME import Client
from sc2.game_info import GameInfo
class ObserverAI(BotAIInternal):
"""Base class for bots."""
@property
def time(self) -> float:
""" Returns time in seconds, assumes the game is played on 'faster' """
return self.state.game_loop / 22.4 # 22.4 game loops per in-game second on 'faster' (16 steps/s * 1.4)
@property
def time_formatted(self) -> str:
""" Returns time as string in min:sec format """
t = self.time
return f"{int(t // 60):02}:{int(t % 60):02}"
@property
def game_info(self) -> GameInfo:
""" See game_info.py """
return self._game_info
@property
def game_data(self) -> GameData:
""" See game_data.py """
return self._game_data
@property
def METHOD_NAME(self) -> Client:
""" See client.py """
return self._client
def alert(self, alert_code: Alert) -> bool:
"""
Check if alert is triggered in the current step.
Possible alerts are listed here https://github.com/Blizzard/s2client-proto/blob/e38efed74c03bec90f74b330ea1adda9215e655f/s2clientprotocol/sc2api.proto#L679-L702
Example use:
from sc2.data import Alert
if self.alert(Alert.AddOnComplete):
print("Addon Complete")
Alert codes::
AlertError
AddOnComplete
BuildingComplete
BuildingUnderAttack
LarvaHatched
MergeComplete
MineralsExhausted
MorphComplete
MothershipComplete
MULEExpired
NuclearLaunchDetected
NukeComplete
NydusWormDetected
ResearchComplete
TrainError
TrainUnitComplete
TrainWorkerComplete
TransformationComplete
UnitUnderAttack
UpgradeComplete
VespeneExhausted
WarpInComplete
:param alert_code:
"""
assert isinstance(alert_code, Alert), f"alert_code {alert_code} is no Alert"
return alert_code.value in self.state.alerts
@property
def start_location(self) -> Point2:
"""
Returns the spawn location of the bot, using the position of the first created townhall.
This will be None if the bot is run on an arcade or custom map that does not feature townhalls at game start.
"""
return self.game_info.player_start_location
@property
def enemy_start_locations(self) -> List[Point2]:
"""Possible start locations for enemies."""
return self.game_info.start_locations
async def get_available_abilities(
self, units: Union[List[Unit], Units], ignore_resource_requirements: bool = False
) -> List[List[AbilityId]]:
"""Returns available abilities of one or more units. Right now only checks cooldown, energy cost, and whether the ability has been researched.
Examples::
units_abilities = await self.get_available_abilities(self.units)
or::
units_abilities = await self.get_available_abilities([self.units.random])
:param units:
:param ignore_resource_requirements:"""
return await self.METHOD_NAME.query_available_abilities(units, ignore_resource_requirements)
async def on_unit_destroyed(self, unit_tag: int):
"""
Override this in your bot class.
This event will be called when a unit (or structure, friendly or enemy) dies.
For enemy units, this only works if the enemy unit was in vision on death.
:param unit_tag:
"""
async def on_unit_created(self, unit: Unit):
"""Override this in your bot class. This function is called when a unit is created.
:param unit:"""
async def on_building_construction_started(self, unit: Unit):
"""
Override this in your bot class.
This function is called when a building construction has started.
:param unit:
"""
async def on_building_construction_complete(self, unit: Unit):
"""
Override this in your bot class. This function is called when a building
construction is completed.
:param unit:
"""
async def on_upgrade_complete(self, upgrade: UpgradeId):
"""
Override this in your bot class. This function is called with the upgrade id of an upgrade that was not finished last step and is now.
:param upgrade:
"""
async def on_start(self):
"""
Override this in your bot class. This function is called once at the start of the game.
At this point, game_data, game_info and the first iteration of game_state (self.state) are available.
"""
async def on_step(self, iteration: int):
"""
You need to implement this function!
Override this in your bot class.
This function is called on every game step (looped in realtime mode).
:param iteration:
"""
raise NotImplementedError
async def on_end(self, game_result: Result):
"""Override this in your bot class. This function is called at the end of a game.
:param game_result:"""
| null |
1,706 |
# -*- coding: utf-8 -*-
"""**Lightly Train:** Train a self-supervised model from the command-line.
This module contains the entrypoint for the **lightly-train**
command-line interface.
"""
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import os.path
from typing import List
import hydra
import yaml
from lightly.cli._helpers import fix_hydra_arguments, fix_input_path
from lightly.data import LightlyDataset
from lightly.utils.bounding_box import BoundingBox
from lightly.utils.cropping.crop_image_by_bounding_boxes import (
crop_dataset_by_bounding_boxes_and_save,
)
from lightly.utils.cropping.read_yolo_label_file import read_yolo_label_file
from lightly.utils.hipify import bcolors
def _crop_cli(cfg, is_cli_call=True):
input_dir = cfg["input_dir"]
if input_dir and is_cli_call:
input_dir = fix_input_path(input_dir)
output_dir = cfg["output_dir"]
if output_dir and is_cli_call:
output_dir = fix_input_path(output_dir)
label_dir = cfg["label_dir"]
if label_dir and is_cli_call:
label_dir = fix_input_path(label_dir)
label_names_file = cfg["label_names_file"]
if label_names_file and len(label_names_file) > 0:
if is_cli_call:
label_names_file = fix_input_path(label_names_file)
with open(label_names_file, "r") as file:
label_names_file_dict = yaml.full_load(file)
class_names = label_names_file_dict["names"]
else:
class_names = None
dataset = LightlyDataset(input_dir)
class_indices_list_list: List[List[int]] = []
bounding_boxes_list_list: List[List[BoundingBox]] = []
# YOLO-Specific
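# Illustrative note (assumption about the expected label layout): each YOLO
# label file contains one line per object in the form
#   <class_id> <x_center> <y_center> <width> <height>
# with all coordinates normalized to [0, 1] relative to the image size.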
for filename_image in dataset.get_filenames():
filepath_image_base, image_extension = os.path.splitext(filename_image)
filepath_label = os.path.join(label_dir, filename_image).replace(
image_extension, ".txt"
)
class_indices, bounding_boxes = read_yolo_label_file(
filepath_label, float(cfg["crop_padding"])
)
class_indices_list_list.append(class_indices)
bounding_boxes_list_list.append(bounding_boxes)
cropped_images_list_list = crop_dataset_by_bounding_boxes_and_save(
dataset,
output_dir,
bounding_boxes_list_list,
class_indices_list_list,
class_names,
)
print(f"Cropped images are stored at: {bcolors.OKBLUE}{output_dir}{bcolors.ENDC}")
return cropped_images_list_list
@hydra.main(**fix_hydra_arguments(config_path="config", config_name="config"))
def crop_cli(cfg):
"""Crops images into one sub-image for each object.
Args:
cfg:
The default configs are loaded from the config file.
To overwrite them please see the section on the config file
(.config.config.yaml).
Command-Line Args:
input_dir:
Path to the input directory where images are stored.
label_dir:
Path to the directory where the labels are stored. There must be one label file for each image.
The label file must have the same name as the image file, but the extension .txt.
For example, img_123.txt for img_123.jpg. The label file must be in YOLO format.
output_dir:
Path to the directory where the cropped images are stored. They are stored in one directory per input image.
crop_padding: Optional
The additional padding around the bounding box. This makes the crops include the context of the object.
The padding is relative and added to the width and height.
label_names_file: Optional
A yaml file including the names of the classes. If it is given, the filenames of the cropped images include
the class names instead of the class id. This file is usually included with datasets in YOLO format.
Example contents of such a label_names_file.yaml: "names: ['class_name_a', 'class_name_b']"
Examples:
>>> # Crop images and set the crop to be 20% around the bounding box
>>> lightly-crop input_dir=data/images label_dir=data/labels output_dir=data/cropped_images crop_padding=0.2
>>> # Crop images and use the class names in the filename
>>> lightly-crop input_dir=data/images label_dir=data/labels output_dir=data/cropped_images label_names_file=data/data.yaml
"""
return _crop_cli(cfg)
def METHOD_NAME():
crop_cli()
| null |
1,707 |
## \example core/linear_and_harmonic_scores.py
# An example for setting a linear or harmonic score between two particles
# using either a point distance (between particle centers) or a sphere
# distance (between particle surfaces).
#
# Note: this example relies on matplotlib for plotting the scores,
# but it can be easily modified to just print the scores.
#
# Author: Barak Raveh, 2022/12/13
import IMP
import IMP.algebra
import IMP.core
try:
import matplotlib.pyplot
except ImportError:
matplotlib = None
import numpy
import sys
IMP.setup_from_argv(sys.argv, "linear or harmonic score example")
DEFAULT_RADIUS = 2.0 # radius of particles
LINEAR_OFFSET = 4.0 # distance at which the function is zero (note this is not the Y-axis intercept!)
LINEAR_SLOPE = 3.0 # slope of linear score (= force in kcal/mol/A)
HARMONIC_MEAN = 3 # distance at which the function is minimal/maximal
HARMONIC_K = 2.0 # quadratic coefficient
def create_particle(m, radius = DEFAULT_RADIUS):
p = IMP.Particle(m)
s = IMP.algebra.Sphere3D([0,0,0], radius)
IMP.core.XYZR.setup_particle(p, s)
return p
def create_linear_point_pair_score():
''' slope*(x-offset) for point distance '''
linear_functor = IMP.core.Linear(LINEAR_OFFSET, LINEAR_SLOPE)
return IMP.core.DistancePairScore(linear_functor)
def METHOD_NAME():
''' 0.5*k*(x-mean)^2 for point distance '''
harmonic_functor = IMP.core.Harmonic(HARMONIC_MEAN, HARMONIC_K)
return IMP.core.DistancePairScore(harmonic_functor)
def create_linear_sphere_pair_score():
''' slope*(x-offset) for sphere distance '''
linear_functor = IMP.core.Linear(LINEAR_OFFSET, LINEAR_SLOPE)
return IMP.core.SphereDistancePairScore(linear_functor)
def create_harmonic_sphere_pair_score():
''' 0.5*k*(x-mean)^2 for sphere distance '''
harmonic_functor = IMP.core.Harmonic(HARMONIC_MEAN, HARMONIC_K)
return IMP.core.SphereDistancePairScore(harmonic_functor)
def create_model(pair_score):
m = IMP.Model()
particles = [create_particle(m) for x in range(2)]
restraint = IMP.core.PairRestraint(m, pair_score, particles)
xyzrs = [IMP.core.XYZR(p) for p in particles]
return m, xyzrs, restraint
def plot_score(pair_score, caption,
xmin = -15.0, xmax = 15.0, xstep = 0.01):
'''
Plots a pair_score between two particles, one particle
being at [0,0,0] and the other particle being at [x,0,0]
for x in the closed interval [xmin:xstep:xmax]
'''
m, xyzrs, restraint = create_model(pair_score)
xyzrs[0].set_coordinates([0,0,0])
X = numpy.arange(xmin, xmax+0.1*xstep, xstep)
Y = 0.0*X
for i,x in enumerate(X):
xyzrs[1].set_coordinates([x,0,0])
Y[i] = restraint.get_score()
if not matplotlib:
print("Not showing plot; matplotlib is not installed "
"or could not be imported")
elif IMP.get_is_quick_test():
print("Not showing plot, as we are running test cases")
else:
matplotlib.pyplot.plot(X,Y,'-')
matplotlib.pyplot.title(caption)
matplotlib.pyplot.xlabel(r"$X_2$ [$\AA$]")
matplotlib.pyplot.ylabel(r"Energy [$kcal \cdot mol^{-1}$]")
matplotlib.pyplot.gca().spines['bottom'].set_position(('data', 0))
matplotlib.pyplot.show()
if __name__ == "__main__":
# NOTE: sphere distance is not distance!
linear_str = "{:.1f}*(dist-{:.1f})".format(LINEAR_SLOPE, LINEAR_OFFSET)
harmonic_str = "{:.1f}*(dist-{:.1f})^2".format(HARMONIC_K, HARMONIC_MEAN)
plot_score(create_linear_point_pair_score(),
caption="Linear point distance\n{}"
.format(linear_str))
plot_score(create_linear_sphere_pair_score(),
caption="Linear sphere distance (R={:.1f} A)\n{}"
.format(DEFAULT_RADIUS, linear_str))
plot_score(METHOD_NAME(),
caption="Harmonic point distance\n{}"
.format(harmonic_str))
plot_score(create_harmonic_sphere_pair_score(),
caption="Harmonic sphere distance (R={:.1f} A)\n{}"
.format(DEFAULT_RADIUS, harmonic_str))
| null |
1,708 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class DescribeGroupedMaliciousFilesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeGroupedMaliciousFiles')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RepoId(self): # String
return self.get_query_params().get('RepoId')
def set_RepoId(self, RepoId): # String
self.add_query_param('RepoId', RepoId)
def get_FuzzyMaliciousName(self): # String
return self.get_query_params().get('FuzzyMaliciousName')
def set_FuzzyMaliciousName(self, FuzzyMaliciousName): # String
self.add_query_param('FuzzyMaliciousName', FuzzyMaliciousName)
def get_RepoNamespace(self): # String
return self.get_query_params().get('RepoNamespace')
def set_RepoNamespace(self, RepoNamespace): # String
self.add_query_param('RepoNamespace', RepoNamespace)
def get_ImageDigest(self): # String
return self.get_query_params().get('ImageDigest')
def set_ImageDigest(self, ImageDigest): # String
self.add_query_param('ImageDigest', ImageDigest)
def get_ScanRanges(self): # RepeatList
return self.get_query_params().get('ScanRange')
def set_ScanRanges(self, ScanRange): # RepeatList
for depth1 in range(len(ScanRange)):
self.add_query_param('ScanRange.' + str(depth1 + 1), ScanRange[depth1])
def get_PageSize(self): # String
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # String
self.add_query_param('PageSize', PageSize)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_ImageTag(self): # String
return self.get_query_params().get('ImageTag')
def set_ImageTag(self, ImageTag): # String
self.add_query_param('ImageTag', ImageTag)
def get_MaliciousMd5(self): # String
return self.get_query_params().get('MaliciousMd5')
def set_MaliciousMd5(self, MaliciousMd5): # String
self.add_query_param('MaliciousMd5', MaliciousMd5)
def get_CurrentPage(self): # Integer
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self, CurrentPage): # Integer
self.add_query_param('CurrentPage', CurrentPage)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_RepoName(self): # String
return self.get_query_params().get('RepoName')
def set_RepoName(self, RepoName): # String
self.add_query_param('RepoName', RepoName)
def get_RepoInstanceId(self): # String
return self.get_query_params().get('RepoInstanceId')
def set_RepoInstanceId(self, RepoInstanceId): # String
self.add_query_param('RepoInstanceId', RepoInstanceId)
def METHOD_NAME(self): # String
return self.get_query_params().get('ImageLayer')
def set_ImageLayer(self, ImageLayer): # String
self.add_query_param('ImageLayer', ImageLayer)
def get_Levels(self): # String
return self.get_query_params().get('Levels')
def set_Levels(self, Levels): # String
self.add_query_param('Levels', Levels)
def get_RepoRegionId(self): # String
return self.get_query_params().get('RepoRegionId')
def set_RepoRegionId(self, RepoRegionId): # String
self.add_query_param('RepoRegionId', RepoRegionId)
| null |
1,709 |
# Copyright cocotb contributors
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
"""
Tests of cocotb.test functionality
* expect_error
* expect_fail
* timeout
"""
from collections.abc import Coroutine
import pytest
from common import MyBaseException, MyException
import cocotb
from cocotb.triggers import NullTrigger, Timer
@cocotb.test(expect_error=NameError)
async def test_error(dut):
"""Error in the test"""
await Timer(100, "ns")
fail # noqa
@cocotb.test()
async def test_tests_are_tests(dut):
"""
Test that things annotated with cocotb.test are tests
"""
assert isinstance(test_tests_are_tests, cocotb.test)
# just to be sure...
@cocotb.test(expect_fail=True)
async def test_async_test_can_fail(dut):
assert False
@cocotb.test()
async def test_immediate_test(dut):
"""Test that tests can return immediately"""
return
@cocotb.test(expect_fail=True)
async def test_assertion_is_failure(dut):
assert False
@cocotb.test(expect_error=MyException)
async def test_expect_particular_exception(dut):
raise MyException()
@cocotb.test(expect_error=(MyException, ValueError))
async def test_expect_exception_list(dut):
raise MyException()
@cocotb.test(
expect_error=cocotb.result.SimTimeoutError, timeout_time=1, timeout_unit="ns"
)
async def test_timeout_testdec_fail(dut):
await Timer(10, "ns")
@cocotb.test(timeout_time=100, timeout_unit="ns")
async def test_timeout_testdec_pass(dut):
await Timer(10, "ns")
@cocotb.test(timeout_time=10, timeout_unit="ns")
async def test_timeout_testdec_simultaneous(dut):
try:
await cocotb.triggers.with_timeout(
Timer(1, "ns"), timeout_time=1, timeout_unit="ns"
)
except cocotb.result.SimTimeoutError:
pass
else:
assert False, "Expected a Timeout"
# Whether this test fails or passes depends on the behavior of the
# scheduler, simulator, and the implementation of the timeout function.
# CAUTION: THIS MAY CHANGE
# these tests should run in definition order, not lexicographic order
last_ordered_test = None
@cocotb.test()
async def test_ordering_3(dut):
global last_ordered_test
val, last_ordered_test = last_ordered_test, 3
assert val is None
@cocotb.test()
async def test_ordering_2(dut):
global last_ordered_test
val, last_ordered_test = last_ordered_test, 2
assert val == 3
@cocotb.test()
async def test_ordering_1(dut):
global last_ordered_test
val, last_ordered_test = last_ordered_test, 1
assert val == 2
@cocotb.test()
class TestClass(Coroutine):
def __init__(self, dut):
self._coro = self.run(dut)
async def run(self, dut):
pass
def send(self, value):
self._coro.send(value)
def throw(self, exception):
self._coro.throw(exception)
def __await__(self):
yield from self._coro.__await__()
@cocotb.test()
async def test_empty_docstring(dut) -> None:
""""""
@cocotb.test(expect_fail=True)
async def test_pytest_raises_fail(dut):
with pytest.raises(AssertionError):
assert True
@cocotb.test(expect_fail=True)
async def test_pytest_warns_fail(dut):
def test_func():
pass
with pytest.warns(RuntimeWarning):
test_func()
@cocotb.test(expect_fail=True)
async def test_pytest_deprecated_call_fail(dut):
def test_func():
pass
with pytest.deprecated_call():
test_func()
@cocotb.test(expect_fail=True)
async def test_pytest_raises_fail_in_task(dut):
async def test_func():
with pytest.raises(AssertionError):
assert True
cocotb.start_soon(test_func())
await NullTrigger()
@cocotb.test(expect_fail=True)
async def METHOD_NAME(dut):
def inner_func():
pass
async def test_func():
with pytest.warns(RuntimeWarning):
inner_func()
cocotb.start_soon(test_func())
await NullTrigger()
@cocotb.test(expect_fail=True)
async def test_pytest_deprecated_call_fail_in_task(dut):
def inner_func():
pass
async def test_func():
with pytest.deprecated_call():
inner_func()
cocotb.start_soon(test_func())
await NullTrigger()
@cocotb.test(expect_error=MyBaseException)
async def test_base_exception_expect_fail(dut):
raise MyBaseException
@cocotb.test(expect_error=MyBaseException)
async def test_base_exception_in_task_expect_fail(dut):
async def test_func():
raise MyBaseException
cocotb.start_soon(test_func())
await NullTrigger()
@cocotb.test
async def test_without_parenthesis(dut):
pass
| null |
1,710 |
# Copyright (C) 2018-2023 The NeoVintageous Team (NeoVintageous).
#
# This file is part of NeoVintageous.
#
# NeoVintageous is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NeoVintageous is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NeoVintageous. If not, see <https://www.gnu.org/licenses/>.
from NeoVintageous.tests import unittest
from NeoVintageous.nv.plugin_abolish import _coerce_to_camelcase
from NeoVintageous.nv.plugin_abolish import _coerce_to_dashcase
from NeoVintageous.nv.plugin_abolish import _coerce_to_dotcase
from NeoVintageous.nv.plugin_abolish import _coerce_to_mixedcase
from NeoVintageous.nv.plugin_abolish import _coerce_to_snakecase
from NeoVintageous.nv.plugin_abolish import _coerce_to_spacecase
from NeoVintageous.nv.plugin_abolish import _coerce_to_titlecase
from NeoVintageous.nv.plugin_abolish import _coerce_to_uppercase
class TestAbolish(unittest.TestCase):
def test_coerce_to_mixedcase(self):
self.assertEquals('', _coerce_to_mixedcase(''))
self.assertEquals('M', _coerce_to_mixedcase('m'))
self.assertEquals('Mixed', _coerce_to_mixedcase('Mixed'))
self.assertEquals('Mixed', _coerce_to_mixedcase('mixed'))
self.assertEquals('Mixed', _coerce_to_mixedcase('MIXED'))
self.assertEquals('MixedCase', _coerce_to_mixedcase('MixedCase'))
def test_coerce_to_camelcase(self):
self.assertEquals('', _coerce_to_camelcase(''))
self.assertEquals('c', _coerce_to_camelcase('c'))
self.assertEquals('c', _coerce_to_camelcase('C'))
self.assertEquals('camel', _coerce_to_camelcase('camel'))
self.assertEquals('camel', _coerce_to_camelcase('Camel'))
self.assertEquals('camel', _coerce_to_camelcase('CAMEL'))
self.assertEquals('camelCase', _coerce_to_camelcase('camelCase'))
self.assertEquals('camelCamelCase', _coerce_to_camelcase('CamelCamelCase'))
self.assertEquals('snakeCase', _coerce_to_camelcase('snake_case'))
self.assertEquals('snakeSnakeCase', _coerce_to_camelcase('snake_snake_case'))
self.assertEquals('mixedCase', _coerce_to_camelcase('MixedCase'))
self.assertEquals('upperSnakeCase', _coerce_to_camelcase('UPPER_SNAKE_CASE'))
def METHOD_NAME(self):
self.assertEquals('', _coerce_to_snakecase(''))
self.assertEquals('s', _coerce_to_snakecase('S'))
self.assertEquals('snake', _coerce_to_snakecase('snake'))
self.assertEquals('snake', _coerce_to_snakecase('SNAKE'))
self.assertEquals('snake', _coerce_to_snakecase('Snake'))
self.assertEquals('snake_case', _coerce_to_snakecase('snake_case'))
self.assertEquals('snake_case', _coerce_to_snakecase('SnakeCase'))
self.assertEquals('snake_snake_case', _coerce_to_snakecase('SnakeSnakeCase'))
self.assertEquals('snake2_snake2_case', _coerce_to_snakecase('Snake2Snake2Case'))
self.assertEquals('get_http_response_code', _coerce_to_snakecase('getHTTPResponseCode'))
self.assertEquals('get2_http_response_code', _coerce_to_snakecase('get2HTTPResponseCode'))
self.assertEquals('http_response_code', _coerce_to_snakecase('HTTPResponseCode'))
self.assertEquals('http_response_code_xyz', _coerce_to_snakecase('HTTPResponseCodeXYZ'))
self.assertEquals('html_tidy', _coerce_to_snakecase('HTMLTidy'))
self.assertEquals('html_tidy_generator', _coerce_to_snakecase('HTMLTidyGenerator'))
self.assertEquals('free_bsd', _coerce_to_snakecase('FreeBSD'))
self.assertEquals('html', _coerce_to_snakecase('HTML'))
def test_coerce_to_uppercase(self):
self.assertEquals('', _coerce_to_uppercase(''))
self.assertEquals('U', _coerce_to_uppercase('u'))
self.assertEquals('UPPERCASE', _coerce_to_uppercase('UPPERCASE'))
self.assertEquals('UPPERCASE', _coerce_to_uppercase('uppercase'))
self.assertEquals('SNAKE_CASE', _coerce_to_uppercase('snake_case'))
self.assertEquals('CAMEL_CASE', _coerce_to_uppercase('camelCase'))
self.assertEquals('TITLE_CASE', _coerce_to_uppercase('titleCase'))
self.assertEquals('UPPER_UPPER_CASE', _coerce_to_uppercase('UpperUpperCase'))
self.assertEquals('UPPER_UPPER_CASE', _coerce_to_uppercase('upper_upperCase'))
self.assertEquals('UPPER_UPPER_CASE', _coerce_to_uppercase('UPPER_upperCase'))
def test_coerce_to_dashcase(self):
self.assertEquals('', _coerce_to_dashcase(''))
self.assertEquals('dash', _coerce_to_dashcase('dash'))
self.assertEquals('dash-case', _coerce_to_dashcase('dash-case'))
self.assertEquals('snake-case', _coerce_to_dashcase('snake_case'))
self.assertEquals('camel-case', _coerce_to_dashcase('camelCase'))
self.assertEquals('title-case', _coerce_to_dashcase('TitleCase'))
def test_coerce_to_spacecase(self):
self.assertEquals('', _coerce_to_spacecase(''))
self.assertEquals('space', _coerce_to_spacecase('space'))
self.assertEquals('space case', _coerce_to_spacecase('space case'))
self.assertEquals('snake case', _coerce_to_spacecase('snake_case'))
self.assertEquals('camel case', _coerce_to_spacecase('camelCase'))
self.assertEquals('title case', _coerce_to_spacecase('TitleCase'))
def test_coerce_to_dotcase(self):
self.assertEquals('', _coerce_to_dotcase(''))
self.assertEquals('dot', _coerce_to_dotcase('dot'))
self.assertEquals('dot.case', _coerce_to_dotcase('dot.case'))
self.assertEquals('snake.case', _coerce_to_dotcase('snake_case'))
self.assertEquals('camel.case', _coerce_to_dotcase('camelCase'))
self.assertEquals('title.case', _coerce_to_dotcase('TitleCase'))
def test_coerce_to_titlecase(self):
self.assertEquals('', _coerce_to_titlecase(''))
self.assertEquals('Title', _coerce_to_titlecase('Title'))
self.assertEquals('Title', _coerce_to_titlecase('title'))
self.assertEquals('Title', _coerce_to_titlecase('TITLE'))
self.assertEquals('Title Case', _coerce_to_titlecase('TitleCase'))
self.assertEquals('Snake Case', _coerce_to_titlecase('snake_case'))
self.assertEquals('Camel Case', _coerce_to_titlecase('camelCase'))
| null |
1,711 |
# -*- coding: utf-8 -*-
import logging
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from django.db import models
from framework.auth import Auth
from osf.models.files import File, Folder, BaseFileNode
from owncloud import Client as OwnCloudClient
from addons.base import exceptions
from addons.owncloud import settings
from addons.owncloud.serializer import OwnCloudSerializer
from addons.owncloud.settings import DEFAULT_HOSTS, USE_SSL
from osf.models.external import BasicAuthProviderMixin
from website.util import api_v2_url
logger = logging.getLogger(__name__)
class OwncloudFileNode(BaseFileNode):
_provider = 'owncloud'
class OwncloudFolder(OwncloudFileNode, Folder):
pass
class OwncloudFile(OwncloudFileNode, File):
@property
def _hashes(self):
# ownCloud API doesn't provide this metadata
return None
class OwnCloudProvider(BasicAuthProviderMixin):
"""An alternative to `ExternalProvider` not tied to OAuth"""
name = 'ownCloud'
short_name = 'owncloud'
def __init__(self, account=None, host=None, username=None, password=None):
if username:
username = username.lower()
return super(OwnCloudProvider, self).__init__(account=account, host=host, username=username, password=password)
def __repr__(self):
return '<{name}: {status}>'.format(
name=self.__class__.__name__,
status=self.account.display_name if self.account else 'anonymous'
)
class UserSettings(BaseOAuthUserSettings):
oauth_provider = OwnCloudProvider
serializer = OwnCloudSerializer
def to_json(self, user):
ret = super(UserSettings, self).to_json(user)
ret['hosts'] = DEFAULT_HOSTS
return ret
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = OwnCloudProvider
serializer = OwnCloudSerializer
folder_id = models.TextField(blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
_api = None
@property
def api(self):
if self._api is None:
self._api = OwnCloudProvider(self.external_account)
return self._api
@property
def folder_path(self):
return self.folder_id
@property
def folder_name(self):
return self.folder_id
def set_folder(self, folder, auth=None):
if folder == '/ (Full ownCloud)':
folder = '/'
self.folder_id = folder
self.save()
self.nodelogger.log(action='folder_selected', save=True)
def fetch_folder_name(self):
if self.folder_id == '/':
return '/ (Full ownCloud)'
return self.folder_id.strip('/').split('/')[-1]
def clear_settings(self):
self.folder_id = None
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
self.clear_settings()
if add_log:
self.nodelogger.log(action='node_deauthorized')
self.clear_auth() # Also performs a .save()
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Addon is not authorized')
provider = OwnCloudProvider(self.external_account)
return {
'host': provider.host,
'username': provider.username,
'password': provider.password
}
def serialize_waterbutler_settings(self):
if not self.folder_id:
raise exceptions.AddonError('ownCloud is not configured')
return {
'folder': self.folder_id,
'verify_ssl': USE_SSL
}
def METHOD_NAME(self, auth, action, metadata):
url = self.owner.web_url_for('addon_view_or_download_file',
path=metadata['path'], provider='owncloud')
self.owner.add_log(
'owncloud_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'folder': self.folder_id,
'path': metadata['materialized'].lstrip('/'),
'urls': {
'view': url,
'download': url + '?action=download'
},
},
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), add_log=True)
self.save()
def on_delete(self):
self.deauthorize(add_log=False)
self.save()
def get_folders(self, **kwargs):
path = kwargs.get('path')
if path is None:
return [{
'addon': 'owncloud',
'path': '/',
'kind': 'folder',
'id': '/',
'name': '/ (Full ownCloud)',
'urls': {
'folders': api_v2_url('nodes/{}/addons/owncloud/folders/'.format(self.owner._id),
params={
'path': '/',
})
}
}]
provider = OwnCloudProvider(account=self.external_account)
c = OwnCloudClient(provider.host, verify_certs=settings.USE_SSL)
c.login(provider.username, provider.password)
ret = []
for item in c.list(path):
if item.file_type == 'dir':
ret.append({
'addon': 'owncloud',
'path': item.path,
'kind': 'folder',
'id': item.path,
'name': item.path.strip('/').split('/')[-1],
'urls': {
'folders': api_v2_url('nodes/{}/addons/owncloud/folders/'.format(self.owner._id),
params={
'path': item.path,
})
}
})
return ret
| null |
1,712 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkess.endpoint import endpoint_data
class ModifyScheduledTaskRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ess', '2014-08-28', 'ModifyScheduledTask','ess')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ScheduledAction(self): # String
return self.get_query_params().get('ScheduledAction')
def set_ScheduledAction(self, ScheduledAction): # String
self.add_query_param('ScheduledAction', ScheduledAction)
def get_MaxValue(self): # Integer
return self.get_query_params().get('MaxValue')
def set_MaxValue(self, MaxValue): # Integer
self.add_query_param('MaxValue', MaxValue)
def get_ScalingGroupId(self): # String
return self.get_query_params().get('ScalingGroupId')
def set_ScalingGroupId(self, ScalingGroupId): # String
self.add_query_param('ScalingGroupId', ScalingGroupId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_RecurrenceEndTime(self): # String
return self.get_query_params().get('RecurrenceEndTime')
def set_RecurrenceEndTime(self, RecurrenceEndTime): # String
self.add_query_param('RecurrenceEndTime', RecurrenceEndTime)
def METHOD_NAME(self): # String
return self.get_query_params().get('LaunchTime')
def set_LaunchTime(self, LaunchTime): # String
self.add_query_param('LaunchTime', LaunchTime)
def get_DesiredCapacity(self): # Integer
return self.get_query_params().get('DesiredCapacity')
def set_DesiredCapacity(self, DesiredCapacity): # Integer
self.add_query_param('DesiredCapacity', DesiredCapacity)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_RecurrenceValue(self): # String
return self.get_query_params().get('RecurrenceValue')
def set_RecurrenceValue(self, RecurrenceValue): # String
self.add_query_param('RecurrenceValue', RecurrenceValue)
def get_LaunchExpirationTime(self): # Integer
return self.get_query_params().get('LaunchExpirationTime')
def set_LaunchExpirationTime(self, LaunchExpirationTime): # Integer
self.add_query_param('LaunchExpirationTime', LaunchExpirationTime)
def get_MinValue(self): # Integer
return self.get_query_params().get('MinValue')
def set_MinValue(self, MinValue): # Integer
self.add_query_param('MinValue', MinValue)
def get_ScheduledTaskName(self): # String
return self.get_query_params().get('ScheduledTaskName')
def set_ScheduledTaskName(self, ScheduledTaskName): # String
self.add_query_param('ScheduledTaskName', ScheduledTaskName)
def get_TaskEnabled(self): # Boolean
return self.get_query_params().get('TaskEnabled')
def set_TaskEnabled(self, TaskEnabled): # Boolean
self.add_query_param('TaskEnabled', TaskEnabled)
def get_ScheduledTaskId(self): # String
return self.get_query_params().get('ScheduledTaskId')
def set_ScheduledTaskId(self, ScheduledTaskId): # String
self.add_query_param('ScheduledTaskId', ScheduledTaskId)
def get_RecurrenceType(self): # String
return self.get_query_params().get('RecurrenceType')
def set_RecurrenceType(self, RecurrenceType): # String
self.add_query_param('RecurrenceType', RecurrenceType)
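# Minimal usage sketch, assuming the standard aliyunsdkcore AcsClient; the
# credentials, region, and ids below are placeholders, not real values.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = ModifyScheduledTaskRequest()
    request.set_ScheduledTaskId('<scheduled-task-id>')
    request.set_ScheduledTaskName('nightly-scale-out')
    # do_action_with_exception() sends the signed POST and returns the raw body
    print(client.do_action_with_exception(request))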
| null |
1,713 |
import logging
import time
logging.basicConfig(level=logging.INFO)
#logging.basicConfig(level=logging.DEBUG)
from .config import vi_conf
from flare import loadProjectConf
loadProjectConf(vi_conf) #prepatch flare.conf
from flare import html5
from flare.popup import Alert
from flare import network
from flare.icons import SvgIcon
from . import utils
from . import sidebarwidgets
from . import exception
from .login import LoginScreen
from .admin import AdminScreen
from .config import conf
from flare.i18n import buildTranslations,translate
from flare import i18n
class Application(html5.Div):
def __init__(self):
super(Application, self).__init__()
self.addClass("vi-application")
conf["theApp"] = self
# Main Screens
self.loginScreen = None
self.adminScreen = None
try:
self.isFramed = bool(html5.jseval("window.top !== window.self"))
        except Exception:
self.isFramed = True
self.startup()
def startup(self, *args, **kwargs):
if conf["core.version"] is None:
network.NetworkService.request(None, "/vi/getVersion",
successHandler=self.METHOD_NAME,
failureHandler=self.startupFailure)
else:
network.NetworkService.request(None, "/vi/config",
successHandler=self.getConfigSuccess,
failureHandler=self.startupFailure)
def METHOD_NAME(self, req):
conf["core.version"] = network.NetworkService.decode(req)
if (conf["core.version"][0] == 3 # enforce ViUR3
and ((conf["core.version"][1] < conf["core.version.min"][1])
or (conf["core.version"][1] >= conf["core.version.max"][1]))
):
params = {
"core.version": ".".join(str(x) for x in conf["core.version"]),
"vi.version": ".".join(str(x) for x in conf["vi.version"]),
"core.version.min": ".".join(str(x) for x in conf["core.version.min"]),
"core.version.max": ".".join(str(x) for x in conf["core.version.max"]),
}
Alert(
translate("ViUR-core (v{{core.version}}) is incompatible to this Vi (v{{vi.version}}). The ViUR-core version musst be greater or equal version v{{core.version.min}} and lower than v{{core.version.max}}.", **params)
+ "\n\n" + translate("There may be a lack on functionality.")
+ "\n" + translate("Please update either your ViUR-core or Vi!"),
title=translate("Version mismatch"),
okCallback=self.startup,
okLabel=translate("Continue at your own risk")
)
return
elif conf["core.version"][0] == 2:
Alert(
translate("Please update your ViUR-core to ViUR 3"),
title=translate("Legacy ViUR-Version"),
closeable=False,
)
return
self.startup()
    def getConfigSuccess(self, req):
        # "s" is the startup timestamp set in the __name__ == "vi" block below
        d = time.time() - s
        print("%.5f sec - Config and Version received" % d)
        conf["mainConfig"] = network.NetworkService.decode(req)
        if not self.adminScreen:
            self.adminScreen = AdminScreen()
        sc = time.time() - s
        print("%.5f sec - Screen instantiated" % sc)
        self.adminScreen.invoke()
        scinv = time.time() - s
        print("%.5f sec - Screen invoked" % scinv)
def startupFailure(self, req, err):
if err in [403, 401]:
self.login()
else:
Alert(
translate("The connection to the server could not be correctly established."),
title=translate("Communication error"),
okCallback=self.startup,
okLabel=translate("Retry")
)
def login(self, logout=False):
if not self.loginScreen:
self.loginScreen = LoginScreen()
if self.adminScreen:
self.adminScreen.reset()
self.adminScreen.hide()
self.loginScreen.invoke(logout=logout)
def admin(self):
if self.loginScreen:
self.loginScreen.hide()
self.startup()
def logout(self):
self.login(logout=True)
def setTitle(self, title = None):
if title:
title = [title]
else:
title = []
addendum = conf.get("vi.name")
if addendum:
title.append(addendum)
html5.document.title = conf["vi.title.delimiter"].join(title)
def setPath(self, path = ""):
hash = html5.window.location.hash
if "?" in hash and not "?" in path:
hash = hash.split("?", 1)[1]
if hash:
hash = "?" + hash
else:
hash = ""
html5.window.location.hash = path + hash
def preloadIcons():
iconList = ["icon-arrow-right",
"icon-save",
"icon-draggable",
"icon-save-file",
"icon-image-file",
"icon-arrow-left",
"icon-cancel",
"icon-file-system",
"icon-add",
"icon-list",
"icon-reload",
"icon-list-item",
"icon-hierarchy",
"icon-edit",
"icon-search",
"icon-clone",
"icon-delete",
"icon-play",
"icon-dashboard",
"icon-logout",
"icon-error",
"icon-error-file",
"icon-time"]
for icon in iconList:
SvgIcon(icon)
def start():
buildTranslations("vi")
# Configure vi as network render prefix
network.NetworkService.prefix = "/vi"
network.NetworkService.host = ""
conf["currentLanguage"] = i18n.getLanguage()
conf["indexeddb"] = utils.indexeddb("vi-cache")
preloadIcons()
# Application
app = Application()
html5.Body().appendChild(app)
s = None
a = None
d = None
sc = None
scinv = None
if __name__ == "vi":
import time
s = time.time()
print("Start App")
start()
a = (time.time() - s)
print( "%.5f Sek - Application instantiated " % a )
| null |
1,714 |
#!/usr/bin/env python3
""" Collects and returns Information on available Apple Silicon SoCs in Apple Macs. """
import typing as T
import os
import psutil
import tensorflow as tf
from lib.utils import FaceswapError
from ._base import _GPUStats
_METAL_INITIALIZED: bool = False
class AppleSiliconStats(_GPUStats):
""" Holds information and statistics about Apple Silicon SoC(s) available on the currently
running Apple system.
Notes
-----
    Apple Silicon is a bit different from other backends, as it does not have a dedicated GPU with
    its own dedicated VRAM; rather, the RAM is shared between the CPU and the GPU. A combination of psutil
and Tensorflow are used to pull as much useful information as possible.
Parameters
----------
log: bool, optional
Whether the class should output information to the logger. There may be occasions where the
logger has not yet been set up when this class is queried. Attempting to log in these
instances will raise an error. If GPU stats are being queried prior to the logger being
available then this parameter should be set to ``False``. Otherwise set to ``True``.
Default: ``True``
"""
def __init__(self, log: bool = True) -> None:
# Following attribute set in :func:``_initialize``
self._tf_devices: list[T.Any] = []
super().__init__(log=log)
def _initialize(self) -> None:
""" Initialize Metal for Apple Silicon SoC(s).
If :attr:`_is_initialized` is ``True`` then this function just returns performing no
action. Otherwise :attr:`is_initialized` is set to ``True`` after successfully
initializing Metal.
"""
if self._is_initialized:
return
self._log("debug", "Initializing Metal for Apple Silicon SoC.")
self._initialize_metal()
self._tf_devices = tf.config.list_physical_devices(device_type="GPU")
super()._initialize()
def _initialize_metal(self) -> None:
""" Initialize Metal on first call to this class and set global
:attr:``_METAL_INITIALIZED`` to ``True``. If Metal has already been initialized then return
performing no action.
"""
global _METAL_INITIALIZED # pylint:disable=global-statement
if _METAL_INITIALIZED:
return
self._log("debug", "Performing first time Apple SoC setup.")
os.environ["DISPLAY"] = ":0"
try:
os.system("open -a XQuartz")
except Exception as err: # pylint:disable=broad-except
self._log("debug", f"Swallowing error opening XQuartz: {str(err)}")
self._test_tensorflow()
_METAL_INITIALIZED = True
def _test_tensorflow(self) -> None:
""" Test that tensorflow can execute correctly.
Raises
------
FaceswapError
If the Tensorflow library could not be successfully initialized
"""
try:
meminfo = tf.config.experimental.get_memory_info('GPU:0')
devices = tf.config.list_logical_devices()
self._log("debug",
f"Tensorflow initialization test: (mem_info: {meminfo}, devices: {devices}")
except RuntimeError as err:
msg = ("An unhandled exception occured initializing the device via Tensorflow "
f"Library. Original error: {str(err)}")
raise FaceswapError(msg) from err
def _get_device_count(self) -> int:
""" Detect the number of SoCs attached to the system.
Returns
-------
int
The total number of SoCs available
"""
retval = len(self._tf_devices)
self._log("debug", f"GPU Device count: {retval}")
return retval
def _get_handles(self) -> list:
""" Obtain the device handles for all available Apple Silicon SoCs.
Notes
-----
Apple SoC does not use handles, so return a list of indices corresponding to found
GPU devices
Returns
-------
list
The list of indices for available Apple Silicon SoCs
"""
handles = list(range(self._device_count))
self._log("debug", f"GPU Handles found: {handles}")
return handles
def _get_driver(self) -> str:
""" Obtain the Apple Silicon driver version currently in use.
Notes
-----
        As the SoC is not a discrete GPU it does not technically have a driver version, so just
return `'Not Applicable'` as a string
Returns
-------
str
The current SoC driver version
"""
driver = "Not Applicable"
self._log("debug", f"GPU Driver: {driver}")
return driver
def METHOD_NAME(self) -> list[str]:
""" Obtain the list of names of available Apple Silicon SoC(s) as identified in
:attr:`_handles`.
Returns
-------
list
The list of available Apple Silicon SoC names
"""
names = [d.name for d in self._tf_devices]
self._log("debug", f"GPU Devices: {names}")
return names
def _get_vram(self) -> list[int]:
""" Obtain the VRAM in Megabytes for each available Apple Silicon SoC(s) as identified in
:attr:`_handles`.
Notes
-----
`tf.config.experimental.get_memory_info('GPU:0')` does not work, so uses psutil instead.
The total memory on the system is returned as it is shared between the CPU and the GPU.
There is no dedicated VRAM.
Returns
-------
list
The RAM in Megabytes for each available Apple Silicon SoC
"""
vram = [int((psutil.virtual_memory().total / self._device_count) / (1024 * 1024))
for _ in range(self._device_count)]
self._log("debug", f"SoC RAM: {vram}")
return vram
def _get_free_vram(self) -> list[int]:
""" Obtain the amount of VRAM that is available, in Megabytes, for each available Apple
Silicon SoC.
Returns
-------
list
            List of `int`s containing the amount of RAM available, in Megabytes, for each
            available SoC as corresponding to the values in :attr:`_handles`
"""
vram = [int((psutil.virtual_memory().available / self._device_count) / (1024 * 1024))
for _ in range(self._device_count)]
self._log("debug", f"SoC RAM free: {vram}")
return vram
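# Minimal standalone sketch of the shared-memory split performed in
# _get_vram(): total system RAM is divided evenly across the detected
# devices and reported in megabytes (the device count here is illustrative).
_device_count = 1  # Apple Silicon exposes a single logical GPU device
_per_device_mb = int((psutil.virtual_memory().total / _device_count) / (1024 * 1024))
print(_per_device_mb)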
| null |
1,715 |
# Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Endpoint definitions for stack components."""
from typing import List
from uuid import UUID
from fastapi import APIRouter, Depends, Security
from zenml.constants import API, COMPONENT_TYPES, STACK_COMPONENTS, VERSION_1
from zenml.enums import PermissionType, StackComponentType
from zenml.models import (
ComponentFilterModel,
ComponentResponseModel,
ComponentUpdateModel,
)
from zenml.models.page_model import Page
from zenml.zen_server.auth import AuthContext, authorize
from zenml.zen_server.exceptions import error_response
from zenml.zen_server.utils import (
handle_exceptions,
make_dependable,
zen_store,
)
router = APIRouter(
prefix=API + VERSION_1 + STACK_COMPONENTS,
tags=["stack_components"],
responses={401: error_response},
)
types_router = APIRouter(
prefix=API + VERSION_1 + COMPONENT_TYPES,
tags=["stack_components"],
responses={401: error_response},
)
@router.get(
"",
response_model=Page[ComponentResponseModel],
responses={401: error_response, 404: error_response, 422: error_response},
)
@handle_exceptions
def list_stack_components(
component_filter_model: ComponentFilterModel = Depends(
make_dependable(ComponentFilterModel)
),
auth_context: AuthContext = Security(
authorize, scopes=[PermissionType.READ]
),
) -> Page[ComponentResponseModel]:
"""Get a list of all stack components for a specific type.
Args:
component_filter_model: Filter model used for pagination, sorting,
filtering
auth_context: Authentication Context
Returns:
        Page of stack components matching the filter.
"""
component_filter_model.set_scope_user(user_id=auth_context.user.id)
return zen_store().list_stack_components(
component_filter_model=component_filter_model
)
@router.get(
"/{component_id}",
response_model=ComponentResponseModel,
responses={401: error_response, 404: error_response, 422: error_response},
)
@handle_exceptions
def get_stack_component(
component_id: UUID,
_: AuthContext = Security(authorize, scopes=[PermissionType.READ]),
) -> ComponentResponseModel:
"""Returns the requested stack component.
Args:
component_id: ID of the stack component.
Returns:
The requested stack component.
"""
return zen_store().get_stack_component(component_id)
@router.put(
"/{component_id}",
response_model=ComponentResponseModel,
responses={401: error_response, 404: error_response, 422: error_response},
)
@handle_exceptions
def update_stack_component(
component_id: UUID,
component_update: ComponentUpdateModel,
_: AuthContext = Security(authorize, scopes=[PermissionType.WRITE]),
) -> ComponentResponseModel:
"""Updates a stack component.
Args:
component_id: ID of the stack component.
component_update: Stack component to use to update.
Returns:
Updated stack component.
"""
return zen_store().update_stack_component(
component_id=component_id,
component_update=component_update,
)
@router.delete(
"/{component_id}",
responses={401: error_response, 404: error_response, 422: error_response},
)
@handle_exceptions
def deregister_stack_component(
component_id: UUID,
_: AuthContext = Security(authorize, scopes=[PermissionType.WRITE]),
) -> None:
"""Deletes a stack component.
Args:
component_id: ID of the stack component.
"""
zen_store().delete_stack_component(component_id)
@types_router.get(
"",
response_model=List[str],
responses={401: error_response, 404: error_response, 422: error_response},
)
@handle_exceptions
def METHOD_NAME(
_: AuthContext = Security(authorize, scopes=[PermissionType.READ])
) -> List[str]:
"""Get a list of all stack component types.
Returns:
        List of all stack component types.
"""
return StackComponentType.values()
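# Hypothetical client-side call against the component-types route above; the
# host, port, path segment, and token are assumptions, not values confirmed
# by this module.
if __name__ == "__main__":
    import requests
    resp = requests.get(
        "http://localhost:8237/api/v1/component-types",
        headers={"Authorization": "Bearer <token>"},
    )
    print(resp.json())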
| null |
1,716 |
import io
from wsgiref.validate import InputWrapper
import pytest
import falcon
from falcon import request_helpers
import falcon.request
from falcon.stream import BoundedStream
import falcon.testing as testing
SIZE_1_KB = 1024
@pytest.fixture
def resource():
return testing.SimpleTestResource()
@pytest.fixture
def client():
app = falcon.App()
return testing.TestClient(app)
class TestRequestBody:
def _get_wrapped_stream(self, req):
# Getting wrapped wsgi.input:
stream = req.stream
if isinstance(stream, BoundedStream):
stream = stream.stream
if isinstance(stream, InputWrapper):
stream = stream.input
return stream
def test_empty_body(self, client, resource):
client.app.add_route('/', resource)
client.simulate_request(path='/', body='')
stream = self._get_wrapped_stream(resource.captured_req)
assert stream.tell() == 0
def METHOD_NAME(self, client, resource):
client.app.add_route('/', resource)
expected_body = '.'
client.simulate_request(path='/', body=expected_body)
stream = self._get_wrapped_stream(resource.captured_req)
actual_body = stream.read(1)
assert actual_body == expected_body.encode('utf-8')
assert stream.tell() == 1
def test_tiny_body_overflow(self, client, resource):
client.app.add_route('/', resource)
expected_body = '.'
client.simulate_request(path='/', body=expected_body)
stream = self._get_wrapped_stream(resource.captured_req)
# Read too many bytes; shouldn't block
actual_body = stream.read(len(expected_body) + 1)
assert actual_body == expected_body.encode('utf-8')
def test_read_body(self, client, resource):
client.app.add_route('/', resource)
expected_body = testing.rand_string(SIZE_1_KB // 2, SIZE_1_KB)
expected_len = len(expected_body)
headers = {'Content-Length': str(expected_len)}
client.simulate_request(path='/', body=expected_body, headers=headers)
content_len = resource.captured_req.get_header('content-length')
assert content_len == str(expected_len)
stream = self._get_wrapped_stream(resource.captured_req)
actual_body = stream.read()
assert actual_body == expected_body.encode('utf-8')
        stream.seek(0, 2)
        assert stream.tell() == expected_len
def test_bounded_stream_property_empty_body(self):
"""Test that we can get a bounded stream outside of wsgiref."""
environ = testing.create_environ()
req = falcon.Request(environ)
bounded_stream = req.bounded_stream
# NOTE(kgriffs): Verify that we aren't creating a new object
# each time the property is called. Also ensures branch
# coverage of the property implementation.
assert bounded_stream is req.bounded_stream
data = bounded_stream.read()
assert len(data) == 0
def test_body_stream_wrapper(self):
data = testing.rand_string(SIZE_1_KB // 2, SIZE_1_KB)
expected_body = data.encode('utf-8')
expected_len = len(expected_body)
# NOTE(kgriffs): Append newline char to each line
# to match readlines behavior
expected_lines = [(line + '\n').encode('utf-8') for line in data.split('\n')]
# NOTE(kgriffs): Remove trailing newline to simulate
# what readlines does
expected_lines[-1] = expected_lines[-1][:-1]
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
assert body.read() == expected_body
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
assert body.read(2) == expected_body[0:2]
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
assert body.read(expected_len + 1) == expected_body
# NOTE(kgriffs): Test that reading past the end does not
# hang, but returns the empty string.
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
for i in range(expected_len + 1):
expected_value = expected_body[i : i + 1] if i < expected_len else b''
assert body.read(1) == expected_value
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
assert body.readline() == expected_lines[0]
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
assert body.readline(-1) == expected_lines[0]
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
assert body.readline(expected_len + 1) == expected_lines[0]
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
assert body.readlines() == expected_lines
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
assert body.readlines(-1) == expected_lines
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
assert body.readlines(expected_len + 1) == expected_lines
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
assert next(body) == expected_lines[0]
stream = io.BytesIO(expected_body)
body = request_helpers.Body(stream, expected_len)
for i, line in enumerate(body):
assert line == expected_lines[i]
def test_request_repr(self):
environ = testing.create_environ()
req = falcon.Request(environ)
_repr = '<%s: %s %r>' % (req.__class__.__name__, req.method, req.url)
assert req.__repr__() == _repr
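# A minimal standalone check of the bounded_stream behaviour exercised in
# test_bounded_stream_property_empty_body() above, here with a non-empty body.
def test_bounded_stream_reads_body():
    environ = testing.create_environ(body='hello')
    req = falcon.Request(environ)
    assert req.bounded_stream.read() == b'hello'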
| null |
1,717 |
"""
Support for L{attrs}.
"""
import ast
import inspect
from typing import Optional, Union
from pydoctor import astbuilder, model, astutils, extensions
import attr
attrs_decorator_signature = inspect.signature(attr.s)
"""Signature of the L{attr.s} class decorator."""
attrib_signature = inspect.signature(attr.ib)
"""Signature of the L{attr.ib} function for defining class attributes."""
def uses_auto_attribs(call: ast.AST, module: model.Module) -> bool:
"""Does the given L{attr.s()} decoration contain C{auto_attribs=True}?
@param call: AST of the call to L{attr.s()}.
This function will assume that L{attr.s()} is called without
verifying that.
@param module: Module that contains the call, used for error reporting.
@return: L{True} if L{True} is passed for C{auto_attribs},
L{False} in all other cases: if C{auto_attribs} is not passed,
if an explicit L{False} is passed or if an error was reported.
"""
if not isinstance(call, ast.Call):
return False
if not astutils.node2fullname(call.func, module) in ('attr.s', 'attr.attrs', 'attr.attributes'):
return False
try:
args = astutils.bind_args(attrs_decorator_signature, call)
except TypeError as ex:
message = str(ex).replace("'", '"')
module.report(
f"Invalid arguments for attr.s(): {message}",
lineno_offset=call.lineno
)
return False
auto_attribs_expr = args.arguments.get('auto_attribs')
if auto_attribs_expr is None:
return False
try:
value = ast.literal_eval(auto_attribs_expr)
except ValueError:
module.report(
'Unable to figure out value for "auto_attribs" argument '
'to attr.s(), maybe too complex',
lineno_offset=call.lineno
)
return False
if not isinstance(value, bool):
module.report(
f'Value for "auto_attribs" argument to attr.s() '
f'has type "{type(value).__name__}", expected "bool"',
lineno_offset=call.lineno
)
return False
return value
def is_attrib(expr: Optional[ast.expr], ctx: model.Documentable) -> bool:
"""Does this expression return an C{attr.ib}?"""
return isinstance(expr, ast.Call) and astutils.node2fullname(expr.func, ctx) in (
'attr.ib', 'attr.attrib', 'attr.attr'
)
def attrib_args(expr: ast.expr, ctx: model.Documentable) -> Optional[inspect.BoundArguments]:
"""Get the arguments passed to an C{attr.ib} definition.
@return: The arguments, or L{None} if C{expr} does not look like
an C{attr.ib} definition or the arguments passed to it are invalid.
"""
if isinstance(expr, ast.Call) and astutils.node2fullname(expr.func, ctx) in (
'attr.ib', 'attr.attrib', 'attr.attr'
):
try:
return astutils.bind_args(attrib_signature, expr)
except TypeError as ex:
message = str(ex).replace("'", '"')
ctx.module.report(
f"Invalid arguments for attr.ib(): {message}",
lineno_offset=expr.lineno
)
return None
def annotation_from_attrib(
self: astbuilder.ModuleVistor,
expr: ast.expr,
ctx: model.Documentable
) -> Optional[ast.expr]:
"""Get the type of an C{attr.ib} definition.
@param expr: The L{ast.Call} expression's AST.
@param ctx: The context in which this expression is evaluated.
@return: A type annotation, or None if the expression is not
an C{attr.ib} definition or contains no type information.
"""
args = attrib_args(expr, ctx)
if args is not None:
typ = args.arguments.get('type')
if typ is not None:
return astutils.unstring_annotation(typ, ctx)
default = args.arguments.get('default')
if default is not None:
return astbuilder._infer_type(default)
return None
class ModuleVisitor(extensions.ModuleVisitorExt):
def visit_ClassDef(self, node:ast.ClassDef) -> None:
"""
Called when a class definition is visited.
"""
cls = self.visitor.builder.current
if not isinstance(cls, model.Class) or cls.name!=node.name:
return
assert isinstance(cls, AttrsClass)
cls.auto_attribs = any(uses_auto_attribs(decnode, cls.module) for decnode in node.decorator_list)
def METHOD_NAME(self, target:str, node: Union[ast.Assign, ast.AnnAssign]) -> None:
cls = self.visitor.builder.current
assert isinstance(cls, AttrsClass)
attr: Optional[model.Documentable] = cls.contents.get(target)
if attr is None:
return
if not isinstance(attr, model.Attribute):
return
annotation = node.annotation if isinstance(node, ast.AnnAssign) else None
if is_attrib(node.value, cls) or (
cls.auto_attribs and \
annotation is not None and \
not astutils.is_using_typing_classvar(annotation, cls)):
attr.kind = model.DocumentableKind.INSTANCE_VARIABLE
if annotation is None and node.value is not None:
attr.annotation = annotation_from_attrib(self.visitor, node.value, cls)
def _handleAttrsAssignment(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
for dottedname in astutils.iterassign(node):
if dottedname and len(dottedname)==1:
# Here, we consider single name assignment only
current = self.visitor.builder.current
if isinstance(current, model.Class):
self.METHOD_NAME(
dottedname[0], node
)
def visit_Assign(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
self._handleAttrsAssignment(node)
visit_AnnAssign = visit_Assign
class AttrsClass(extensions.ClassMixin, model.Class):
def setup(self) -> None:
super().setup()
self.auto_attribs: bool = False
"""
L{True} if this class uses the C{auto_attribs} feature of the L{attrs}
library to automatically convert annotated fields into attributes.
"""
def setup_pydoctor_extension(r:extensions.ExtRegistrar) -> None:
r.register_astbuilder_visitor(ModuleVisitor)
r.register_mixin(AttrsClass)
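# Illustrative input for uses_auto_attribs(): parsing this snippet and
# passing the class's decorator Call node (together with a pydoctor Module
# for error reporting) would yield True. Only stdlib ast is used here.
_EXAMPLE = """
import attr

@attr.s(auto_attribs=True)
class Point:
    x: int = 0
"""
_example_decorator = ast.parse(_EXAMPLE).body[1].decorator_list[0]
assert isinstance(_example_decorator, ast.Call)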
| null |
1,718 |
from pprint import pformat
from urllib.parse import urlencode
from tornado.httputil import (
HTTPHeaders,
parse_cookie,
)
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application
from pcs.daemon import (
ruby_pcsd,
session,
)
from pcs.daemon.app.auth import PCSD_SESSION
USER = "user"
GROUPS = ["group1", "group2"]
PASSWORD = "password"
class RubyPcsdWrapper(ruby_pcsd.Wrapper):
def __init__(self, request_type):
# pylint: disable=super-init-not-called
self.request_type = request_type
self.status_code = 200
self.headers = {"Some": "value"}
self.body = b"Success action"
async def METHOD_NAME(
self,
request_type,
http_request=None,
payload=None,
):
if request_type != self.request_type:
raise AssertionError(
f"Wrong request type: expected '{self.request_type}'"
f" but was {request_type}"
)
return {
"headers": self.headers,
"status": self.status_code,
"body": self.body,
}
class AppTest(AsyncHTTPTestCase):
wrapper = None
# pylint: disable=abstract-method
def get_app(self):
return Application(self.get_routes())
def get_routes(self):
# pylint: disable=no-self-use
return []
def fetch(self, path, raise_error=False, **kwargs):
# pylint: disable=arguments-differ
if "follow_redirects" not in kwargs:
kwargs["follow_redirects"] = False
response = super().fetch(path, raise_error=raise_error, **kwargs)
# "Strict-Transport-Security" header is expected in every response
self.assertTrue(
"Strict-Transport-Security" in response.headers,
f"No 'Strict-Transport-Security' header in response for '{path}'",
)
return response
def post(self, path, body, **kwargs):
kwargs.update(
{
"method": "POST",
"body": urlencode(body),
}
)
return self.fetch(path, **kwargs)
def get(self, path, **kwargs):
return self.fetch(path, **kwargs)
def assert_headers_contains(self, headers: HTTPHeaders, contained: dict):
self.assertTrue(
all(item in headers.get_all() for item in contained.items()),
"Headers does not contain expected headers"
"\n Expected headers:"
f"\n {pformat(contained, indent=6)}"
"\n All headers:"
f"\n {pformat(dict(headers.get_all()), indent=6)}",
)
def assert_wrappers_response(self, response):
self.assertEqual(response.code, self.wrapper.status_code)
self.assert_headers_contains(response.headers, self.wrapper.headers)
self.assertEqual(response.body, self.wrapper.body)
class AppUiTestMixin(AppTest):
def setUp(self):
self.session_storage = session.Storage(lifetime_seconds=10)
super().setUp()
def assert_session_in_response(self, response, sid=None):
self.assertTrue("Set-Cookie" in response.headers)
cookie = parse_cookie(response.headers["Set-Cookie"])
        self.assertIn(PCSD_SESSION, cookie)
if sid:
self.assertEqual(cookie[PCSD_SESSION], sid)
return cookie[PCSD_SESSION]
def fetch(self, path, raise_error=False, **kwargs):
if "sid" in kwargs:
if "headers" not in kwargs:
kwargs["headers"] = {}
kwargs["headers"]["Cookie"] = f"{PCSD_SESSION}={kwargs['sid']}"
del kwargs["sid"]
if "is_ajax" in kwargs:
if "headers" not in kwargs:
kwargs["headers"] = {}
kwargs["headers"]["X-Requested-With"] = "XMLHttpRequest"
del kwargs["is_ajax"]
if "follow_redirects" not in kwargs:
kwargs["follow_redirects"] = False
return super().fetch(path, raise_error=raise_error, **kwargs)
def create_login_session(self):
return self.session_storage.login(USER)
def assert_success_response(self, response, expected_body):
self.assertEqual(response.code, 200)
self.assertEqual(response.body.decode(), expected_body)
def assert_unauth_ajax(self, response):
self.assertEqual(response.code, 401)
self.assertEqual(response.body, b'{"notauthorized":"true"}')
| null |
1,719 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class CreateDdrInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'CreateDdrInstance')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_DBInstanceStorage(self): # Integer
return self.get_query_params().get('DBInstanceStorage')
def set_DBInstanceStorage(self, DBInstanceStorage): # Integer
self.add_query_param('DBInstanceStorage', DBInstanceStorage)
def get_SystemDBCharset(self): # String
return self.get_query_params().get('SystemDBCharset')
def set_SystemDBCharset(self, SystemDBCharset): # String
self.add_query_param('SystemDBCharset', SystemDBCharset)
def get_EngineVersion(self): # String
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self, EngineVersion): # String
self.add_query_param('EngineVersion', EngineVersion)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_DBInstanceDescription(self): # String
return self.get_query_params().get('DBInstanceDescription')
def set_DBInstanceDescription(self, DBInstanceDescription): # String
self.add_query_param('DBInstanceDescription', DBInstanceDescription)
def get_Period(self): # String
return self.get_query_params().get('Period')
def set_Period(self, Period): # String
self.add_query_param('Period', Period)
def get_BackupSetId(self): # String
return self.get_query_params().get('BackupSetId')
def set_BackupSetId(self, BackupSetId): # String
self.add_query_param('BackupSetId', BackupSetId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DBInstanceClass(self): # String
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self, DBInstanceClass): # String
self.add_query_param('DBInstanceClass', DBInstanceClass)
def get_SecurityIPList(self): # String
return self.get_query_params().get('SecurityIPList')
def set_SecurityIPList(self, SecurityIPList): # String
self.add_query_param('SecurityIPList', SecurityIPList)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_PrivateIpAddress(self): # String
return self.get_query_params().get('PrivateIpAddress')
def set_PrivateIpAddress(self, PrivateIpAddress): # String
self.add_query_param('PrivateIpAddress', PrivateIpAddress)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_InstanceNetworkType(self): # String
return self.get_query_params().get('InstanceNetworkType')
def set_InstanceNetworkType(self, InstanceNetworkType): # String
self.add_query_param('InstanceNetworkType', InstanceNetworkType)
def get_ConnectionMode(self): # String
return self.get_query_params().get('ConnectionMode')
def set_ConnectionMode(self, ConnectionMode): # String
self.add_query_param('ConnectionMode', ConnectionMode)
def get_SourceDBInstanceName(self): # String
return self.get_query_params().get('SourceDBInstanceName')
def set_SourceDBInstanceName(self, SourceDBInstanceName): # String
self.add_query_param('SourceDBInstanceName', SourceDBInstanceName)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Engine(self): # String
return self.get_query_params().get('Engine')
def set_Engine(self, Engine): # String
self.add_query_param('Engine', Engine)
def get_DBInstanceStorageType(self): # String
return self.get_query_params().get('DBInstanceStorageType')
def METHOD_NAME(self, DBInstanceStorageType): # String
self.add_query_param('DBInstanceStorageType', DBInstanceStorageType)
def get_DBInstanceNetType(self): # String
return self.get_query_params().get('DBInstanceNetType')
def set_DBInstanceNetType(self, DBInstanceNetType): # String
self.add_query_param('DBInstanceNetType', DBInstanceNetType)
def get_RestoreTime(self): # String
return self.get_query_params().get('RestoreTime')
def set_RestoreTime(self, RestoreTime): # String
self.add_query_param('RestoreTime', RestoreTime)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_UsedTime(self): # String
return self.get_query_params().get('UsedTime')
def set_UsedTime(self, UsedTime): # String
self.add_query_param('UsedTime', UsedTime)
def get_RestoreType(self): # String
return self.get_query_params().get('RestoreType')
def set_RestoreType(self, RestoreType): # String
self.add_query_param('RestoreType', RestoreType)
def get_VPCId(self): # String
return self.get_query_params().get('VPCId')
def set_VPCId(self, VPCId): # String
self.add_query_param('VPCId', VPCId)
def get_PayType(self): # String
return self.get_query_params().get('PayType')
def set_PayType(self, PayType): # String
self.add_query_param('PayType', PayType)
def get_SourceRegion(self): # String
return self.get_query_params().get('SourceRegion')
def set_SourceRegion(self, SourceRegion): # String
self.add_query_param('SourceRegion', SourceRegion)
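# Round-trip sketch of the query-parameter accessors above: each setter
# records its value via add_query_param() and the matching getter reads it
# back from the query-parameter dict.
if __name__ == "__main__":
    request = CreateDdrInstanceRequest()
    request.set_Engine('MySQL')
    assert request.get_Engine() == 'MySQL'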
| null |
1,720 |
# Copyright (C) 2021-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from copy import deepcopy
from random import shuffle
import numpy as np
import openvino.runtime as ov
import pytest
from otx.core.ov.graph.graph import Graph, SortedDict
from tests.test_suite.e2e_test_system import e2e_pytest_unit
class TestSortedDict:
@e2e_pytest_unit
def test(self):
instance = SortedDict("key")
orders = list("abcdefghijklmnopqrstuvwxyz")
cands = list("abcdefghijklmnopqrstuvwxyz")
shuffle(cands)
for cand in cands:
instance[cand] = {"edge": {"key": ord(cand)}}
idx = 0
for key in instance:
assert key == orders[idx]
idx += 1
idx = len(orders) - 1
for key in reversed(instance):
assert key == orders[idx]
idx -= 1
repr(instance.keys())
idx = 0
for key in instance.keys():
assert key == orders[idx]
idx += 1
idx = len(orders) - 1
for key in reversed(instance.keys()):
assert key == orders[idx]
idx -= 1
repr(instance.values())
idx = 0
for value in instance.values():
assert value["edge"]["key"] == ord(orders[idx])
idx += 1
idx = len(orders) - 1
for value in reversed(instance.values()):
assert value["edge"]["key"] == ord(orders[idx])
idx -= 1
repr(instance.values())
idx = 0
for key, value in instance.items():
assert key == orders[idx]
assert value["edge"]["key"] == ord(orders[idx])
idx += 1
idx = len(orders) - 1
for key, value in reversed(instance.items()):
assert key == orders[idx]
assert value["edge"]["key"] == ord(orders[idx])
idx -= 1
instance2 = deepcopy(instance)
idx = 0
for key, value in instance2.items():
assert key == orders[idx]
assert value["edge"]["key"] == ord(orders[idx])
idx += 1
instance.pop("i")
assert "i" not in instance
assert len(instance) == len(orders) - 1
instance.clear()
assert len(instance) == 0
class TestGraph:
@pytest.fixture(autouse=True)
def setup(self) -> None:
param = ov.opset10.parameter([1, 3, 64, 64], ov.Type.f32, name="in")
constant = ov.opset10.constant(np.array([103.0, 116.0, 123.0]).reshape(1, 3, 1, 1), ov.Type.f32)
node = ov.opset10.subtract(param, constant, "numpy")
constant = ov.opset10.constant(np.random.normal(size=(32, 3, 3, 3)), ov.Type.f32)
node = ov.opset10.convolution(node, constant, [2, 2], [1, 1], [1, 1], [1, 1], "explicit")
constant = ov.opset10.constant(np.random.normal(size=(1, 32, 1, 1)), ov.Type.f32)
node = ov.opset10.add(node, constant, "numpy")
node = ov.opset10.clamp(node, 0, 6)
result = ov.opset10.result(node, name="out")
ov_model = ov.Model([result], [param], "model")
self.graph = Graph.from_ov(ov_model)
assert isinstance(self.graph, Graph)
@e2e_pytest_unit
def test_get_edge_data(self):
nodes = [node for node in self.graph]
assert self.graph.get_edge_data(nodes[0], nodes[-1]) is None
assert self.graph.get_edge_data(nodes[0], nodes[2])
@e2e_pytest_unit
def test_remove_node(self):
node = self.graph.get_nodes_by_types(["Subtract"])[0]
predecessor = list(self.graph.predecessors(node))[0]
successor = list(self.graph.successors(node))[0]
self.graph.remove_node(node, keep_connect=True)
assert self.graph.get_edge_data(predecessor, successor)
node = self.graph.get_nodes_by_types(["Convolution"])[0]
predecessor = list(self.graph.predecessors(node))[0]
successor = list(self.graph.successors(node))[0]
self.graph.remove_node(node, keep_connect=False)
assert self.graph.get_edge_data(predecessor, successor) is None
@e2e_pytest_unit
def test_replace_node(self):
node = self.graph.get_nodes_by_types(["Subtract"])[0]
new_node = deepcopy(node)
predecessors = list(self.graph.predecessors(node))
successors = list(self.graph.successors(node))
self.graph.replace_node(node, new_node)
assert node not in self.graph
assert new_node in self.graph
assert predecessors == list(self.graph.predecessors(new_node))
assert successors == list(self.graph.successors(new_node))
@e2e_pytest_unit
def test_add_edge(self):
node = self.graph.get_nodes_by_types(["Subtract"])[0]
new_node = deepcopy(node)
predecessors = list(self.graph.predecessors(node))
successors = list(self.graph.successors(node))
self.graph.remove_node(node)
for predecessor in predecessors:
assert self.graph.get_edge_data(predecessor, new_node) is None
self.graph.add_edge(predecessor, new_node)
assert self.graph.get_edge_data(predecessor, new_node)
for successor in successors:
assert self.graph.get_edge_data(new_node, successor) is None
self.graph.add_edge(new_node, successor)
assert self.graph.get_edge_data(new_node, successor)
assert new_node in self.graph
@e2e_pytest_unit
def test_get_nodes_by_type_pattern(self):
node = self.graph.get_nodes_by_types(["Subtract"])[0]
founds = self.graph.get_nodes_by_type_pattern(["Subtract", "Clamp"], node)
for found in founds:
start, end = found
assert start == node
assert start.type == "Subtract"
assert end.type == "Clamp"
@e2e_pytest_unit
def METHOD_NAME(self):
self.graph.remove_normalize_nodes()
assert len(self.graph._normalize_nodes) == 0
@e2e_pytest_unit
def test_topological_sort(self):
assert len(list(self.graph.topological_sort())) == len(self.graph)
@e2e_pytest_unit
def test_clean_up(self):
nodes = self.graph.get_nodes_by_types(["Subtract"])
self.graph.remove_node(nodes[0])
n_nodes = len(self.graph)
self.graph.clean_up()
assert n_nodes > len(self.graph)
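# Quick illustration of the ordering contract exercised in TestSortedDict,
# assuming the otx package is importable: keys inserted out of order come
# back sorted on iteration.
def test_sorted_dict_example():
    d = SortedDict("key")
    d["b"] = {"edge": {"key": 2}}
    d["a"] = {"edge": {"key": 1}}
    assert list(d) == ["a", "b"]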
| null |
1,721 |
#!/usr/bin/env python
#/*##########################################################################
#
# The PyMca X-Ray Fluorescence Toolkit
#
# Copyright (c) 2004-2020 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
from __future__ import with_statement
__author__ = "V.A. Sole - ESRF Data Analysis"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
import os
import sys
import re
import numpy
from PyMca5.PyMcaCore import DataObject
SOURCE_TYPE = "EdfFileStack"
class LuciaMap(DataObject.DataObject):
def __init__(self, filename, infofile=None):
DataObject.DataObject.__init__(self)
with open(filename, 'r') as f:
data = f.read()
data.replace("\r\n", "\n")
self.sourceName = [filename]
firstByte = data.index("\n\n")
header = data[0:firstByte]
#get rid of the date
data = data[firstByte:]
#leave only the '----' as separator
data.replace("\r", "")
data.replace("\n", "")
sep = '-'
while sep in data:
sep = sep + '-'
sep = sep[1:]
data = data.split(sep)
if len(data[0]) != len(data[-1]):
if len(data[0]) > 1:
del data[-1]
else:
del data[0]
#get the number of channels
exp = re.compile(r'(-?[0-9]+\.?[0-9]*)')
spectrum = [float(x) for x in exp.findall(data[0])]
self.nChannels = len(spectrum)
self.nSpectra = len(data)
self.nRows = self.nSpectra
#try to get the information
if infofile is None:
infofile = ""
split = filename.split('_')
if len(split) > 1:
for i in range(len(split) - 1):
if i == 0:
infofile = split[i]
else:
infofile += "_" + split[i]
infofile = infofile + "_Infos_" +\
split[-1].replace('.mca', '.dat')
if os.path.exists(infofile):
info = self.METHOD_NAME(infofile)
if ('vwidth' in info) and ('vstep' in info):
vwidth = info['vwidth']
vstep = info['vstep']
if abs(vstep) > 0:
self.nRows = int((vwidth / vstep) + 1)
#fill the header
self.header = header
#arrange as an EDF Stack
self.info = {}
self.__nFiles = 1
self.__nImagesPerFile = 1
#self.nRows = 41
        self.nCols = self.nSpectra // self.nRows
self.data = numpy.zeros((self.nRows,
self.nCols,
self.nChannels),
numpy.float32)
n = 0
for i in range(self.nRows):
for j in range(self.nCols):
s = data[n]
spectrum = numpy.array([float(x) for x in exp.findall(s)])
self.data[i, j, :] = spectrum[:]
n = n + 1
shape = self.data.shape
for i in range(len(shape)):
key = 'Dim_%d' % (i + 1,)
self.info[key] = shape[i]
self.info["SourceType"] = SOURCE_TYPE
self.info["SourceName"] = self.sourceName
self.info["Size"] = self.__nFiles * self.__nImagesPerFile
self.info["NumberOfFiles"] = self.__nFiles * 1
self.info["FileIndex"] = 0
self.info["McaCalib"] = [0.0, 1.0, 0.0]
self.info["Channel0"] = 0.0
def METHOD_NAME(self, filename):
'''
This dictionary is to be internally normalized for the time
being no I0 nor dead time
'''
exp = re.compile(r'(-?[0-9]+\.?[0-9]*)')
#read the file in one go to minimize access to disk
with open(filename) as f:
data = f.readlines()
ddict = {}
for line in data:
if line.startswith("# Horizontal center position"):
ddict['center'] = [float(x) for x in exp.findall(line)][0]
elif line.startswith("# Horizontal width"):
ddict['hwidth'] = [float(x) for x in exp.findall(line)][0]
elif line.startswith("# Horizontal step"):
ddict['hstep'] = [float(x) for x in exp.findall(line)][0]
elif line.startswith("# Vertical width"):
ddict['vwidth'] = [float(x) for x in exp.findall(line)][0]
elif line.startswith("# Vertical step"):
ddict['vstep'] = [float(x) for x in exp.findall(line)][0]
return ddict
def main():
filename = None
if len(sys.argv) > 1:
filename = sys.argv[1]
elif os.path.exists("S10S_6_01.mca"):
filename = "S10S_6_01.mca"
if filename is not None:
w = LuciaMap(filename)
print(w.info)
else:
print("Please supply input filename")
if __name__ == "__main__":
main()
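# Standalone check of the number-extraction regex used by LuciaMap: it
# pulls signed decimals out of whitespace-separated text.
def _regex_example():
    exp = re.compile(r'(-?[0-9]+\.?[0-9]*)')
    assert [float(x) for x in exp.findall("1.5 -2 3.25")] == [1.5, -2.0, 3.25]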
| null |
1,722 |
import bpy
from bpy.props import *
def update_size_prop(self, context):
if context.object == None:
return
mdata = context.object.data
i = mdata.arm_lodlist_index
ar = mdata.arm_lodlist
# Clamp screen size to not exceed previous entry
if i > 0 and ar[i - 1].screen_size_prop < self.screen_size_prop:
self.screen_size_prop = ar[i - 1].screen_size_prop
class ArmLodListItem(bpy.types.PropertyGroup):
# Group of properties representing an item in the list
name: StringProperty(
name="Name",
description="A name for this item",
default="")
    enabled_prop: BoolProperty(
        name="",
        description="Whether this LOD level is enabled",
        default=True)
screen_size_prop: FloatProperty(
name="Screen Size",
description="A name for this item",
min=0.0,
max=1.0,
default=0.0,
update=update_size_prop)
class ARM_UL_LodList(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
layout.use_property_split = False
if self.layout_type in {'DEFAULT', 'COMPACT'}:
row = layout.row()
row.separator(factor=0.1)
row.prop(item, "enabled_prop")
name = item.name
if name == '':
name = 'None'
row.label(text=name, icon='OBJECT_DATAMODE')
col = row.column()
col.alignment = 'RIGHT'
col.label(text="{:.2f}".format(item.screen_size_prop))
elif self.layout_type in {'GRID'}:
layout.alignment = 'CENTER'
layout.label(text="", icon='OBJECT_DATAMODE')
class ArmLodListNewItem(bpy.types.Operator):
# Add a new item to the list
bl_idname = "arm_lodlist.new_item"
bl_label = "Add a new item"
bl_options = {'UNDO'}
def execute(self, context):
mdata = bpy.context.object.data
mdata.arm_lodlist.add()
mdata.arm_lodlist_index = len(mdata.arm_lodlist) - 1
return{'FINISHED'}
class ArmLodListDeleteItem(bpy.types.Operator):
# Delete the selected item from the list
bl_idname = "arm_lodlist.delete_item"
bl_label = "Deletes an item"
bl_options = {'INTERNAL', 'UNDO'}
@classmethod
def poll(cls, context):
""" Enable if there's something in the list """
if bpy.context.object is None:
return False
mdata = bpy.context.object.data
return len(mdata.arm_lodlist) > 0
def execute(self, context):
mdata = bpy.context.object.data
lodlist = mdata.arm_lodlist
index = mdata.arm_lodlist_index
n = lodlist[index].name
if n in context.scene.collection.objects:
obj = bpy.data.objects[n]
context.scene.collection.objects.unlink(obj)
lodlist.remove(index)
if index > 0:
index = index - 1
mdata.arm_lodlist_index = index
return{'FINISHED'}
class ArmLodListMoveItem(bpy.types.Operator):
# Move an item in the list
bl_idname = "arm_lodlist.move_item"
bl_label = "Move an item in the list"
bl_options = {'INTERNAL', 'UNDO'}
direction: EnumProperty(
items=(
('UP', 'Up', ""),
('DOWN', 'Down', ""),))
def move_index(self):
# Move index of an item render queue while clamping it
mdata = bpy.context.object.data
index = mdata.arm_lodlist_index
list_length = len(mdata.arm_lodlist) - 1
new_index = 0
if self.direction == 'UP':
new_index = index - 1
elif self.direction == 'DOWN':
new_index = index + 1
new_index = max(0, min(new_index, list_length))
mdata.arm_lodlist.move(index, new_index)
mdata.arm_lodlist_index = new_index
def execute(self, context):
mdata = bpy.context.object.data
list = mdata.arm_lodlist
index = mdata.arm_lodlist_index
if self.direction == 'DOWN':
neighbor = index + 1
self.move_index()
elif self.direction == 'UP':
neighbor = index - 1
self.move_index()
else:
return{'CANCELLED'}
return{'FINISHED'}
__REG_CLASSES = (
ArmLodListItem,
ARM_UL_LodList,
ArmLodListNewItem,
ArmLodListDeleteItem,
ArmLodListMoveItem,
)
__reg_classes, unregister = bpy.utils.register_classes_factory(__REG_CLASSES)
def METHOD_NAME():
__reg_classes()
bpy.types.Mesh.arm_lodlist = CollectionProperty(type=ArmLodListItem)
bpy.types.Mesh.arm_lodlist_index = IntProperty(name="Index for my_list", default=0)
bpy.types.Mesh.arm_lod_material = BoolProperty(name="Material Lod", description="Use materials of lod objects", default=False)
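# Standalone sketch of the index clamping used by ArmLodListMoveItem: the
# new index is kept inside [0, list_length] regardless of move direction.
def _clamp_index(new_index: int, list_length: int) -> int:
    return max(0, min(new_index, list_length))

assert _clamp_index(-1, 5) == 0
assert _clamp_index(7, 5) == 5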
| null |
1,723 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class DescribeDBInstancesByPerformanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'DescribeDBInstancesByPerformance')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Tag4value(self): # String
return self.get_query_params().get('Tag.4.value')
def set_Tag4value(self, Tag4value): # String
self.add_query_param('Tag.4.value', Tag4value)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Tag2key(self): # String
return self.get_query_params().get('Tag.2.key')
def set_Tag2key(self, Tag2key): # String
self.add_query_param('Tag.2.key', Tag2key)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Tag3key(self): # String
return self.get_query_params().get('Tag.3.key')
def set_Tag3key(self, Tag3key): # String
self.add_query_param('Tag.3.key', Tag3key)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_Tag1value(self): # String
return self.get_query_params().get('Tag.1.value')
def set_Tag1value(self, Tag1value): # String
self.add_query_param('Tag.1.value', Tag1value)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_SortKey(self): # String
return self.get_query_params().get('SortKey')
def set_SortKey(self, SortKey): # String
self.add_query_param('SortKey', SortKey)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_DBInstanceId(self): # String
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self, DBInstanceId): # String
self.add_query_param('DBInstanceId', DBInstanceId)
def get_Tag3value(self): # String
return self.get_query_params().get('Tag.3.value')
def set_Tag3value(self, Tag3value): # String
self.add_query_param('Tag.3.value', Tag3value)
def get_proxyId(self): # String
return self.get_query_params().get('proxyId')
def set_proxyId(self, proxyId): # String
self.add_query_param('proxyId', proxyId)
def get_Tag5key(self): # String
return self.get_query_params().get('Tag.5.key')
def set_Tag5key(self, Tag5key): # String
self.add_query_param('Tag.5.key', Tag5key)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_Tag5value(self): # String
return self.get_query_params().get('Tag.5.value')
def set_Tag5value(self, Tag5value): # String
self.add_query_param('Tag.5.value', Tag5value)
def get_Tags(self): # String
return self.get_query_params().get('Tags')
def METHOD_NAME(self, Tags): # String
self.add_query_param('Tags', Tags)
def get_Tag1key(self): # String
return self.get_query_params().get('Tag.1.key')
def set_Tag1key(self, Tag1key): # String
self.add_query_param('Tag.1.key', Tag1key)
def get_SortMethod(self): # String
return self.get_query_params().get('SortMethod')
def set_SortMethod(self, SortMethod): # String
self.add_query_param('SortMethod', SortMethod)
def get_Tag2value(self): # String
return self.get_query_params().get('Tag.2.value')
def set_Tag2value(self, Tag2value): # String
self.add_query_param('Tag.2.value', Tag2value)
def get_Tag4key(self): # String
return self.get_query_params().get('Tag.4.key')
def set_Tag4key(self, Tag4key): # String
self.add_query_param('Tag.4.key', Tag4key)
| null |
1,724 |
def format_call(call):
if hasattr(call, "format"):
return call.format()
return call
def show_calls(name_list, call_list):
return "\n".join(
[
" {0}. '{1}': {2}".format(i, x[0], format_call(x[1]))
for i, x in enumerate(zip(name_list, call_list))
]
)
class Queue:
def __init__(self, call_list_builder=None):
if not call_list_builder:
call_list_builder = CallListBuilder()
self.__call_list = call_list_builder.calls
self.__name_list = call_list_builder.names
self.__index = 0
def take(self, type_of_call, real_call_info=None):
if self.__index >= len(self.__call_list):
raise self.__extra_call(type_of_call, real_call_info)
call = self.__call_list[self.__index]
if call.type != type_of_call:
raise self.__unexpected_type(call, type_of_call, real_call_info)
self.__index += 1
return self.__index, call
    def has_type(self, call_type):
return any(call.type == call_type for call in self.__call_list)
@property
def remaining(self):
return self.__call_list[self.__index :]
@property
def taken(self):
return self.__call_list[: self.__index]
def error_with_context(self, message):
return AssertionError(
"{0}\nAll calls in queue (current index={1}):\n{2}".format(
message,
self.__index,
show_calls(self.__name_list, self.__call_list),
)
)
def __unexpected_type(self, call, real_type, real_call_info):
return self.error_with_context(
(
"{0}. call was expected as '{1}' type but was '{2}' type"
"\n expected call: {3}{4}"
"\nHint: check call compatibility: for example if you use"
" env.push_cib() then runner.cib.push() will be never launched"
).format(
self.__index + 1,
call.type,
real_type,
call,
"\n real call: {0}".format(real_call_info)
if real_call_info
else "",
)
)
def __extra_call(self, type_of_call, real_call_info):
return self.error_with_context(
"No next call expected, but was ({0}):\n '{1}'".format(
type_of_call, real_call_info
)
)
class CallListBuilder:
def __init__(self):
self.__call_list = []
self.__name_list = []
@property
def calls(self):
return list(self.__call_list)
@property
def names(self):
return list(self.__name_list)
def __set(self, instead_name, name, call):
"""
Replace call that has key instead_name with new call that has key name
string name -- key of the call
Call call
string instead_name -- key of call instead of which this new call is to
be placed
"""
if instead_name not in self.__name_list:
raise self.__cannot_put("instead of", instead_name, name, call)
for i, current_name in enumerate(self.__name_list):
if current_name == instead_name:
self.__call_list[i] = call
# yes we change the name as well
self.__name_list[i] = name
return
def __append(self, name, call):
"""
Append call.
string name -- key of the call
Call call
"""
self.__name_list.append(name)
self.__call_list.append(call)
def __insert(self, before_name, name, call):
"""
Insert call before call with before_name.
string before_name -- key of call before which this new call is to be
placed
string name -- key of the call
Call call
"""
if before_name not in self.__name_list:
raise self.__cannot_put("before", before_name, name, call)
index = self.__name_list.index(before_name)
self.__name_list.insert(index, name)
self.__call_list.insert(index, call)
def remove(self, name):
"""
Remove a call with the specified name
"""
try:
index = self.__name_list.index(name)
del self.__call_list[index]
del self.__name_list[index]
except ValueError as e:
raise self.__name_not_exists(name) from e
def trim_before(self, name):
"""
Remove a call with the specified name and all calls after it from the list
"""
try:
index = self.__name_list.index(name)
self.__call_list = self.__call_list[:index]
self.__name_list = self.__name_list[:index]
except ValueError as e:
raise self.__name_not_exists(name) from e
def get(self, name):
"""
Get first call with name.
string name -- key of the call
"""
try:
return self.__call_list[self.__name_list.index(name)]
except ValueError as e:
raise self.__name_not_exists(name) from e
def place(self, name, call, before=None, instead=None):
"""
        Place a call into the call list.
string name -- key of the call
Call call
string before -- key of call before which this new call is to be placed
string instead -- key of call instead of which this new call is to be
placed
"""
if name and name in self.__name_list and instead != name:
raise self.__name_exists_already(name)
if before and instead:
raise self.__cannot_use_before_and_instead(
name,
call,
before,
instead,
)
if not hasattr(call, "type") or not call.type:
raise self.__type_of_call_is_not_specified(call)
if before:
self.__insert(before, name, call)
elif instead:
self.__set(instead, name, call)
else:
self.__append(name, call)
def __error_with_context(self, message):
return AssertionError(
"{0}\nCalls in the configuration call collection are:\n{1}".format(
message,
show_calls(self.__name_list, self.__call_list),
)
)
@staticmethod
def __type_of_call_is_not_specified(call):
return AssertionError(
(
"Class {0}.{1} must have the attribute 'type' with no-falsy "
"value."
).format(call.__module__, call.__class__.__name__)
)
def __name_not_exists(self, name):
return self.__error_with_context(
"Call named '{0}' does not exist.".format(name)
)
def __name_exists_already(self, name):
return self.__error_with_context(
"Name '{0}' is in this configuration already.".format(name)
)
def __cannot_use_before_and_instead(self, name, call, before, instead):
return self.__error_with_context(
(
"Args 'before' ({0}) and 'instead' ({1}) cannot be used"
" together\n '{2}': {3}"
).format(before, instead, name, call)
)
def __cannot_put(self, where_type, where_name, name, call):
return self.__error_with_context(
(
"Cannot put call named '{0}' ({1}) {2} '{3}'"
" because '{3}' does not exist."
).format(
name,
call,
where_type,
where_name,
)
)
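# A minimal usage sketch of the two classes above; run this module directly to
# exercise it. `DummyCall` is a hypothetical stand-in for the real call
# classes, which only need a truthy `type` attribute.
if __name__ == "__main__":
    class DummyCall:
        def __init__(self, call_type):
            self.type = call_type
        def __repr__(self):
            return "DummyCall('{0}')".format(self.type)
    builder = CallListBuilder()
    builder.place("load", DummyCall("runner.cib.load"))
    builder.place("push", DummyCall("runner.cib.push"))
    queue = Queue(builder)
    index, call = queue.take("runner.cib.load")  # consumes the first call
    assert index == 1 and queue.has_type("runner.cib.push")
    # taking a call of the wrong type next would raise an AssertionError that
    # lists every queued call for context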
| null |
1,725 |
#/*##########################################################################
# Copyright (C) 2004-2022 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
__author__ = "V.A. Sole - ESRF"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
from PyMca5.PyMcaGui import PyMcaQt as qt
# Older Qt bindings wrap header data in QVariant; newer ones accept plain values
if hasattr(qt, 'QStringList'):
    MyQVariant = qt.QVariant
else:
    def MyQVariant(x=None):
        return x
from . import NumpyArrayTableModel
import sys
class HorizontalHeader(qt.QAbstractItemModel):
def __init__(self, parent=None):
qt.QAbstractItemModel.__init__(self, parent)
def columnCount(self, modelIndex):
return self.parent().columnCount()
def headerData(self, section, orientation, role=qt.Qt.DisplayRole):
if role == qt.Qt.DisplayRole:
return METHOD_NAME("%d" % section)
return METHOD_NAME()
class VerticalHeader(qt.QAbstractItemModel):
def __init__(self, parent=None):
qt.QAbstractItemModel.__init__(self, parent)
def rowCount(self, modelIndex):
return self.parent().rowCount()
def headerData(self, section, orientation, role=qt.Qt.DisplayRole):
if role == qt.Qt.DisplayRole:
return METHOD_NAME("%d" % section)
return METHOD_NAME()
class NumpyArrayTableView(qt.QTableView):
def __init__(self, parent=None):
qt.QTableView.__init__(self, parent)
self._model = NumpyArrayTableModel.NumpyArrayTableModel(self)
self.setModel(self._model)
self._horizontalHeaderModel = HorizontalHeader(self._model)
self._verticalHeaderModel = VerticalHeader(self._model)
self.horizontalHeader().setModel(self._horizontalHeaderModel)
self.verticalHeader().setModel(self._verticalHeaderModel)
def setArrayData(self, data):
t = "%s" % data.dtype
if '|' in t:
fmt = "%s"
else:
fmt = "%g"
self._model.setFormat(fmt)
self._model.setArrayData(data)
        # some Linux distributions need this call
self.setModel(self._model)
if sys.platform not in ['win32']:
self._horizontalHeaderModel = HorizontalHeader(self._model)
self._verticalHeaderModel = VerticalHeader(self._model)
self.horizontalHeader().setModel(self._horizontalHeaderModel)
self.verticalHeader().setModel(self._verticalHeaderModel)
def setCurrentArrayIndex(self, index):
return self._model.setCurrentArrayIndex(index)
if __name__ == "__main__":
import numpy
a = qt.QApplication([])
d = numpy.random.normal(0,1, (5, 1000,1000))
for i in range(5):
d[i, :, :] += i
w = NumpyArrayTableView()
w.setArrayData(d)
w.show()
a.exec()
| null |
1,726 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class DescribeImageVulListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeImageVulList')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClusterName(self): # String
return self.get_query_params().get('ClusterName')
def set_ClusterName(self, ClusterName): # String
self.add_query_param('ClusterName', ClusterName)
def get_Type(self): # String
return self.get_query_params().get('Type')
def set_Type(self, Type): # String
self.add_query_param('Type', Type)
def get_Tag(self): # String
return self.get_query_params().get('Tag')
def set_Tag(self, Tag): # String
self.add_query_param('Tag', Tag)
def get_Image(self): # String
return self.get_query_params().get('Image')
def set_Image(self, Image): # String
self.add_query_param('Image', Image)
def get_AliasName(self): # String
return self.get_query_params().get('AliasName')
def set_AliasName(self, AliasName): # String
self.add_query_param('AliasName', AliasName)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_ContainerId(self): # String
return self.get_query_params().get('ContainerId')
def set_ContainerId(self, ContainerId): # String
self.add_query_param('ContainerId', ContainerId)
def get_Necessity(self): # String
return self.get_query_params().get('Necessity')
def set_Necessity(self, Necessity): # String
self.add_query_param('Necessity', Necessity)
def get_Uuids(self): # String
return self.get_query_params().get('Uuids')
def set_Uuids(self, Uuids): # String
self.add_query_param('Uuids', Uuids)
def get_RepoId(self): # String
return self.get_query_params().get('RepoId')
def set_RepoId(self, RepoId): # String
self.add_query_param('RepoId', RepoId)
def get_StatusList(self): # String
return self.get_query_params().get('StatusList')
def set_StatusList(self, StatusList): # String
self.add_query_param('StatusList', StatusList)
def get_Pod(self): # String
return self.get_query_params().get('Pod')
def set_Pod(self, Pod): # String
self.add_query_param('Pod', Pod)
def get_RepoNamespace(self): # String
return self.get_query_params().get('RepoNamespace')
def set_RepoNamespace(self, RepoNamespace): # String
self.add_query_param('RepoNamespace', RepoNamespace)
def get_ScanRanges(self): # RepeatList
return self.get_query_params().get('ScanRange')
def set_ScanRanges(self, ScanRange): # RepeatList
for depth1 in range(len(ScanRange)):
self.add_query_param('ScanRange.' + str(depth1 + 1), ScanRange[depth1])
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_Digest(self): # String
return self.get_query_params().get('Digest')
def set_Digest(self, Digest): # String
self.add_query_param('Digest', Digest)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Dealed(self): # String
return self.get_query_params().get('Dealed')
def set_Dealed(self, Dealed): # String
self.add_query_param('Dealed', Dealed)
def get_CurrentPage(self): # Integer
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self, CurrentPage): # Integer
self.add_query_param('CurrentPage', CurrentPage)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_RepoName(self): # String
return self.get_query_params().get('RepoName')
def set_RepoName(self, RepoName): # String
self.add_query_param('RepoName', RepoName)
def get_Namespace(self): # String
return self.get_query_params().get('Namespace')
def set_Namespace(self, Namespace): # String
self.add_query_param('Namespace', Namespace)
	def get_RepoInstanceId(self): # String
return self.get_query_params().get('RepoInstanceId')
def set_RepoInstanceId(self, RepoInstanceId): # String
self.add_query_param('RepoInstanceId', RepoInstanceId)
def get_RepoRegionId(self): # String
return self.get_query_params().get('RepoRegionId')
def set_RepoRegionId(self, RepoRegionId): # String
self.add_query_param('RepoRegionId', RepoRegionId)
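# A hypothetical usage sketch (region and credentials are placeholders;
# assumes the standard aliyunsdkcore AcsClient API):
#
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = DescribeImageVulListRequest()
#     request.set_Type('cve')
#     request.set_PageSize(20)
#     request.set_CurrentPage(1)
#     response = client.do_action_with_exception(request)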
| null |
1,727 |
# Copyright (C) 2018-2023 The NeoVintageous Team (NeoVintageous).
#
# This file is part of NeoVintageous.
#
# NeoVintageous is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NeoVintageous is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NeoVintageous. If not, see <https://www.gnu.org/licenses/>.
from collections import namedtuple
from NeoVintageous.tests import unittest
test_data = namedtuple('test_data', 'text startRegion findChar mode expectedRegion msg')
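# Note: the region tuples are (anchor, caret) pairs in Sublime Text
# coordinates, so a reversed selection has anchor > caret; e.g. a startRegion
# of (5, 3) selects characters 3-4 with the caret on the left edge (assuming
# the usual sublime.Region semantics behind self._R()).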
NORMAL_CASES = (
test_data('0x23a5', (4, 4), 'x', unittest.NORMAL, (2, 2), 'Find behind'),
test_data('012xa5', (4, 4), 'x', unittest.NORMAL, (4, 4), 'Find previous'),
test_data('0123x5', (4, 4), 'x', unittest.NORMAL, (4, 4), 'Find self'),
test_data('0xx3a5', (4, 4), 'x', unittest.NORMAL, (3, 3), 'Find multiple'),
test_data('01x3x5', (4, 4), 'x', unittest.NORMAL, (3, 3), 'Find self multiple'),
)
INTERNAL_NORMAL_CASES = (
test_data('0x23a5', (4, 4), 'x', unittest.INTERNAL_NORMAL, (4, 2), 'Find behind'),
test_data('012xa5', (4, 4), 'x', unittest.INTERNAL_NORMAL, (4, 4), 'Find previous'),
test_data('0123x5', (4, 4), 'x', unittest.INTERNAL_NORMAL, (4, 4), 'Find self'),
test_data('0xx3a5', (4, 4), 'x', unittest.INTERNAL_NORMAL, (4, 3), 'Find multiple'),
test_data('01x3x5', (4, 4), 'x', unittest.INTERNAL_NORMAL, (4, 3), 'Find self multiple'),
)
VISUAL_MULTI_CHAR_CASES = (
test_data('0x2ba5', (5, 3), 'x', unittest.VISUAL, (5, 2), 'Reverse'),
test_data('0x23a5', (5, 1), 'x', unittest.VISUAL, (5, 1), 'Reverse find b'),
test_data('0ax3b5', (1, 5), 'x', unittest.VISUAL, (1, 4), 'Forward no crossover'),
test_data('0x2ab5', (3, 5), 'x', unittest.VISUAL, (4, 2), 'Forward crossover'),
test_data('01x3b5', (2, 5), 'x', unittest.VISUAL, (2, 4), 'Forward find a'),
test_data('01a3x5', (2, 5), 'x', unittest.VISUAL, (2, 5), 'Forward find b'),
test_data('0xb3a5', (5, 2), 'x', unittest.VISUAL, (5, 2), 'Reverse find b-1'),
test_data('0a2xb5', (1, 5), 'x', unittest.VISUAL, (1, 5), 'Forward find b-1'),
test_data('0xa3b5', (2, 5), 'x', unittest.VISUAL, (2, 3), 'Forward find a-1'),
)
VISUAL_ONE_CHAR_CASES = (
test_data('xa', (2, 0), 'x', unittest.VISUAL, (2, 0), 'Reverse find b'),
test_data('xb', (0, 2), 'x', unittest.VISUAL, (0, 2), 'Forward find a'),
test_data('xr', (2, 1), 'x', unittest.VISUAL, (2, 1), 'Reverse find previous'),
test_data('xf', (1, 2), 'x', unittest.VISUAL, (1, 2), 'Forward find previous'),
test_data('r', (1, 0), 'r', unittest.VISUAL, (1, 0), 'Reverse find self'),
test_data('f', (0, 1), 'f', unittest.VISUAL, (0, 1), 'Forward find self'),
)
VISUAL_MULTI_MATCHES_CASES = (
test_data('0xxba5', (5, 3), 'x', unittest.VISUAL, (5, 3), 'Reverse find first'),
test_data('01xxa5', (5, 3), 'x', unittest.VISUAL, (5, 3), 'Reverse find b'),
test_data('01xxb5', (3, 5), 'x', unittest.VISUAL, (3, 5), 'Forward find a'),
test_data('01xxb5', (2, 5), 'x', unittest.VISUAL, (2, 5), 'Forward find a'),
test_data('01xax5', (3, 5), 'x', unittest.VISUAL, (3, 4), 'Forward find b'),
)
VISUAL_MULTI_LINE_CASES = (
test_data('012\n456', (2, 7), '0', unittest.VISUAL, (2, 7), 'Select L1->L2, find on L1'),
test_data('012\n456', (2, 7), '4', unittest.VISUAL, (2, 6), 'Select L1->L2, find on L2'),
test_data('012\n456', (2, 4), '0', unittest.VISUAL, (3, 1), 'Select L1->LF, find on L1'),
test_data('012\n456', (2, 4), '5', unittest.VISUAL, (2, 4), 'Select L1->LF, find on L2'),
test_data('012\n456', (7, 2), '0', unittest.VISUAL, (7, 1), 'Select L2->L1, find on L1'),
test_data('012\n456', (7, 2), '4', unittest.VISUAL, (7, 2), 'Select L2->L1, find on L2'),
test_data('012\n456', (7, 3), '0', unittest.VISUAL, (7, 1), 'Select L2->LF, find on L1'),
test_data('012\n456', (7, 3), '4', unittest.VISUAL, (7, 3), 'Select L2->LF, find on L2'),
test_data('0123\n5678', (2, 4), '0', unittest.VISUAL, (3, 1), 'Select L1->LF-1, find on L1'),
)
SKIP_CASES = (
test_data('xxxx', (2, 2), 'x', unittest.NORMAL, (1, 1), 'Skip past previous match'),
test_data('xxxx', (1, 1), 'x', unittest.NORMAL, (1, 1), 'Does not skip past final match'),
)
class Test__nv_vi_big_t(unittest.ViewTestCase):
    def runTests(self, test_cases, skipping=False):
        for (i, data) in enumerate(test_cases):
self.write(data.text)
self.select(self._R(*data.startRegion))
self.view.run_command('nv_vi_reverse_find_in_line', {
'mode': data.mode,
'count': 1,
'char': data.findChar,
'inclusive': False,
'skipping': skipping
})
self._assertRegionsEqual(
self._R(*data.expectedRegion),
self.view.sel()[0],
"Failed on index {} {} : Text:\"{}\" Region:{} Find:'{}'"
.format(i, data.msg, data.text, data.startRegion, data.findChar)
)
def runTestsWithSkip(self, data):
self.runTests(data, skipping=True)
def test_normal_cases(self):
self.runTests(NORMAL_CASES)
def test_internal_normal_cases(self):
self.runTests(INTERNAL_NORMAL_CASES)
def test_visual_multiple_character_cases(self):
self.runTests(VISUAL_MULTI_CHAR_CASES)
def test_visual_single_character_cases(self):
self.runTests(VISUAL_ONE_CHAR_CASES)
def test_visual_multiple_matches_cases(self):
self.runTests(VISUAL_MULTI_MATCHES_CASES)
    def test_visual_multiple_lines_cases(self):
self.runTests(VISUAL_MULTI_LINE_CASES)
def test_skip_cases(self):
self.runTestsWithSkip(SKIP_CASES)
| null |
1,728 |
import pytest
from django.utils import timezone
from api.base.settings.defaults import API_BASE
from api.users.views import UserDraftRegistrations
from api_tests.nodes.views.test_node_draft_registration_list import DraftRegistrationTestCase
from api_tests.utils import only_supports_methods
from osf.models import RegistrationSchema
from osf_tests.factories import (
RegistrationFactory,
AuthUserFactory,
DraftRegistrationFactory,
)
from osf.utils import permissions
SCHEMA_VERSION = 2
@pytest.mark.django_db
class TestDraftRegistrationList(DraftRegistrationTestCase):
@pytest.fixture()
def other_admin(self, project_public):
user = AuthUserFactory()
project_public.add_contributor(user, permissions=permissions.ADMIN, save=True)
return user
@pytest.fixture()
def schema(self):
return RegistrationSchema.objects.get(
name='Open-Ended Registration',
schema_version=SCHEMA_VERSION)
@pytest.fixture()
def draft_registration(self, user, project_public, schema):
return DraftRegistrationFactory(
initiator=user,
registration_schema=schema,
branched_from=project_public
)
@pytest.fixture()
def url_draft_registrations(self, project_public):
return '/{}users/me/draft_registrations/'.format(API_BASE)
def test_unacceptable_methods(self):
assert only_supports_methods(UserDraftRegistrations, ['GET'])
def test_view_permissions(
self, app, user, other_admin, draft_registration,
user_write_contrib, user_read_contrib, user_non_contrib,
schema, url_draft_registrations):
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert schema._id in data[0]['relationships']['registration_schema']['links']['related']['href']
assert data[0]['id'] == draft_registration._id
assert data[0]['attributes']['registration_metadata'] == {}
        # test_other_admin_can_view_draft_list
        res = app.get(url_draft_registrations, auth=other_admin.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert schema._id in data[0]['relationships']['registration_schema']['links']['related']['href']
assert data[0]['id'] == draft_registration._id
assert data[0]['attributes']['registration_metadata'] == {}
# test_read_only_contributor_can_view_draft_list
res = app.get(
url_draft_registrations,
auth=user_read_contrib.auth)
assert len(res.json['data']) == 1
# test_read_write_contributor_can_view_draft_list
res = app.get(
url_draft_registrations,
auth=user_write_contrib.auth)
assert len(res.json['data']) == 1
# test_logged_in_non_contributor_cannot_view_draft_list
res = app.get(
url_draft_registrations,
auth=user_non_contrib.auth)
assert len(res.json['data']) == 0
# test_unauthenticated_user_cannot_view_draft_list
res = app.get(url_draft_registrations, expect_errors=True)
assert res.status_code == 401
def test_deleted_draft_registration_does_not_show_up_in_draft_list(
self, app, user, draft_registration, url_draft_registrations):
draft_registration.deleted = timezone.now()
draft_registration.save()
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 0
def test_deleted_node_does_not_show_up_in_draft_list(
self, app, user, project_public, draft_registration, url_draft_registrations):
project_public.deleted = timezone.now()
project_public.save()
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 0
def test_draft_with_registered_node_does_not_show_up_in_draft_list(
self, app, user, project_public, draft_registration, url_draft_registrations):
reg = RegistrationFactory(project=project_public, draft_registration=draft_registration)
draft_registration.registered_node = reg
draft_registration.save()
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 0
    def test_draft_with_deleted_registration_shows_up_in_draft_list(
self, app, user, project_public,
draft_registration, schema,
url_draft_registrations):
reg = RegistrationFactory(project=project_public, draft_registration=draft_registration)
draft_registration.registered_node = reg
draft_registration.save()
reg.deleted = timezone.now()
reg.save()
res = app.get(url_draft_registrations, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert schema._id in data[0]['relationships']['registration_schema']['links']['related']['href']
assert data[0]['id'] == draft_registration._id
assert data[0]['attributes']['registration_metadata'] == {}
def test_cannot_access_other_users_draft_registration(
self, app, user, other_admin, project_public,
draft_registration, schema):
url = '/{}users/{}/draft_registrations/'.format(API_BASE, user._id)
res = app.get(url, auth=other_admin.auth, expect_errors=True)
assert res.status_code == 403
def test_can_access_own_draft_registrations_with_guid(
self, app, user, draft_registration):
url = '/{}users/{}/draft_registrations/'.format(API_BASE, user._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 200
assert len(res.json['data']) == 1
| null |
1,729 |
#!/usr/bin/env python3
#
# Copyright (c) 2015 - 2023, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
'''
Finds the energy efficient frequency for a provided frequency sweep
and provides that as part of a ConstConfigIO configuration file for the GPU Activity Agent
'''
import argparse
import json
import jsonschema
import math
import os
import sys
import pandas
import numpy as np
import geopmpy.io
from experiment import util
from experiment import common_args
from experiment import machine
def extract_columns(df):
"""
Extract the columns of interest from the full report collection
dataframe.
"""
    # Explicitly a copy to avoid pandas SettingWithCopy warnings
df_filtered = df.copy()
# Use requested frequency from the agent
df_filtered['requested gpu-frequency (Hz)'] = df['FREQ_GPU_DEFAULT']
# these are the only columns we need
try:
df_filtered = df_filtered[['runtime (s)',
'package-energy (J)',
'dram-energy (J)',
'frequency (Hz)',
'gpu-frequency (Hz)',
'gpu-energy (J)',
'requested gpu-frequency (Hz)']]
    except KeyError:
        df_filtered = df_filtered[['runtime (s)',
                                   'package-energy (J)',
                                   'dram-energy (J)',
                                   'frequency (Hz)',
                                   'gpu-energy (J)',
                                   'requested gpu-frequency (Hz)']]
        # Derive the achieved GPU frequency from the raw report column, which
        # the column selection above dropped from df_filtered
        df_filtered['gpu-frequency (Hz)'] = df['GPU_CORE_FREQUENCY_STATUS']
return df_filtered
def get_config_from_frequency_sweep(full_df, mach, energy_margin, use_freq_req):
"""
The main function. full_df is a report collection dataframe
"""
    df = extract_columns(full_df)
    # Round entries to the nearest frequency step size
frequency_step = mach.gpu_frequency_step()
df.loc[:,'gpu-frequency (Hz)'] = (df['gpu-frequency (Hz)'] /
frequency_step).round(decimals=0) * frequency_step
energy_col = 'gpu-energy (J)'
if use_freq_req:
freq_col = 'requested gpu-frequency (Hz)'
else:
freq_col = 'gpu-frequency (Hz)'
gpu_freq_efficient = util.energy_efficient_frequency(df, freq_col, energy_col, energy_margin)
json_dict = {
"GPU_FREQUENCY_EFFICIENT_HIGH_INTENSITY" : {
"domain" : "board",
"description" : "Defines the efficient compute frequency to use for GPUs. " +
"This value is based on a workload that scales strongly with the frequency domain.",
"units" : "hertz",
"aggregation" : "average",
"values" : [gpu_freq_efficient],
},
}
return json_dict
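# Illustrative sketch of the energy-efficient frequency (Fe) selection that
# util.energy_efficient_frequency() is assumed to perform here: average the
# energy per frequency, then take the lowest frequency whose mean energy is
# within `energy_margin` (as a fraction) of the global minimum.
#
#     def sketch_energy_efficient_frequency(df, freq_col, energy_col, margin):
#         means = df.groupby(freq_col)[energy_col].mean()
#         budget = means.min() * (1 + margin)
#         return means[means <= budget].index.min()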
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--const-config-path', required=False, default=None,
help='path containing existing ConstConfigIO configuration file')
parser.add_argument('--gpu-energy-margin', default=0, type=float, dest='gpu_energy_margin',
help='Percentage of additional energy it is acceptable to consume if it results '
'in a lower frequency selection for Fe (energy efficient frequency). This is useful for analyzing '
'noisy systems that have many GPU frequencies near the Fe energy consumption value')
parser.add_argument('--path', required=True,
help='path containing reports and machine.json')
parser.add_argument('--use-requested-frequency', action='store_true', default=False,
dest='use_freq_req',
help='Use the frequency that was requested during the frequency sweep instead '
'of the achieved frequency for a given run. This is useful in cases where '
'multiple frequency domains or settings are impacted (i.e. core frequency causes '
'an uncore frequency change) and the achieved frequency does not reflect this '
'behavior.')
args = parser.parse_args()
try:
df = geopmpy.io.RawReportCollection('*report', dir_name=args.path).get_app_df()
except RuntimeError:
        sys.stderr.write('Error: <geopm> gen_gpu_activity_constconfig_recommendation.py: No report data found in ' + args.path +
                         '; run a frequency sweep before using this analysis.\n')
sys.exit(1)
if args.gpu_energy_margin < 0:
sys.stderr.write('Error: <geopm> gen_gpu_activity_constconfig_recommendation.py: GPU energy margin must be non-negative\n')
sys.exit(1)
    mach = machine.get_machine(args.path)
output = get_config_from_frequency_sweep(df, mach, args.gpu_energy_margin, args.use_freq_req)
    output = util.merge_const_config(output, args.const_config_path)
sys.stdout.write(json.dumps(output, indent=4) + "\n")
| null |
1,730 |
import logging
from abc import ABC, abstractmethod
from decimal import Decimal
from enum import Enum
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
from bidict import bidict
from hummingbot.client.config.config_helpers import ClientConfigAdapter
from hummingbot.connector.gateway.common_types import CancelOrderResult, PlaceOrderResult
from hummingbot.connector.gateway.gateway_in_flight_order import GatewayInFlightOrder
from hummingbot.connector.gateway.gateway_order_tracker import GatewayOrderTracker
from hummingbot.connector.trading_rule import TradingRule
from hummingbot.connector.utils import get_new_client_order_id
from hummingbot.core.data_type.common import OrderType
from hummingbot.core.data_type.in_flight_order import InFlightOrder, OrderUpdate, TradeUpdate
from hummingbot.core.data_type.order_book_message import OrderBookMessage
from hummingbot.core.data_type.trade_fee import MakerTakerExchangeFeeRates
from hummingbot.core.event.event_forwarder import EventForwarder
from hummingbot.core.event.event_listener import EventListener
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.pubsub import PubSub
from hummingbot.logger import HummingbotLogger
class CLOBAPIDataSourceBase(ABC):
_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._logger is None:
cls._logger = logging.getLogger(HummingbotLogger.logger_name_for_class(cls))
return cls._logger
def __init__(
self,
trading_pairs: List[str],
connector_spec: Dict[str, Any],
client_config_map: ClientConfigAdapter,
):
self._trading_pairs = trading_pairs
self._connector_spec = connector_spec
self._client_config = client_config_map
self._publisher = PubSub()
self._forwarders_map: Dict[Tuple[Enum, Callable], EventForwarder] = {}
self._gateway_order_tracker: Optional[GatewayOrderTracker] = None
self._markets_info: Dict[str, Any] = {}
@property
@abstractmethod
def real_time_balance_update(self) -> bool:
...
@property
@abstractmethod
def events_are_streamed(self) -> bool:
"""Set this to False if the exchange does not offer event streams."""
...
@staticmethod
@abstractmethod
def supported_stream_events() -> List[Enum]:
"""This method serves as a guide to what events a client of this class expects an implementation to
provide.
"""
...
@abstractmethod
def get_supported_order_types(self) -> List[OrderType]:
...
@abstractmethod
async def start(self):
...
@abstractmethod
async def stop(self):
...
@abstractmethod
async def place_order(
self, order: GatewayInFlightOrder, **kwargs
) -> Tuple[Optional[str], Optional[Dict[str, Any]]]:
"""
:return: A tuple of the exchange order ID and any misc order updates.
"""
...
@abstractmethod
async def batch_order_create(self, orders_to_create: List[InFlightOrder]) -> List[PlaceOrderResult]:
"""
:param orders_to_create: The collection of orders to create.
:return: The result of the batch order create attempt.
"""
...
@abstractmethod
async def cancel_order(self, order: GatewayInFlightOrder) -> Tuple[bool, Optional[Dict[str, Any]]]:
"""
:return: A tuple of the boolean indicating the cancelation success and any misc order updates.
"""
...
@abstractmethod
async def batch_order_cancel(self, orders_to_cancel: List[InFlightOrder]) -> List[CancelOrderResult]:
"""
:param orders_to_cancel: The collection of orders to cancel.
:return: The result of the batch order cancel attempt.
"""
...
@abstractmethod
async def get_last_traded_price(self, trading_pair: str) -> Decimal:
...
@abstractmethod
async def get_order_book_snapshot(self, trading_pair: str) -> OrderBookMessage:
...
@abstractmethod
async def get_account_balances(self) -> Dict[str, Dict[str, Decimal]]:
"""Returns a dictionary like
{
asset_name: {
"total_balance": Decimal,
"available_balance": Decimal,
}
}
"""
...
@abstractmethod
async def get_order_status_update(self, in_flight_order: InFlightOrder) -> OrderUpdate:
...
@abstractmethod
async def get_all_order_fills(self, in_flight_order: InFlightOrder) -> List[TradeUpdate]:
...
@abstractmethod
def is_order_not_found_during_status_update_error(self, status_update_exception: Exception) -> bool:
...
@abstractmethod
def is_order_not_found_during_cancelation_error(self, cancelation_exception: Exception) -> bool:
...
@abstractmethod
async def check_network_status(self) -> NetworkStatus:
...
@abstractmethod
def _check_markets_initialized(self) -> bool:
...
@abstractmethod
async def _update_markets(self):
...
@abstractmethod
def _parse_trading_rule(self, trading_pair: str, market_info: Any) -> TradingRule:
...
@abstractmethod
def _get_exchange_trading_pair_from_market_info(self, market_info: Any) -> str:
...
@abstractmethod
def _get_maker_taker_exchange_fee_rates_from_market_info(self, market_info: Any) -> MakerTakerExchangeFeeRates:
...
@property
def gateway_order_tracker(self):
return self._gateway_order_tracker
@property
def ready(self) -> bool:
return self._check_markets_initialized()
@gateway_order_tracker.setter
def gateway_order_tracker(self, tracker: GatewayOrderTracker):
if self._gateway_order_tracker is not None:
raise RuntimeError("Attempted to re-assign the order tracker.")
self._gateway_order_tracker = tracker
@staticmethod
def get_client_order_id(
is_buy: bool, trading_pair: str, hbot_order_id_prefix: str, max_id_len: Optional[int]
) -> str:
return get_new_client_order_id(is_buy, trading_pair, hbot_order_id_prefix, max_id_len)
async def get_trading_rules(self) -> Dict[str, TradingRule]:
self._check_markets_initialized() or await self._update_markets()
trading_rules = {
trading_pair: self._parse_trading_rule(trading_pair=trading_pair, market_info=market)
for trading_pair, market in self._markets_info.items()
}
return trading_rules
async def get_symbol_map(self) -> bidict[str, str]:
self._check_markets_initialized() or await self._update_markets()
mapping = bidict()
for trading_pair, market_info in self._markets_info.items():
exchange_symbol = self._get_exchange_trading_pair_from_market_info(market_info=market_info)
mapping[exchange_symbol] = trading_pair
return mapping
async def get_trading_fees(self) -> Mapping[str, MakerTakerExchangeFeeRates]:
self._check_markets_initialized() or await self._update_markets()
trading_fees = {}
        for trading_pair, market_info in self._markets_info.items():
            trading_fees[trading_pair] = self._get_maker_taker_exchange_fee_rates_from_market_info(
                market_info=market_info
            )
return trading_fees
def add_listener(self, event_tag: Enum, listener: EventListener):
self._publisher.add_listener(event_tag=event_tag, listener=listener)
    def remove_listener(self, event_tag: Enum, listener: EventListener):
        self._publisher.remove_listener(event_tag=event_tag, listener=listener)
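# Sketch of the two-way lookup that get_symbol_map() above relies on (assumes
# only the bidict package; the symbols are made up):
#
#     mapping = bidict()
#     mapping["BTCUSDT"] = "BTC-USDT"                   # exchange symbol -> trading pair
#     assert mapping["BTCUSDT"] == "BTC-USDT"
#     assert mapping.inverse["BTC-USDT"] == "BTCUSDT"   # reverse lookup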
| null |
1,731 |
from meerkat.tools.lazy_loader import LazyLoader
torch = LazyLoader("torch")
transforms = LazyLoader("torchvision.transforms")
transformers = LazyLoader("transformers")
def initialize_transform(transform_name, config, dataset):
if transform_name is None:
return None
elif transform_name == "bert":
        return initialize_bert_transform(config)
elif transform_name == "image_base":
return initialize_image_base_transform(config, dataset)
elif transform_name == "image_resize_and_center_crop":
return initialize_image_resize_and_center_crop_transform(config, dataset)
elif transform_name == "poverty_train":
return initialize_poverty_train_transform()
else:
raise ValueError(f"{transform_name} not recognized")
def initialize_bert_transform(config):
assert "bert" in config.model
assert config.max_token_length is not None
tokenizer = getBertTokenizer(config.model)
def transform(text):
tokens = tokenizer(
text,
padding="max_length",
truncation=True,
max_length=config.max_token_length,
return_tensors="pt",
)
if config.model == "bert-base-uncased":
x = torch.stack(
(
tokens["input_ids"],
tokens["attention_mask"],
tokens["token_type_ids"],
),
dim=2,
)
elif config.model == "distilbert-base-uncased":
x = torch.stack((tokens["input_ids"], tokens["attention_mask"]), dim=2)
x = torch.squeeze(x, dim=0) # First shape dim is always 1
return x
return transform
def getBertTokenizer(model):
if model == "bert-base-uncased":
tokenizer = transformers.BertTokenizerFast.from_pretrained(model)
elif model == "distilbert-base-uncased":
tokenizer = transformers.DistilBertTokenizerFast.from_pretrained(model)
else:
raise ValueError(f"Model: {model} not recognized.")
return tokenizer
def initialize_image_base_transform(config, dataset):
transform_steps = []
if dataset.original_resolution is not None and min(
dataset.original_resolution
) != max(dataset.original_resolution):
crop_size = min(dataset.original_resolution)
transform_steps.append(transforms.CenterCrop(crop_size))
if config.target_resolution is not None and config.dataset != "fmow":
transform_steps.append(transforms.Resize(config.target_resolution))
transform_steps += [
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
transform = transforms.Compose(transform_steps)
return transform
def initialize_image_resize_and_center_crop_transform(config, dataset):
"""Resizes the image to a slightly larger square then crops the center."""
assert dataset.original_resolution is not None
assert config.resize_scale is not None
scaled_resolution = tuple(
int(res * config.resize_scale) for res in dataset.original_resolution
)
if config.target_resolution is not None:
target_resolution = config.target_resolution
else:
target_resolution = dataset.original_resolution
transform = transforms.Compose(
[
transforms.Resize(scaled_resolution),
transforms.CenterCrop(target_resolution),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
return transform
def initialize_poverty_train_transform():
transforms_ls = [
transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ColorJitter(brightness=0.8, contrast=0.8, saturation=0.8, hue=0.1),
transforms.ToTensor(),
]
rgb_transform = transforms.Compose(transforms_ls)
def transform_rgb(img):
# bgr to rgb and back to bgr
img[:3] = rgb_transform(img[:3][[2, 1, 0]])[[2, 1, 0]]
return img
transform = transforms.Lambda(lambda x: transform_rgb(x))
return transform
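# Hypothetical usage sketch: the config and dataset objects are stand-ins for
# whatever the caller provides, as long as they expose these attributes.
#
#     from types import SimpleNamespace
#     config = SimpleNamespace(dataset="camelyon17", target_resolution=(224, 224))
#     dataset = SimpleNamespace(original_resolution=(96, 96))
#     transform = initialize_transform("image_base", config, dataset)
#     tensor = transform(pil_image)  # PIL image -> normalized float tensor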
| null |
1,732 |
# Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import sys
from guild import config
from guild import file_util
from guild import log as loglib
from guild import remote as remotelib
from guild import util
from guild import var
log = logging.getLogger("guild")
def require_env(name):
if name not in os.environ:
raise remotelib.OperationError(f"missing required {name} environment variable")
def set_remote_lock(remote_run, remote_name, runs_dir=None):
assert isinstance(remote_run, remotelib.RunProxy), remote_run
local_run_dir = _local_run_dir(remote_run, runs_dir)
_ensure_deleted_locks(local_run_dir)
_maybe_write_remote_lock_file(remote_run, remote_name, local_run_dir)
def _local_run_dir(remote_run, runs_dir=None):
runs_dir = runs_dir or var.runs_dir()
return os.path.join(runs_dir, remote_run.id)
def _ensure_deleted_locks(run_dir):
util.ensure_deleted(_lock_file_path(run_dir))
util.ensure_deleted(_remote_lock_file_path(run_dir))
def _lock_file_path(run_dir):
return os.path.join(run_dir, ".guild", "LOCK")
def _remote_lock_file_path(run_dir):
return os.path.join(run_dir, ".guild", "LOCK.remote")
def _maybe_write_remote_lock_file(remote_run, remote_name, local_run_dir):
if remote_run.status == "running":
_write_remote_lock_file(local_run_dir, remote_name)
def _write_remote_lock_file(run_dir, remote_name):
with open(_remote_lock_file_path(run_dir), "w") as f:
f.write(remote_name)
def config_path(path):
"""Returns an absolute path for a config-relative path.
Variable and user refs are resolved in path.
If path is None, returns None.
"""
if path is None:
return None
expanded = file_util.expand_path(path)
config_dir = os.path.dirname(config.user_config_path())
return os.path.abspath(os.path.join(config_dir, expanded))
def subprocess_call(cmd, extra_env=None, quiet=False, allowed_returncodes=(0,)):
env = dict(os.environ)
if extra_env:
env.update(extra_env)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
buffer = []
while True:
line = p.stdout.readline()
if not line:
break
if quiet:
buffer.append(line.decode())
else:
sys.stderr.write(line.decode())
returncode = p.wait()
if returncode not in allowed_returncodes:
for line in buffer:
sys.stderr.write(line)
raise SystemExit(f"error running {cmd[0]} - see above for details", returncode)
return returncode
def init_env(env_config):
    return _coerce_env(_env_for_config(env_config))
def _env_for_config(env_config):
if isinstance(env_config, dict):
return env_config
if isinstance(env_config, str):
return _env_from_file(env_config)
if env_config is None:
return {}
log.warning("invalid value for remote env %r - ignoring", env_config)
return {}
def _coerce_env(env):
    # Drop falsy values and coerce the rest to strings
    return {name: str(val) for name, val in env.items() if val}
def _env_from_file(path):
if path.lower().endswith(".gpg"):
env_str = _try_read_gpg(path)
else:
env_str = util.try_read(path)
if not env_str:
log.warning("cannot read remote env from %s - ignorning", path)
return {}
return _decode_env(env_str)
def _try_read_gpg(path):
path = os.path.expanduser(path)
cmd = _gpg_cmd() + [path]
log.debug("gpg cmd: %s", cmd)
try:
p = subprocess.Popen(
cmd, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except OSError as e:
log.error("cannot decode %s with command '%s' (%s)", path, " ".join(cmd), e)
return None
else:
out, err = p.communicate()
if p.returncode != 0:
log.error(err.decode(errors="replace").strip())
return None
return out.decode(errors="replace")
def _gpg_cmd():
gpg_env = os.getenv("GPG_CMD")
if gpg_env:
return util.shlex_split(gpg_env)
return ["gpg", "-d"]
def _decode_env(s):
return dict([_split_env_line(line) for line in s.split("\n")])
def _split_env_line(s):
parts = s.split("=", 1)
if len(parts) == 1:
parts.append("")
return _strip_export(parts[0]), parts[1]
def _strip_export(s):
s = s.strip()
if s.startswith("export "):
s = s[7:]
return s
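# Example of the env-file format the helpers above accept (illustrative):
#
#     export GUILD_HOME=/opt/guild
#     API_TOKEN=abc123
#     EMPTY_VAR
#
# _decode_env on that text yields
# {"GUILD_HOME": "/opt/guild", "API_TOKEN": "abc123", "EMPTY_VAR": ""}.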
def remote_activity(msg, *args):
"""Log remote activity.
Used to report time consuming work that would otherwise not show
user feedback. E.g. use when synchronizing meta data.
"""
log.info(loglib.dim(msg), *args)
def strip_common_config(config):
"""Returns a copy of `config` that does not contain common config.
Common config is shared across all remote types.
Common config attributes are: `type`, `description`.
"""
return remotelib.RemoteConfig(
{key: config[key]
for key in config if key not in ("type", "description")}
)
| null |
1,733 |
#!/usr/bin/env python
import unittest
from hummingbot.strategy.pure_market_making.data_types import InventorySkewBidAskRatios
from hummingbot.strategy.pure_market_making.inventory_skew_calculator import \
calculate_bid_ask_ratios_from_base_asset_ratio
class InventorySkewCalculatorUnitTest(unittest.TestCase):
def setUp(self):
self.base_asset: float = 85000
self.quote_asset: float = 10000
self.price: float = 0.0036
self.target_ratio: float = 0.03
self.base_range: float = 20000.0
    def test_base_range_larger_than_portfolio(self):
self.base_asset = 100
self.quote_asset = 10
self.price = 1
self.target_ratio = 0.35
self.base_range = 200
bid_ask_ratios: InventorySkewBidAskRatios = calculate_bid_ask_ratios_from_base_asset_ratio(
self.base_asset, self.quote_asset, self.price, self.target_ratio, self.base_range
)
self.assertAlmostEqual(0, bid_ask_ratios.bid_ratio)
self.assertAlmostEqual(2, bid_ask_ratios.ask_ratio)
self.base_asset = 10
self.quote_asset = 100
self.price = 1
self.target_ratio = 0.75
self.base_range = 200
bid_ask_ratios: InventorySkewBidAskRatios = calculate_bid_ask_ratios_from_base_asset_ratio(
self.base_asset, self.quote_asset, self.price, self.target_ratio, self.base_range
)
self.assertAlmostEqual(2, bid_ask_ratios.bid_ratio)
self.assertAlmostEqual(0, bid_ask_ratios.ask_ratio)
def test_balanced_portfolio(self):
bid_ask_ratios: InventorySkewBidAskRatios = calculate_bid_ask_ratios_from_base_asset_ratio(
self.base_asset, self.quote_asset, self.price, self.target_ratio, self.base_range
)
self.assertAlmostEqual(1.04416666, bid_ask_ratios.bid_ratio)
self.assertAlmostEqual(0.95583333, bid_ask_ratios.ask_ratio)
def test_heavily_skewed_portfolio(self):
self.base_asset = 8500.0
bid_ask_ratios: InventorySkewBidAskRatios = calculate_bid_ask_ratios_from_base_asset_ratio(
self.base_asset, self.quote_asset, self.price, self.target_ratio, self.base_range
)
self.assertAlmostEqual(2.0, bid_ask_ratios.bid_ratio)
self.assertAlmostEqual(0.0, bid_ask_ratios.ask_ratio)
self.base_asset = 200000.0
bid_ask_ratios: InventorySkewBidAskRatios = calculate_bid_ask_ratios_from_base_asset_ratio(
self.base_asset, self.quote_asset, self.price, self.target_ratio, self.base_range
)
self.assertAlmostEqual(0.0, bid_ask_ratios.bid_ratio)
self.assertAlmostEqual(2.0, bid_ask_ratios.ask_ratio)
        self.base_asset = 1000000.0
        self.quote_asset = 0.0
        bid_ask_ratios: InventorySkewBidAskRatios = calculate_bid_ask_ratios_from_base_asset_ratio(
            self.base_asset, self.quote_asset, self.price, self.target_ratio, self.base_range
        )
        self.assertAlmostEqual(0.0, bid_ask_ratios.bid_ratio)
        self.assertAlmostEqual(2.0, bid_ask_ratios.ask_ratio)
def test_moderately_skewed_portfolio(self):
self.base_asset = 95000.0
bid_ask_ratios: InventorySkewBidAskRatios = calculate_bid_ask_ratios_from_base_asset_ratio(
self.base_asset, self.quote_asset, self.price, self.target_ratio, self.base_range
)
self.assertAlmostEqual(0.55916666, bid_ask_ratios.bid_ratio)
self.assertAlmostEqual(1.440833333, bid_ask_ratios.ask_ratio)
self.base_asset = 70000.0
bid_ask_ratios: InventorySkewBidAskRatios = calculate_bid_ask_ratios_from_base_asset_ratio(
self.base_asset, self.quote_asset, self.price, self.target_ratio, self.base_range
)
self.assertAlmostEqual(1.77166666, bid_ask_ratios.bid_ratio)
self.assertAlmostEqual(0.22833333, bid_ask_ratios.ask_ratio)
def test_empty_portfolio(self):
self.base_asset = 0.0
self.quote_asset = 0.0
bid_ask_ratios: InventorySkewBidAskRatios = calculate_bid_ask_ratios_from_base_asset_ratio(
self.base_asset, self.quote_asset, self.price, self.target_ratio, self.base_range
)
self.assertAlmostEqual(0.0, bid_ask_ratios.bid_ratio)
self.assertAlmostEqual(0.0, bid_ask_ratios.ask_ratio)
self.quote_asset = 10000.0
bid_ask_ratios: InventorySkewBidAskRatios = calculate_bid_ask_ratios_from_base_asset_ratio(
self.base_asset, self.quote_asset, self.price, self.target_ratio, self.base_range
)
self.assertAlmostEqual(2.0, bid_ask_ratios.bid_ratio)
self.assertAlmostEqual(0.0, bid_ask_ratios.ask_ratio)
self.base_asset = 85000.0
self.base_range = 0.0
bid_ask_ratios: InventorySkewBidAskRatios = calculate_bid_ask_ratios_from_base_asset_ratio(
self.base_asset, self.quote_asset, self.price, self.target_ratio, self.base_range
)
self.assertAlmostEqual(0.0, bid_ask_ratios.bid_ratio)
self.assertAlmostEqual(0.0, bid_ask_ratios.ask_ratio)
if __name__ == "__main__":
unittest.main()
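# The expected values above are reproduced by the following reconstruction of
# the skew math (a sketch inferred from these tests, not the library source):
#
#     def sketch_bid_ask_ratios(base, quote, price, target_ratio, base_range):
#         total_value = base * price + quote
#         if total_value <= 0.0 or base_range <= 0.0:
#             return 0.0, 0.0
#         range_value = min(base_range * price, total_value * 0.5)
#         target_value = total_value * target_ratio
#         left = max(target_value - range_value, 0.0)
#         right = target_value + range_value
#         ratio = min(max((base * price - left) / (right - left), 0.0), 1.0)
#         return (1.0 - ratio) * 2.0, ratio * 2.0  # (bid_ratio, ask_ratio)
#
# e.g. the balanced case (85000, 10000, 0.0036, 0.03, 20000) yields
# bid ~= 1.04417 and ask ~= 0.95583, matching test_balanced_portfolio.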
| null |
1,734 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdms_enterprise.endpoint import endpoint_data
class RegisterInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dms-enterprise', '2018-11-01', 'RegisterInstance','dms-enterprise')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_EcsRegion(self): # String
return self.get_query_params().get('EcsRegion')
def set_EcsRegion(self, EcsRegion): # String
self.add_query_param('EcsRegion', EcsRegion)
def get_DdlOnline(self): # Integer
return self.get_query_params().get('DdlOnline')
def set_DdlOnline(self, DdlOnline): # Integer
self.add_query_param('DdlOnline', DdlOnline)
def get_UseDsql(self): # Integer
return self.get_query_params().get('UseDsql')
def set_UseDsql(self, UseDsql): # Integer
self.add_query_param('UseDsql', UseDsql)
def get_NetworkType(self): # String
return self.get_query_params().get('NetworkType')
def set_NetworkType(self, NetworkType): # String
self.add_query_param('NetworkType', NetworkType)
def get_Tid(self): # Long
return self.get_query_params().get('Tid')
def set_Tid(self, Tid): # Long
self.add_query_param('Tid', Tid)
def get_Sid(self): # String
return self.get_query_params().get('Sid')
def set_Sid(self, Sid): # String
self.add_query_param('Sid', Sid)
def get_EnableSellSitd(self): # String
return self.get_query_params().get('EnableSellSitd')
def set_EnableSellSitd(self, EnableSellSitd): # String
self.add_query_param('EnableSellSitd', EnableSellSitd)
def get_DataLinkName(self): # String
return self.get_query_params().get('DataLinkName')
def set_DataLinkName(self, DataLinkName): # String
self.add_query_param('DataLinkName', DataLinkName)
def get_TemplateType(self): # String
return self.get_query_params().get('TemplateType')
def set_TemplateType(self, TemplateType): # String
self.add_query_param('TemplateType', TemplateType)
def get_InstanceSource(self): # String
return self.get_query_params().get('InstanceSource')
def set_InstanceSource(self, InstanceSource): # String
self.add_query_param('InstanceSource', InstanceSource)
def get_EnvType(self): # String
return self.get_query_params().get('EnvType')
def set_EnvType(self, EnvType): # String
self.add_query_param('EnvType', EnvType)
def get_Host(self): # String
return self.get_query_params().get('Host')
def set_Host(self, Host): # String
self.add_query_param('Host', Host)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_QueryTimeout(self): # Integer
return self.get_query_params().get('QueryTimeout')
def set_QueryTimeout(self, QueryTimeout): # Integer
self.add_query_param('QueryTimeout', QueryTimeout)
def get_EcsInstanceId(self): # String
return self.get_query_params().get('EcsInstanceId')
def set_EcsInstanceId(self, EcsInstanceId): # String
self.add_query_param('EcsInstanceId', EcsInstanceId)
def get_ExportTimeout(self): # Integer
return self.get_query_params().get('ExportTimeout')
def set_ExportTimeout(self, ExportTimeout): # Integer
self.add_query_param('ExportTimeout', ExportTimeout)
def get_DatabasePassword(self): # String
return self.get_query_params().get('DatabasePassword')
def set_DatabasePassword(self, DatabasePassword): # String
self.add_query_param('DatabasePassword', DatabasePassword)
def get_InstanceAlias(self): # String
return self.get_query_params().get('InstanceAlias')
def set_InstanceAlias(self, InstanceAlias): # String
self.add_query_param('InstanceAlias', InstanceAlias)
def get_TemplateId(self): # Long
return self.get_query_params().get('TemplateId')
def set_TemplateId(self, TemplateId): # Long
self.add_query_param('TemplateId', TemplateId)
def get_DatabaseUser(self): # String
return self.get_query_params().get('DatabaseUser')
def set_DatabaseUser(self, DatabaseUser): # String
self.add_query_param('DatabaseUser', DatabaseUser)
	def get_Port(self): # Integer
return self.get_query_params().get('Port')
def set_Port(self, Port): # Integer
self.add_query_param('Port', Port)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_DbaUid(self): # Long
return self.get_query_params().get('DbaUid')
def set_DbaUid(self, DbaUid): # Long
self.add_query_param('DbaUid', DbaUid)
def get_SkipTest(self): # Boolean
return self.get_query_params().get('SkipTest')
def set_SkipTest(self, SkipTest): # Boolean
self.add_query_param('SkipTest', SkipTest)
def get_SafeRule(self): # String
return self.get_query_params().get('SafeRule')
def set_SafeRule(self, SafeRule): # String
self.add_query_param('SafeRule', SafeRule)
| null |
1,735 |
#!/usr/bin/python3 -u
# Copyright 2022 Memgraph Ltd.
#
# Use of this software is governed by the Business Source License
# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
# License, and you may not use this file except in compliance with the Business Source License.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0, included in the file
# licenses/APL.txt.
import argparse
import os
import subprocess
import sys
import tempfile
import time
from typing import List
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECT_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "..", ".."))
def wait_for_server(port: int, delay: float = 0.1) -> float:
cmd = ["nc", "-z", "-w", "1", "127.0.0.1", str(port)]
while subprocess.call(cmd) != 0:
time.sleep(0.01)
time.sleep(delay)
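# A pure-Python equivalent of the `nc -z` probe above, in case netcat is not
# available on the test host (sketch):
#
#     import socket
#     def wait_for_server_socket(port: int, delay: float = 0.1) -> None:
#         while True:
#             with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
#                 s.settimeout(1.0)
#                 if s.connect_ex(("127.0.0.1", port)) == 0:
#                     break
#             time.sleep(0.01)
#         time.sleep(delay)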
def execute_tester(
binary: str,
queries: List[str],
should_fail: bool = False,
failure_message: str = "",
username: str = "",
password: str = "",
check_failure: bool = True,
) -> None:
args = [binary, "--username", username, "--password", password]
if should_fail:
args.append("--should-fail")
if failure_message:
args.extend(["--failure-message", failure_message])
if check_failure:
args.append("--check-failure")
args.extend(queries)
subprocess.run(args).check_returncode()
def execute_flag_check(binary: str, queries: List[str], expected: int, username: str = "", password: str = "") -> None:
args = [binary, "--username", username, "--password", password]
args.extend(queries)
args.append(str(expected))
subprocess.run(args).check_returncode()
def start_memgraph(memgraph_args: List[any]) -> subprocess:
memgraph = subprocess.Popen(list(map(str, memgraph_args)))
time.sleep(0.1)
assert memgraph.poll() is None, "Memgraph process died prematurely!"
wait_for_server(7687)
return memgraph
def execute_with_user(tester_binary: str, queries: List[str]) -> None:
return execute_tester(
tester_binary, queries, should_fail=False, check_failure=True, username="admin", password="admin"
)
def execute_without_user(
tester_binary: str,
queries: List[str],
should_fail: bool = False,
failure_message: str = "",
check_failure: bool = True,
) -> None:
return execute_tester(tester_binary, queries, should_fail, failure_message, "", "", check_failure)
def cleanup(memgraph: subprocess.Popen):
if memgraph.poll() is None:
memgraph.terminate()
assert memgraph.wait() == 0, "Memgraph process didn't exit cleanly!"
def test_without_any_files(tester_binary: str, memgraph_args: List[str]):
memgraph = start_memgraph(memgraph_args)
    execute_without_user(tester_binary, ["MATCH (n) RETURN n"], False)
cleanup(memgraph)
def test_init_file(tester_binary: str, memgraph_args: List[str]):
memgraph = start_memgraph(memgraph_args)
execute_with_user(tester_binary, ["MATCH (n) RETURN n"])
    execute_without_user(tester_binary, ["MATCH (n) RETURN n"], True, "Handshake with the server failed!", True)
cleanup(memgraph)
def test_init_data_file(flag_checker_binary: str, memgraph_args: List[str]):
memgraph = start_memgraph(memgraph_args)
execute_flag_check(flag_checker_binary, ["MATCH (n) RETURN n"], 2, "user", "user")
cleanup(memgraph)
def test_init_and_init_data_file(flag_checker_binary: str, tester_binary: str, memgraph_args: List[str]):
memgraph = start_memgraph(memgraph_args)
execute_with_user(tester_binary, ["MATCH (n) RETURN n"])
    execute_without_user(tester_binary, ["MATCH (n) RETURN n"], True, "Handshake with the server failed!", True)
execute_flag_check(flag_checker_binary, ["MATCH (n) RETURN n"], 2, "user", "user")
cleanup(memgraph)
def execute_test(memgraph_binary: str, tester_binary: str, flag_checker_binary: str) -> None:
storage_directory = tempfile.TemporaryDirectory()
memgraph_args = [memgraph_binary, "--data-directory", storage_directory.name]
    # Prepare the dummy init files consumed by the checks below
with open(os.path.join(os.getcwd(), "dummy_init_file.cypherl"), "w") as temp_file:
temp_file.write("CREATE USER admin IDENTIFIED BY 'admin';\n")
temp_file.write("CREATE USER user IDENTIFIED BY 'user';\n")
with open(os.path.join(os.getcwd(), "dummy_init_data_file.cypherl"), "w") as temp_file:
temp_file.write("CREATE (n:RANDOM) RETURN n;\n")
temp_file.write("CREATE (n:RANDOM {name:'1'}) RETURN n;\n")
# Run the test with all combinations of permissions
print("\033[1;36m~~ Starting env variable check test ~~\033[0m")
test_without_any_files(tester_binary, memgraph_args)
memgraph_args_with_init_file = memgraph_args + [
"--init-file",
os.path.join(os.getcwd(), "dummy_init_file.cypherl"),
]
test_init_file(tester_binary, memgraph_args_with_init_file)
memgraph_args_with_init_data_file = memgraph_args + [
"--init-data-file",
os.path.join(os.getcwd(), "dummy_init_data_file.cypherl"),
]
test_init_data_file(flag_checker_binary, memgraph_args_with_init_data_file)
memgraph_args_with_init_file_and_init_data_file = memgraph_args + [
"--init-file",
os.path.join(os.getcwd(), "dummy_init_file.cypherl"),
"--init-data-file",
os.path.join(os.getcwd(), "dummy_init_data_file.cypherl"),
]
test_init_and_init_data_file(flag_checker_binary, tester_binary, memgraph_args_with_init_file_and_init_data_file)
print("\033[1;36m~~ Ended env variable check test ~~\033[0m")
os.remove(os.path.join(os.getcwd(), "dummy_init_data_file.cypherl"))
os.remove(os.path.join(os.getcwd(), "dummy_init_file.cypherl"))
if __name__ == "__main__":
memgraph_binary = os.path.join(PROJECT_DIR, "build", "memgraph")
tester_binary = os.path.join(PROJECT_DIR, "build", "tests", "integration", "flag_check", "tester")
flag_checker_binary = os.path.join(PROJECT_DIR, "build", "tests", "integration", "flag_check", "flag_check")
parser = argparse.ArgumentParser()
parser.add_argument("--memgraph", default=memgraph_binary)
parser.add_argument("--tester", default=tester_binary)
parser.add_argument("--flag_checker", default=flag_checker_binary)
args = parser.parse_args()
execute_test(args.memgraph, args.tester, args.flag_checker)
sys.exit(0)
| null |
1,736 |
import os
import sys
from os.path import dirname, join, realpath
from typing import Type
from prompt_toolkit.shortcuts import input_dialog, message_dialog
from prompt_toolkit.styles import Style
from hummingbot import root_path
from hummingbot.client.config.conf_migration import migrate_configs, migrate_non_secure_configs_only
from hummingbot.client.config.config_crypt import BaseSecretsManager, store_password_verification
from hummingbot.client.config.security import Security
from hummingbot.client.settings import CONF_DIR_PATH
sys.path.insert(0, str(root_path()))
with open(realpath(join(dirname(__file__), '../../VERSION'))) as version_file:
version = version_file.read().strip()
def login_prompt(secrets_manager_cls: Type[BaseSecretsManager], style: Style):
err_msg = None
secrets_manager = None
if Security.new_password_required():
        if legacy_confs_exist():
secrets_manager = migrate_configs_prompt(secrets_manager_cls, style)
else:
show_welcome(style)
password = input_dialog(
title="Set Password",
text="""
Create a password to protect your sensitive data.
This password is not shared with us nor with anyone else, so please store it securely.
Enter your new password:""",
password=True,
style=style).run()
if password is None:
return None
            if password == "":
err_msg = "The password must not be empty."
else:
re_password = input_dialog(
title="Set Password",
text="Please re-enter your password:",
password=True,
style=style).run()
if re_password is None:
return None
if password != re_password:
err_msg = "Passwords entered do not match, please try again."
else:
secrets_manager = secrets_manager_cls(password)
store_password_verification(secrets_manager)
else:
password = input_dialog(
title="Welcome back to Hummingbot",
text="Enter your password:",
password=True,
style=style).run()
if password is None:
return None
secrets_manager = secrets_manager_cls(password)
if err_msg is None and not Security.login(secrets_manager):
err_msg = "Invalid password - please try again."
if err_msg is not None:
message_dialog(
title='Error',
text=err_msg,
style=style).run()
return login_prompt(secrets_manager_cls, style)
return secrets_manager
def legacy_confs_exist() -> bool:
encrypted_conf_prefix = "encrypted_"
encrypted_conf_postfix = ".json"
exist = False
for f in sorted(os.listdir(CONF_DIR_PATH)):
f_path = CONF_DIR_PATH / f
if os.path.isfile(f_path) and f.startswith(encrypted_conf_prefix) and f.endswith(encrypted_conf_postfix):
exist = True
break
return exist
def migrate_configs_prompt(secrets_manager_cls: Type[BaseSecretsManager], style: Style) -> BaseSecretsManager:
message_dialog(
title='Configs Migration',
text="""
CONFIGS MIGRATION:
We have recently refactored the way hummingbot handles configurations.
To migrate your legacy configuration files to the new format,
please enter your password on the following screen.
""",
style=style).run()
password = input_dialog(
title="Input Password",
text="\n\nEnter your previous password:",
password=True,
style=style).run()
if password is None:
raise ValueError("Wrong password.")
secrets_manager = secrets_manager_cls(password)
errors = migrate_configs(secrets_manager)
if len(errors) != 0:
_migration_errors_dialog(errors, style)
else:
message_dialog(
title='Configs Migration Success',
text="""
CONFIGS MIGRATION SUCCESS:
The migration process was completed successfully.
""",
style=style).run()
return secrets_manager
def migrate_non_secure_only_prompt(style: Style):
message_dialog(
title='Configs Migration',
text="""
CONFIGS MIGRATION:
We have recently refactored the way hummingbot handles configurations.
We will now attempt to migrate any legacy config files to the new format.
""",
style=style).run()
errors = migrate_non_secure_configs_only()
if len(errors) != 0:
_migration_errors_dialog(errors, style)
else:
message_dialog(
title='Configs Migration Success',
text="""
CONFIGS MIGRATION SUCCESS:
The migration process was completed successfully.
""",
style=style).run()
def _migration_errors_dialog(errors, style: Style):
padding = "\n "
errors_str = padding + padding.join(errors)
message_dialog(
title='Configs Migration Errors',
text=f"""
CONFIGS MIGRATION ERRORS:
{errors_str}
""",
style=style).run()
def show_welcome(style: Style):
message_dialog(
title='Welcome to Hummingbot',
text="""
██╗ ██╗██╗ ██╗███╗ ███╗███╗ ███╗██╗███╗ ██╗ ██████╗ ██████╗ ██████╗ ████████╗
██║ ██║██║ ██║████╗ ████║████╗ ████║██║████╗ ██║██╔════╝ ██╔══██╗██╔═══██╗╚══██╔══╝
███████║██║ ██║██╔████╔██║██╔████╔██║██║██╔██╗ ██║██║ ███╗██████╔╝██║ ██║ ██║
██╔══██║██║ ██║██║╚██╔╝██║██║╚██╔╝██║██║██║╚██╗██║██║ ██║██╔══██╗██║ ██║ ██║
██║ ██║╚██████╔╝██║ ╚═╝ ██║██║ ╚═╝ ██║██║██║ ╚████║╚██████╔╝██████╔╝╚██████╔╝ ██║
╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚═╝
=======================================================================================
Version: {version}
Codebase: https://github.com/hummingbot/hummingbot
""".format(version=version),
style=style).run()
message_dialog(
title='Important Warning',
text="""
PLEASE READ THIS CAREFULLY BEFORE USING HUMMINGBOT:
Hummingbot is a free and open source software client that helps you build algorithmic
crypto trading strategies.
Algorithmic crypto trading is a risky activity. You will be building a "bot" that
automatically places orders and trades based on parameters that you set. Please take
the time to understand how each strategy works before you risk real capital with it.
You are solely responsible for the trades that you perform using Hummingbot.
""",
style=style).run()
message_dialog(
title='Important Warning',
text="""
SET A SECURE PASSWORD:
To use Hummingbot, you will need to give it access to your crypto assets by entering
your exchange API keys and/or wallet private keys. These keys are not shared with
anyone, including us.
On the next screen, you will set a password to protect these keys and other sensitive
data. Please store this password safely since there is no way to reset it.
""",
style=style).run()
| null |
1,737 |
# Copyright cocotb contributors
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from cocotb.types import Bit, Logic
def test_logic_conversions():
l = Logic("0")
assert Logic("l") == l
assert Logic("L") == l
assert Logic(0) == l
assert Logic(False) == l
assert Logic(Logic("0")) == l
l = Logic("1")
assert Logic(1) == l
assert Logic(True) == l
assert Logic("h") == l
assert Logic("H") == l
assert Logic(Logic("1")) == l
l = Logic("X")
assert Logic("x") == l
assert Logic("w") == l
assert Logic("W") == l
assert Logic("u") == l
assert Logic("U") == l
assert Logic("-") == l
assert Logic(Logic("X")) == l
l = Logic("Z")
assert Logic("z") == l
assert Logic(Logic("Z")) == l
for value in ("j", 2, object()):
with pytest.raises(ValueError):
Logic(value)
def test_bit_conversions():
b = Bit(0)
assert Bit(False) == b
assert Bit("0") == b
assert Bit(Bit(0)) == b
b = Bit(1)
assert Bit(True) == b
assert Bit("1") == b
assert Bit(Bit(1)) == b
for value in ("X", 2, object()):
with pytest.raises(ValueError):
Bit(value)
def test_bit_logic_conversions():
Logic(Bit(0))
Logic(Bit(1))
Bit(Logic(0))
Bit(Logic(1))
with pytest.raises(ValueError):
Bit(Logic("X"))
with pytest.raises(ValueError):
Bit(Logic("Z"))
def test_logic_equality():
assert Logic(0) == Logic("0")
assert Logic(0) != Logic("X")
assert Logic(0) != object()
def test_bit_equality():
assert Bit(0) == Bit(False)
assert Bit(1) != Bit("0")
assert Bit(1) != object()
def test_logic_bit_equality():
assert Logic(0) == Bit(0)
assert Logic(1) == Bit(1)
def test_logic_hashability():
s = {Logic("0"), Logic("1"), Logic("X"), Logic("Z")}
assert len(s) == 4
def test_bit_hashability():
s = {Bit(0), Bit(1)}
assert len(s) == 2
def test_logic_bit_hashability():
s = {Logic("0"), Logic("1"), Logic("X"), Logic("Z"), Bit("0"), Bit("1")}
assert len(s) == 4
def test_logic_default_value():
assert Logic() == Logic("X")
def test_bit_default_value():
assert Bit() == Bit("0")
def test_logic_bool_conversions():
assert bool(Logic("1")) is True
assert bool(Logic("0")) is False
with pytest.raises(ValueError):
bool(Logic("X"))
with pytest.raises(ValueError):
bool(Logic("Z"))
def test_bit_bool_conversions():
assert bool(Bit(1)) is True
assert bool(Bit(0)) is False
def test_logic_str_conversions():
assert str(Logic("0")) == "0"
assert str(Logic("1")) == "1"
assert str(Logic("X")) == "X"
assert str(Logic("Z")) == "Z"
def test_bit_str_conversions():
assert str(Bit(0)) == "0"
assert str(Bit(1)) == "1"
def test_logic_int_conversions():
assert int(Logic("0")) == 0
assert int(Logic("1")) == 1
with pytest.raises(ValueError):
int(Logic("X"))
with pytest.raises(ValueError):
int(Logic("Z"))
def test_bit_int_conversions():
assert int(Bit("0")) == 0
assert int(Bit("1")) == 1
def test_logic_repr():
assert eval(repr(Logic("0"))) == Logic("0")
assert eval(repr(Logic("1"))) == Logic("1")
assert eval(repr(Logic("X"))) == Logic("X")
assert eval(repr(Logic("Z"))) == Logic("Z")
def test_bit_repr():
assert eval(repr(Bit("0"))) == Bit("0")
assert eval(repr(Bit("1"))) == Bit("1")
def test_logic_and():
# will not be exhaustive
assert Logic("0") & Logic("Z") == Logic(0)
assert Logic(1) & Logic("1") == Logic(1)
assert Logic("X") & Logic("Z") == Logic("X")
with pytest.raises(TypeError):
Logic("1") & 8
with pytest.raises(TypeError):
8 & Logic("1")
def test_bit_and():
assert Bit("0") & Bit("1") == Bit(0)
assert Bit(1) & Bit("1") == Bit(1)
with pytest.raises(TypeError):
Bit("1") & 8
with pytest.raises(TypeError):
8 & Bit("1")
def test_logic_bit_and():
r = Logic(0) & Bit(1)
assert type(r) == Logic
assert r == Logic(0)
r = Bit(1) & Logic(0)
assert type(r) == Logic
assert r == Logic(0)
def test_logic_or():
# will not be exhaustive
assert Logic("1") | Logic("Z") == Logic("1")
assert Logic(0) | Logic("0") == Logic(0)
assert Logic("X") | Logic("Z") == Logic("X")
with pytest.raises(TypeError):
8 | Logic(0)
with pytest.raises(TypeError):
Logic(0) | 8
def test_bit_or():
assert Bit("0") | Bit("1") == Bit(1)
assert Bit(0) | Bit(False) == Bit(0)
with pytest.raises(TypeError):
8 | Bit(0)
with pytest.raises(TypeError):
Bit(0) | 8
def test_logic_bit_or():
r = Logic(0) | Bit(1)
assert type(r) == Logic
assert r == Logic(1)
r = Bit(1) | Logic(0)
assert type(r) == Logic
assert r == Logic(1)
def test_logic_xor():
# will not be exhaustive
assert (Logic("1") ^ Logic(True)) == Logic(0)
assert (Logic(1) ^ Logic("X")) == Logic("X")
assert (Logic(1) ^ Logic(False)) == Logic(1)
with pytest.raises(TypeError):
Logic(1) ^ ()
with pytest.raises(TypeError):
() ^ Logic(1)
def test_bit_xor():
assert Bit(0) ^ Bit("1") == Bit(1)
assert Bit(False) ^ Bit(0) == Bit("0")
with pytest.raises(TypeError):
Bit(1) ^ ()
with pytest.raises(TypeError):
() ^ Bit(1)
def test_logic_bit_xor():
r = Logic(0) ^ Bit(1)
assert type(r) == Logic
assert r == Logic(1)
r = Bit(0) ^ Logic(0)
assert type(r) == Logic
assert r == Logic(0)
def test_logic_invert():
assert ~Logic(0) == Logic(1)
assert ~Logic(1) == Logic(0)
assert ~Logic("X") == Logic("X")
assert ~Logic("Z") == Logic("X")
def test_bit_invert():
assert ~Bit(0) == Bit(1)
assert ~Bit(1) == Bit(0)
def test_logic_identity():
assert Logic(0) is Logic(False)
assert Logic("1") is Logic(1)
assert Logic("X") is Logic("x")
assert Logic("z") is Logic("Z")
def test_bit_identity():
assert Bit(0) is Bit(False)
assert Bit(Logic(1)) is Bit("1")
| null |
1,738 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkwaf_openapi.endpoint import endpoint_data
class ModifyDomainRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'waf-openapi', '2019-09-10', 'ModifyDomain','waf')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_IpFollowStatus(self): # Integer
return self.get_query_params().get('IpFollowStatus')
def set_IpFollowStatus(self, IpFollowStatus): # Integer
self.add_query_param('IpFollowStatus', IpFollowStatus)
def get_Keepalive(self): # Boolean
return self.get_query_params().get('Keepalive')
def set_Keepalive(self, Keepalive): # Boolean
self.add_query_param('Keepalive', Keepalive)
def get_SniHost(self): # String
return self.get_query_params().get('SniHost')
def set_SniHost(self, SniHost): # String
self.add_query_param('SniHost', SniHost)
def get_HttpPort(self): # String
return self.get_query_params().get('HttpPort')
def set_HttpPort(self, HttpPort): # String
self.add_query_param('HttpPort', HttpPort)
def get_Http2Port(self): # String
return self.get_query_params().get('Http2Port')
def set_Http2Port(self, Http2Port): # String
self.add_query_param('Http2Port', Http2Port)
def get_WriteTime(self): # Integer
return self.get_query_params().get('WriteTime')
def set_WriteTime(self, WriteTime): # Integer
self.add_query_param('WriteTime', WriteTime)
def get_AccessHeaderMode(self): # Integer
return self.get_query_params().get('AccessHeaderMode')
	def set_AccessHeaderMode(self, AccessHeaderMode): # Integer
self.add_query_param('AccessHeaderMode', AccessHeaderMode)
def get_AccessHeaders(self): # String
return self.get_query_params().get('AccessHeaders')
def set_AccessHeaders(self, AccessHeaders): # String
self.add_query_param('AccessHeaders', AccessHeaders)
def get_KeepaliveTimeout(self): # Integer
return self.get_query_params().get('KeepaliveTimeout')
def set_KeepaliveTimeout(self, KeepaliveTimeout): # Integer
self.add_query_param('KeepaliveTimeout', KeepaliveTimeout)
def get_ClusterType(self): # Integer
return self.get_query_params().get('ClusterType')
def set_ClusterType(self, ClusterType): # Integer
self.add_query_param('ClusterType', ClusterType)
def get_HttpsRedirect(self): # Integer
return self.get_query_params().get('HttpsRedirect')
def set_HttpsRedirect(self, HttpsRedirect): # Integer
self.add_query_param('HttpsRedirect', HttpsRedirect)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_Domain(self): # String
return self.get_query_params().get('Domain')
def set_Domain(self, Domain): # String
self.add_query_param('Domain', Domain)
def get_ReadTime(self): # Integer
return self.get_query_params().get('ReadTime')
def set_ReadTime(self, ReadTime): # Integer
self.add_query_param('ReadTime', ReadTime)
def get_HttpsPort(self): # String
return self.get_query_params().get('HttpsPort')
def set_HttpsPort(self, HttpsPort): # String
self.add_query_param('HttpsPort', HttpsPort)
def get_SniStatus(self): # Integer
return self.get_query_params().get('SniStatus')
def set_SniStatus(self, SniStatus): # Integer
self.add_query_param('SniStatus', SniStatus)
def get_Retry(self): # Boolean
return self.get_query_params().get('Retry')
def set_Retry(self, Retry): # Boolean
self.add_query_param('Retry', Retry)
def get_KeepaliveRequests(self): # Integer
return self.get_query_params().get('KeepaliveRequests')
def set_KeepaliveRequests(self, KeepaliveRequests): # Integer
self.add_query_param('KeepaliveRequests', KeepaliveRequests)
def get_AccessType(self): # String
return self.get_query_params().get('AccessType')
def set_AccessType(self, AccessType): # String
self.add_query_param('AccessType', AccessType)
def get_LogHeaders(self): # String
return self.get_query_params().get('LogHeaders')
def set_LogHeaders(self, LogHeaders): # String
self.add_query_param('LogHeaders', LogHeaders)
def get_ConnectionTime(self): # Integer
return self.get_query_params().get('ConnectionTime')
def set_ConnectionTime(self, ConnectionTime): # Integer
self.add_query_param('ConnectionTime', ConnectionTime)
def get_CloudNativeInstances(self): # String
return self.get_query_params().get('CloudNativeInstances')
def set_CloudNativeInstances(self, CloudNativeInstances): # String
self.add_query_param('CloudNativeInstances', CloudNativeInstances)
def get_SourceIps(self): # String
return self.get_query_params().get('SourceIps')
def set_SourceIps(self, SourceIps): # String
self.add_query_param('SourceIps', SourceIps)
def get_IsAccessProduct(self): # Integer
return self.get_query_params().get('IsAccessProduct')
def set_IsAccessProduct(self, IsAccessProduct): # Integer
self.add_query_param('IsAccessProduct', IsAccessProduct)
def get_LoadBalancing(self): # Integer
return self.get_query_params().get('LoadBalancing')
def set_LoadBalancing(self, LoadBalancing): # Integer
self.add_query_param('LoadBalancing', LoadBalancing)
def get_HttpToUserIp(self): # Integer
return self.get_query_params().get('HttpToUserIp')
def set_HttpToUserIp(self, HttpToUserIp): # Integer
self.add_query_param('HttpToUserIp', HttpToUserIp)
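# Minimal usage sketch (the credentials, region and IDs below are
# placeholders, not real values): requests built from this class are
# dispatched through the core AcsClient.
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = ModifyDomainRequest()
#   request.set_InstanceId('<waf-instance-id>')
#   request.set_Domain('www.example.com')
#   response = client.do_action_with_exception(request)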
| null |
1,739 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class CreateFileRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'CreateFile')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_FileType(self): # Integer
return self.get_body_params().get('FileType')
def set_FileType(self, FileType): # Integer
self.add_body_params('FileType', FileType)
def get_DependentNodeIdList(self): # String
return self.get_body_params().get('DependentNodeIdList')
def set_DependentNodeIdList(self, DependentNodeIdList): # String
self.add_body_params('DependentNodeIdList', DependentNodeIdList)
def get_Content(self): # String
return self.get_body_params().get('Content')
def set_Content(self, Content): # String
self.add_body_params('Content', Content)
def get_ProjectIdentifier(self): # String
return self.get_body_params().get('ProjectIdentifier')
def set_ProjectIdentifier(self, ProjectIdentifier): # String
self.add_body_params('ProjectIdentifier', ProjectIdentifier)
def get_ResourceGroupId(self): # Long
return self.get_body_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # Long
self.add_body_params('ResourceGroupId', ResourceGroupId)
def get_StartImmediately(self): # Boolean
return self.get_body_params().get('StartImmediately')
def set_StartImmediately(self, StartImmediately): # Boolean
self.add_body_params('StartImmediately', StartImmediately)
def get_ProjectId(self): # Long
return self.get_body_params().get('ProjectId')
def set_ProjectId(self, ProjectId): # Long
self.add_body_params('ProjectId', ProjectId)
def get_AdvancedSettings(self): # String
return self.get_body_params().get('AdvancedSettings')
def set_AdvancedSettings(self, AdvancedSettings): # String
self.add_body_params('AdvancedSettings', AdvancedSettings)
def get_StartEffectDate(self): # Long
return self.get_body_params().get('StartEffectDate')
def set_StartEffectDate(self, StartEffectDate): # Long
self.add_body_params('StartEffectDate', StartEffectDate)
def get_CycleType(self): # String
return self.get_body_params().get('CycleType')
def set_CycleType(self, CycleType): # String
self.add_body_params('CycleType', CycleType)
def get_Owner(self): # String
return self.get_body_params().get('Owner')
def set_Owner(self, Owner): # String
self.add_body_params('Owner', Owner)
def get_AutoRerunIntervalMillis(self): # Integer
return self.get_body_params().get('AutoRerunIntervalMillis')
def set_AutoRerunIntervalMillis(self, AutoRerunIntervalMillis): # Integer
self.add_body_params('AutoRerunIntervalMillis', AutoRerunIntervalMillis)
def get_InputList(self): # String
return self.get_body_params().get('InputList')
def set_InputList(self, InputList): # String
self.add_body_params('InputList', InputList)
def get_CreateFolderIfNotExists(self): # Boolean
return self.get_body_params().get('CreateFolderIfNotExists')
def set_CreateFolderIfNotExists(self, CreateFolderIfNotExists): # Boolean
self.add_body_params('CreateFolderIfNotExists', CreateFolderIfNotExists)
def get_RerunMode(self): # String
return self.get_body_params().get('RerunMode')
def set_RerunMode(self, RerunMode): # String
self.add_body_params('RerunMode', RerunMode)
def get_ConnectionName(self): # String
return self.get_body_params().get('ConnectionName')
def set_ConnectionName(self, ConnectionName): # String
self.add_body_params('ConnectionName', ConnectionName)
def get_OutputParameters(self): # String
return self.get_body_params().get('OutputParameters')
def set_OutputParameters(self, OutputParameters): # String
self.add_body_params('OutputParameters', OutputParameters)
def get_ParaValue(self): # String
return self.get_body_params().get('ParaValue')
def set_ParaValue(self, ParaValue): # String
self.add_body_params('ParaValue', ParaValue)
def get_ResourceGroupIdentifier(self): # String
return self.get_body_params().get('ResourceGroupIdentifier')
def set_ResourceGroupIdentifier(self, ResourceGroupIdentifier): # String
self.add_body_params('ResourceGroupIdentifier', ResourceGroupIdentifier)
def get_AutoRerunTimes(self): # Integer
return self.get_body_params().get('AutoRerunTimes')
def set_AutoRerunTimes(self, AutoRerunTimes): # Integer
self.add_body_params('AutoRerunTimes', AutoRerunTimes)
def get_CronExpress(self): # String
return self.get_body_params().get('CronExpress')
def set_CronExpress(self, CronExpress): # String
self.add_body_params('CronExpress', CronExpress)
def get_IgnoreParentSkipRunningProperty(self): # Boolean
return self.get_body_params().get('IgnoreParentSkipRunningProperty')
def set_IgnoreParentSkipRunningProperty(self, IgnoreParentSkipRunningProperty): # Boolean
self.add_body_params('IgnoreParentSkipRunningProperty', IgnoreParentSkipRunningProperty)
def get_EndEffectDate(self): # Long
return self.get_body_params().get('EndEffectDate')
def set_EndEffectDate(self, EndEffectDate): # Long
self.add_body_params('EndEffectDate', EndEffectDate)
	def get_FileName(self): # String
return self.get_body_params().get('FileName')
def set_FileName(self, FileName): # String
self.add_body_params('FileName', FileName)
def get_InputParameters(self): # String
return self.get_body_params().get('InputParameters')
def set_InputParameters(self, InputParameters): # String
self.add_body_params('InputParameters', InputParameters)
def get_Stop(self): # Boolean
return self.get_body_params().get('Stop')
def set_Stop(self, Stop): # Boolean
self.add_body_params('Stop', Stop)
def get_DependentType(self): # String
return self.get_body_params().get('DependentType')
def set_DependentType(self, DependentType): # String
self.add_body_params('DependentType', DependentType)
def get_FileFolderPath(self): # String
return self.get_body_params().get('FileFolderPath')
def set_FileFolderPath(self, FileFolderPath): # String
self.add_body_params('FileFolderPath', FileFolderPath)
def get_FileDescription(self): # String
return self.get_body_params().get('FileDescription')
def set_FileDescription(self, FileDescription): # String
self.add_body_params('FileDescription', FileDescription)
def get_AutoParsing(self): # Boolean
return self.get_body_params().get('AutoParsing')
def set_AutoParsing(self, AutoParsing): # Boolean
self.add_body_params('AutoParsing', AutoParsing)
def get_SchedulerType(self): # String
return self.get_body_params().get('SchedulerType')
def set_SchedulerType(self, SchedulerType): # String
self.add_body_params('SchedulerType', SchedulerType)
| null |
1,740 |
"""
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tensorflow as tf
from models import SOKDenseDemo
import argparse
import sys
sys.path.append("../")
import utility
from utility import sparse_operation_kit as sok
import nvtx
def main(args):
strategy = tf.distribute.MirroredStrategy()
dataset = utility.TFDataset(
filename=args.data_filename,
batchsize=args.global_batch_size,
as_sparse_tensor=False,
repeat=1,
)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
dataset = strategy.experimental_distribute_dataset(dataset)
with strategy.scope():
sok.Init(global_batch_size=args.global_batch_size)
model = SOKDenseDemo(
max_vocabulary_size_per_gpu=args.max_vocabulary_size_per_gpu,
embedding_vec_size=args.embedding_vec_size,
slot_num=args.slot_num,
nnz_per_slot=args.nnz_per_slot,
num_dense_layers=args.num_dense_layers,
)
embedding_optimizer = utility.get_embedding_optimizer(args.optimizer)(learning_rate=0.1)
dense_optimizer = utility.get_dense_optimizer(args.optimizer)(learning_rate=0.1)
loss_fn = tf.keras.losses.BinaryCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
def _replica_loss(labels, logits):
loss = loss_fn(labels, logits)
return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)
@tf.function
    def _train_step(inputs, labels):
with tf.GradientTape() as tape:
logit = model(inputs, training=True)
loss = _replica_loss(labels, logit)
emb_variable, other_variable = sok.split_embedding_variable_from_others(
model.trainable_variables
)
grads, emb_grads = tape.gradient(loss, [other_variable, emb_variable])
if "plugin" not in args.optimizer:
with sok.OptimizerScope(emb_variable):
embedding_optimizer.apply_gradients(
zip(emb_grads, emb_variable), experimental_aggregate_gradients=False
)
else:
embedding_optimizer.apply_gradients(
zip(emb_grads, emb_variable), experimental_aggregate_gradients=False
)
dense_optimizer.apply_gradients(zip(grads, other_variable))
return loss
for i, (inputs, labels) in enumerate(dataset):
if args.stop_at_iter > 0 and i >= args.stop_at_iter:
break
rng = nvtx.start_range(message="Iteration_" + str(i), color="blue")
        replica_loss = strategy.run(_train_step, args=(inputs, labels))
loss = strategy.reduce(tf.distribute.ReduceOp.SUM, replica_loss, axis=None)
nvtx.end_range(rng)
print("[INFO]: Iteration: {}, loss={}".format(i, loss))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="run DNN model with SparseOperationKit")
parser.add_argument(
"--data_filename", type=str, help="the filename of training data", required=True
)
parser.add_argument("--global_batch_size", type=int, required=True)
parser.add_argument("--max_vocabulary_size_per_gpu", type=int, required=True)
parser.add_argument("--slot_num", type=int, required=True, help="the number of feature fields")
parser.add_argument(
"--nnz_per_slot", type=int, required=True, help="the number of keys in each slot"
)
parser.add_argument(
"--num_dense_layers",
type=int,
required=True,
help="the number of fully connected layers in this DNN model",
)
parser.add_argument(
"--embedding_vec_size", type=int, required=True, help="the dimension of embedding vectors"
)
parser.add_argument(
"--optimizer",
type=str,
help="use what optimizer",
required=False,
default="plugin_adam",
choices=["plugin_adam", "adam", "sgd"],
)
parser.add_argument(
"--stop_at_iter",
type=int,
required=False,
help="early stop the process if iteration reaches this setting.",
default=-1,
)
args = parser.parse_args()
main(args)
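# Example invocation (illustrative; the script and data file names below are
# placeholders):
#
#   python3 run_sok_dense_demo.py --data_filename train.file \
#       --global_batch_size 8192 --max_vocabulary_size_per_gpu 8192 \
#       --slot_num 10 --nnz_per_slot 4 --num_dense_layers 5 \
#       --embedding_vec_size 16 --optimizer plugin_adam --stop_at_iter 100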
| null |
1,741 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class ExportSuspEventsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'ExportSuspEvents')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_TimeEnd(self): # String
return self.get_query_params().get('TimeEnd')
def set_TimeEnd(self, TimeEnd): # String
self.add_query_param('TimeEnd', TimeEnd)
def get_TargetType(self): # String
return self.get_query_params().get('TargetType')
def set_TargetType(self, TargetType): # String
self.add_query_param('TargetType', TargetType)
def get_Remark(self): # String
return self.get_query_params().get('Remark')
def set_Remark(self, Remark): # String
self.add_query_param('Remark', Remark)
def get_ContainerFieldName(self): # String
return self.get_query_params().get('ContainerFieldName')
def set_ContainerFieldName(self, ContainerFieldName): # String
self.add_query_param('ContainerFieldName', ContainerFieldName)
def get_SourceIp(self): # String
return self.get_query_params().get('SourceIp')
def set_SourceIp(self, SourceIp): # String
self.add_query_param('SourceIp', SourceIp)
def get_ContainerFieldValue(self): # String
return self.get_query_params().get('ContainerFieldValue')
def set_ContainerFieldValue(self, ContainerFieldValue): # String
self.add_query_param('ContainerFieldValue', ContainerFieldValue)
def get_PageSize(self): # String
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # String
self.add_query_param('PageSize', PageSize)
def get_From(self): # String
return self.get_query_params().get('From')
def set_From(self, _From): # String
self.add_query_param('From', _From)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Dealed(self): # String
return self.get_query_params().get('Dealed')
def set_Dealed(self, Dealed): # String
self.add_query_param('Dealed', Dealed)
def get_CurrentPage(self): # String
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self, CurrentPage): # String
self.add_query_param('CurrentPage', CurrentPage)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_AssetsTypeLists(self): # RepeatList
return self.get_query_params().get('AssetsTypeList')
def set_AssetsTypeLists(self, AssetsTypeList): # RepeatList
for depth1 in range(len(AssetsTypeList)):
self.add_query_param('AssetsTypeList.' + str(depth1 + 1), AssetsTypeList[depth1])
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_TimeStart(self): # String
return self.get_query_params().get('TimeStart')
def set_TimeStart(self, TimeStart): # String
self.add_query_param('TimeStart', TimeStart)
def get_Levels(self): # String
return self.get_query_params().get('Levels')
def set_Levels(self, Levels): # String
self.add_query_param('Levels', Levels)
	def get_ParentEventTypes(self): # String
return self.get_query_params().get('ParentEventTypes')
def set_ParentEventTypes(self, ParentEventTypes): # String
self.add_query_param('ParentEventTypes', ParentEventTypes)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status)
| null |
1,742 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateCmsOrderRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2019-01-01', 'CreateCmsOrder','cms')
self.set_method('POST')
def get_SmsCount(self): # String
return self.get_query_params().get('SmsCount')
def set_SmsCount(self, SmsCount): # String
self.add_query_param('SmsCount', SmsCount)
def get_AutoUseCoupon(self): # Boolean
return self.get_query_params().get('AutoUseCoupon')
def set_AutoUseCoupon(self, AutoUseCoupon): # Boolean
self.add_query_param('AutoUseCoupon', AutoUseCoupon)
def get_LogMonitorStream(self): # String
return self.get_query_params().get('LogMonitorStream')
def set_LogMonitorStream(self, LogMonitorStream): # String
self.add_query_param('LogMonitorStream', LogMonitorStream)
def get_CustomTimeSeries(self): # String
return self.get_query_params().get('CustomTimeSeries')
def set_CustomTimeSeries(self, CustomTimeSeries): # String
self.add_query_param('CustomTimeSeries', CustomTimeSeries)
def get_ApiCount(self): # String
return self.get_query_params().get('ApiCount')
def set_ApiCount(self, ApiCount): # String
self.add_query_param('ApiCount', ApiCount)
def get_PhoneCount(self): # String
return self.get_query_params().get('PhoneCount')
def set_PhoneCount(self, PhoneCount): # String
self.add_query_param('PhoneCount', PhoneCount)
def get_AutoRenewPeriod(self): # Integer
return self.get_query_params().get('AutoRenewPeriod')
def set_AutoRenewPeriod(self, AutoRenewPeriod): # Integer
self.add_query_param('AutoRenewPeriod', AutoRenewPeriod)
def get_Period(self): # Integer
return self.get_query_params().get('Period')
def set_Period(self, Period): # Integer
self.add_query_param('Period', Period)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
	def get_SuggestType(self): # String
return self.get_query_params().get('SuggestType')
def set_SuggestType(self, SuggestType): # String
self.add_query_param('SuggestType', SuggestType)
def get_EventStoreNum(self): # String
return self.get_query_params().get('EventStoreNum')
def set_EventStoreNum(self, EventStoreNum): # String
self.add_query_param('EventStoreNum', EventStoreNum)
def get_SiteTaskNum(self): # String
return self.get_query_params().get('SiteTaskNum')
def set_SiteTaskNum(self, SiteTaskNum): # String
self.add_query_param('SiteTaskNum', SiteTaskNum)
def get_PeriodUnit(self): # String
return self.get_query_params().get('PeriodUnit')
def set_PeriodUnit(self, PeriodUnit): # String
self.add_query_param('PeriodUnit', PeriodUnit)
def get_SiteOperatorNum(self): # String
return self.get_query_params().get('SiteOperatorNum')
def set_SiteOperatorNum(self, SiteOperatorNum): # String
self.add_query_param('SiteOperatorNum', SiteOperatorNum)
def get_SiteEcsNum(self): # String
return self.get_query_params().get('SiteEcsNum')
def set_SiteEcsNum(self, SiteEcsNum): # String
self.add_query_param('SiteEcsNum', SiteEcsNum)
def get_EventStoreTime(self): # String
return self.get_query_params().get('EventStoreTime')
def set_EventStoreTime(self, EventStoreTime): # String
self.add_query_param('EventStoreTime', EventStoreTime)
def get_PayType(self): # String
return self.get_query_params().get('PayType')
def set_PayType(self, PayType): # String
self.add_query_param('PayType', PayType)
| null |
1,743 |
"""Tools to open ``*.py`` files as Unicode.
Uses the encoding specified within the file, as per PEP 263.
Much of the code is taken from the tokenize module in Python 3.2.
This file was forked from the IPython project:
* Copyright (c) 2008-2014, IPython Development Team
* Copyright (C) 2001-2007 Fernando Perez <[email protected]>
* Copyright (c) 2001, Janko Hauser <[email protected]>
* Copyright (c) 2001, Nathaniel Gray <[email protected]>
"""
import io
import re
from xonsh.lazyasd import LazyObject
from xonsh.tokenize import detect_encoding, tokopen
cookie_comment_re = LazyObject(
lambda: re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE),
globals(),
"cookie_comment_re",
)
def source_to_unicode(txt, errors="replace", skip_encoding_cookie=True):
"""Converts a bytes string with python source code to unicode.
Unicode strings are passed through unchanged. Byte strings are checked
for the python source file encoding cookie to determine encoding.
txt can be either a bytes buffer or a string containing the source
code.
"""
if isinstance(txt, str):
return txt
if isinstance(txt, bytes):
buf = io.BytesIO(txt)
else:
buf = txt
try:
encoding, _ = detect_encoding(buf.readline)
except SyntaxError:
encoding = "ascii"
buf.seek(0)
text = io.TextIOWrapper(buf, encoding, errors=errors, line_buffering=True)
text.mode = "r"
if skip_encoding_cookie:
return "".join(strip_encoding_cookie(text))
else:
return text.read()
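# Minimal sketch of the behaviour above: a byte string carrying a PEP 263
# cookie is decoded with the declared encoding, and the cookie line itself is
# stripped when skip_encoding_cookie is True (the default).
#
#   >>> source_to_unicode(b"# -*- coding: latin-1 -*-\nname = '\xe9'\n")
#   "name = 'é'\n"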
def strip_encoding_cookie(filelike):
"""Generator to pull lines from a text-mode file, skipping the encoding
cookie if it is found in the first two lines.
"""
it = iter(filelike)
try:
first = next(it)
if not cookie_comment_re.match(first):
yield first
second = next(it)
if not cookie_comment_re.match(second):
yield second
except StopIteration:
return
yield from it
def read_py_file(filename, skip_encoding_cookie=True):
"""Read a Python file, using the encoding declared inside the file.
Parameters
----------
filename : str
The path to the file to read.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output - compiling a
unicode string with an encoding declaration is a SyntaxError in Python 2.
Returns
-------
A unicode string containing the contents of the file.
"""
with tokopen(filename) as f: # the open function defined in this module.
if skip_encoding_cookie:
return "".join(strip_encoding_cookie(f))
else:
return f.read()
def read_py_url(url, errors="replace", skip_encoding_cookie=True):
"""Read a Python file from a URL, using the encoding declared inside the file.
Parameters
----------
url : str
The URL from which to fetch the file.
errors : str
How to handle decoding errors in the file. Options are the same as for
bytes.decode(), but here 'replace' is the default.
skip_encoding_cookie : bool
If True (the default), and the encoding declaration is found in the first
two lines, that line will be excluded from the output - compiling a
unicode string with an encoding declaration is a SyntaxError in Python 2.
Returns
-------
A unicode string containing the contents of the file.
"""
# Deferred import for faster start
try:
from urllib.request import urlopen # Py 3
except ImportError:
from urllib import urlopen
response = urlopen(url)
buf = io.BytesIO(response.read())
return source_to_unicode(buf, errors, skip_encoding_cookie)
def list_readline(x):
"""Given a list, returns a readline() function that returns the next element
with each call.
"""
x = iter(x)
def readline():
return next(x)
return readline
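# Minimal usage sketch: detect_encoding expects a readline() over byte lines,
# which list_readline provides for in-memory data.
#
#   >>> readline = list_readline([b"# -*- coding: utf-8 -*-\n", b"x = 1\n"])
#   >>> detect_encoding(readline)[0]
#   'utf-8'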
| null |
1,744 |
# Copyright 2022 Memgraph Ltd.
#
# Use of this software is governed by the Business Source License
# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
# License, and you may not use this file except in compliance with the Business Source License.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0, included in the file
# licenses/APL.txt.
import sys
import pytest
from mgclient import DatabaseError
from common import connect, execute_and_fetch_all, reset_update_permissions
update_property_query = "MATCH (n:update_label) SET n.prop = 2 RETURN n.prop;"
update_properties_query = "MATCH (n:update_label) SET n = {prop: 2, prop2: 3} RETURN n.prop;"
remove_property_query = "MATCH (n:update_label) REMOVE n.prop RETURN n.prop;"
def test_can_read_node_when_given_update_grant():
admin_cursor = connect(username="admin", password="test").cursor()
reset_update_permissions(admin_cursor)
execute_and_fetch_all(admin_cursor, "GRANT UPDATE ON LABELS :update_label TO user;")
test_cursor = connect(username="user", password="test").cursor()
results = execute_and_fetch_all(test_cursor, "MATCH (n:update_label) RETURN n;")
assert len(results) == 1
def test_can_update_node_when_given_update_grant():
admin_cursor = connect(username="admin", password="test").cursor()
reset_update_permissions(admin_cursor)
execute_and_fetch_all(admin_cursor, "GRANT UPDATE ON LABELS :update_label TO user;")
test_cursor = connect(username="user", password="test").cursor()
update_property_actual = execute_and_fetch_all(test_cursor, update_property_query)
update_properties_actual = execute_and_fetch_all(test_cursor, update_properties_query)
remove_property_actual = execute_and_fetch_all(test_cursor, remove_property_query)
assert update_property_actual[0][0] == 2
assert update_properties_actual[0][0] == 2
assert remove_property_actual[0][0] is None
def test_can_not_update_node_when_given_deny():
admin_cursor = connect(username="admin", password="test").cursor()
reset_update_permissions(admin_cursor)
execute_and_fetch_all(admin_cursor, "GRANT READ ON LABELS :update_label TO user;")
test_cursor = connect(username="user", password="test").cursor()
with pytest.raises(DatabaseError):
execute_and_fetch_all(test_cursor, update_property_query)
with pytest.raises(DatabaseError):
execute_and_fetch_all(test_cursor, update_properties_query)
with pytest.raises(DatabaseError):
execute_and_fetch_all(test_cursor, remove_property_query)
def test_can_not_update_node_when_given_read():
admin_cursor = connect(username="admin", password="test").cursor()
reset_update_permissions(admin_cursor)
execute_and_fetch_all(admin_cursor, "GRANT READ ON LABELS :update_label TO user;")
test_cursor = connect(username="user", password="test").cursor()
with pytest.raises(DatabaseError):
execute_and_fetch_all(test_cursor, update_property_query)
with pytest.raises(DatabaseError):
execute_and_fetch_all(test_cursor, update_properties_query)
with pytest.raises(DatabaseError):
execute_and_fetch_all(test_cursor, remove_property_query)
def test_can_not_update_node_when_given_read_globally():
admin_cursor = connect(username="admin", password="test").cursor()
reset_update_permissions(admin_cursor)
execute_and_fetch_all(admin_cursor, "GRANT READ ON LABELS * TO user;")
test_cursor = connect(username="user", password="test").cursor()
with pytest.raises(DatabaseError):
execute_and_fetch_all(test_cursor, update_property_query)
with pytest.raises(DatabaseError):
execute_and_fetch_all(test_cursor, update_properties_query)
with pytest.raises(DatabaseError):
execute_and_fetch_all(test_cursor, remove_property_query)
def test_can_update_node_when_given_update_globally():
admin_cursor = connect(username="admin", password="test").cursor()
reset_update_permissions(admin_cursor)
execute_and_fetch_all(admin_cursor, "GRANT UPDATE ON LABELS * TO user;")
test_cursor = connect(username="user", password="test").cursor()
update_property_actual = execute_and_fetch_all(test_cursor, update_property_query)
update_properties_actual = execute_and_fetch_all(test_cursor, update_properties_query)
remove_property_actual = execute_and_fetch_all(test_cursor, remove_property_query)
assert update_property_actual[0][0] == 2
assert update_properties_actual[0][0] == 2
assert remove_property_actual[0][0] is None
def test_can_update_node_when_given_create_delete_globally():
admin_cursor = connect(username="admin", password="test").cursor()
reset_update_permissions(admin_cursor)
execute_and_fetch_all(admin_cursor, "GRANT CREATE_DELETE ON LABELS * TO user;")
test_cursor = connect(username="user", password="test").cursor()
update_property_actual = execute_and_fetch_all(test_cursor, update_property_query)
update_properties_actual = execute_and_fetch_all(test_cursor, update_properties_query)
remove_property_actual = execute_and_fetch_all(test_cursor, remove_property_query)
assert update_property_actual[0][0] == 2
assert update_properties_actual[0][0] == 2
assert remove_property_actual[0][0] is None
def test_can_update_node_when_given_create_delete():
admin_cursor = connect(username="admin", password="test").cursor()
reset_update_permissions(admin_cursor)
execute_and_fetch_all(admin_cursor, "GRANT CREATE_DELETE ON LABELS :update_label TO user;")
test_cursor = connect(username="user", password="test").cursor()
update_property_actual = execute_and_fetch_all(test_cursor, update_property_query)
update_properties_actual = execute_and_fetch_all(test_cursor, update_properties_query)
remove_property_actual = execute_and_fetch_all(test_cursor, remove_property_query)
assert update_property_actual[0][0] == 2
assert update_properties_actual[0][0] == 2
assert remove_property_actual[0][0] is None
if __name__ == "__main__":
sys.exit(pytest.main([__file__, "-rA"]))
| null |
1,745 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import logging
import math
from enum import Enum
import torch
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
PADDING_SYMBOL = 0
DECODER_START_SYMBOL = 1
class Seq2SlateMode(Enum):
RANK_MODE = "rank"
PER_SEQ_LOG_PROB_MODE = "per_sequence_log_prob"
PER_SYMBOL_LOG_PROB_DIST_MODE = "per_symbol_log_prob_dist"
DECODE_ONE_STEP_MODE = "decode_one_step"
ENCODER_SCORE_MODE = "encoder_score_mode"
class Seq2SlateOutputArch(Enum):
# Only output encoder scores
ENCODER_SCORE = "encoder_score"
# A decoder outputs a sequence in an autoregressive way
AUTOREGRESSIVE = "autoregressive"
# Using encoder scores, a decoder outputs a sequence using
# frechet sort (equivalent to iterative softmax)
FRECHET_SORT = "frechet_sort"
def print_model_info(seq2slate):
def _num_of_params(model):
return len(torch.cat([p.flatten() for p in model.parameters()]))
logger.info(f"Num of total params: {_num_of_params(seq2slate)}")
logger.info(f"Num of Encoder params: {_num_of_params(seq2slate.encoder)}")
logger.info(
f"Num of Candidate Embedder params: {_num_of_params(seq2slate.candidate_embedder)}"
)
logger.info(
f"Num of State Embedder params: {_num_of_params(seq2slate.state_embedder)}"
)
if seq2slate.output_arch == Seq2SlateOutputArch.FRECHET_SORT:
logger.info(
f"Num of Encoder_Scorer params: {_num_of_params(seq2slate.encoder_scorer)}"
)
elif seq2slate.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE:
logger.info(
f"Num of Positional Encoding params: {_num_of_params(seq2slate.positional_encoding_decoder)}"
)
logger.info(f"Num of Decoder params: {_num_of_params(seq2slate.decoder)}")
elif seq2slate.output_arch == Seq2SlateOutputArch.ENCODER_SCORE:
logger.info(
f"Num of Encoder_Scorer params: {_num_of_params(seq2slate.encoder_scorer)}"
)
def mask_logits_by_idx(logits, tgt_in_idx):
# logits shape: batch_size, seq_len, candidate_size
# tgt_in_idx shape: batch_size, seq_len
# the first two symbols are reserved for padding and decoder-starting symbols
# so they should never be a possible output label
logits[:, :, :2] = float("-inf")
batch_size, seq_len = tgt_in_idx.shape
mask_indices = torch.tril(
tgt_in_idx.repeat(1, seq_len).reshape(batch_size, seq_len, seq_len), diagonal=0
)
logits = logits.scatter(2, mask_indices, float("-inf"))
return logits
def subsequent_mask(size: int, device: torch.device):
"""
Mask out subsequent positions. Mainly used in the decoding process,
in which an item should not attend subsequent items.
mask_ijk = 0 if the item should be ignored; 1 if the item should be paid attention
"""
subsequent_mask = ~torch.triu(
torch.ones(1, size, size, device=device, dtype=torch.bool), diagonal=1
)
return subsequent_mask
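# Illustration: for size=3 the mask is lower-triangular, so position i can
# attend only to positions <= i.
#
#   >>> subsequent_mask(3, torch.device("cpu"))
#   tensor([[[ True, False, False],
#            [ True,  True, False],
#            [ True,  True,  True]]])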
# TODO (@czxttkl): use when we introduce padding
def subsequent_and_padding_mask(tgt_in_idx):
"""Create a mask to hide padding and future items"""
# tgt_in_idx shape: batch_size, seq_len
# tgt_tgt_mask shape: batch_size, 1, seq_len
tgt_tgt_mask = (tgt_in_idx != PADDING_SYMBOL).unsqueeze(-2).type(torch.int8)
# subseq_mask shape: 1, seq_len, seq_len
subseq_mask = subsequent_mask(tgt_in_idx.size(-1), tgt_in_idx.device)
# tgt_tgt_mask shape: batch_size, seq_len, seq_len
tgt_tgt_mask = tgt_tgt_mask & subseq_mask
return tgt_tgt_mask
def clones(module, N):
"""
Produce N identical layers.
:param module: nn.Module class
:param N: number of copies
"""
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
def attention(query, key, value, mask, d_k):
"""Scaled Dot Product Attention"""
# mask shape: batch_size x 1 x seq_len x seq_len
# scores shape: batch_size x num_heads x seq_len x seq_len
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
scores = scores.masked_fill(mask == 0, float("-inf"))
# p_attn shape: batch_size x num_heads x seq_len x seq_len
p_attn = F.softmax(scores, dim=3)
# attn shape: batch_size x num_heads x seq_len x d_k
attn = torch.matmul(p_attn, value)
return attn, p_attn
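# Shape sketch (illustrative): with query/key/value of shape
# (batch_size, num_heads, seq_len, d_k) and a broadcastable mask, both
# `scores` and `p_attn` are (batch_size, num_heads, seq_len, seq_len), and
# the returned `attn` is (batch_size, num_heads, seq_len, d_k).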
def per_symbol_to_per_seq_log_probs(per_symbol_log_probs, tgt_out_idx):
"""Gather per-symbol log probabilities into per-seq log probabilities"""
# per_symbol_log_probs shape: batch_size, seq_len, candidate_size
# tgt_out_idx shape: batch_size, seq_len
# per_symbol_log_probs is log probability of each symbol in the tgt_out_idx
# shape: batch_size, seq_len
log_probs = torch.gather(per_symbol_log_probs, 2, tgt_out_idx.unsqueeze(2)).squeeze(
2
)
# shape: batch_size, 1
return log_probs.sum(dim=1, keepdim=True)
def per_symbol_to_per_seq_probs(per_symbol_probs, tgt_out_idx):
"""Gather per-symbol probabilities into per-seq probabilities"""
# per_symbol_probs shape: batch_size, seq_len, candidate_size
# tgt_out_idx shape: batch_size, seq_len
# output shape: batch_size, 1
return torch.clamp(
torch.prod(
torch.gather(per_symbol_probs, 2, tgt_out_idx.unsqueeze(2)).squeeze(2),
dim=1,
keepdim=True,
),
# prevent zero probabilities, which cause torch.log return -inf
min=1e-40,
)
def pytorch_decoder_mask(
memory: torch.Tensor, tgt_in_idx: torch.Tensor, num_heads: int
):
"""
Compute the masks used in the PyTorch Transformer-based decoder for
self-attention and attention over encoder outputs
mask_ijk = 1 if the item should be ignored; 0 if the item should be paid attention
Input:
memory shape: batch_size, src_seq_len, dim_model
tgt_in_idx (+2 offseted) shape: batch_size, tgt_seq_len
Return:
tgt_tgt_mask shape: batch_size * num_heads, tgt_seq_len, tgt_seq_len
tgt_src_mask shape: batch_size * num_heads, tgt_seq_len, src_seq_len
"""
batch_size, src_seq_len, _ = memory.shape
tgt_seq_len = tgt_in_idx.shape[1]
device = memory.device
mask_indices = torch.tril(
tgt_in_idx.repeat(1, tgt_seq_len).reshape(batch_size, tgt_seq_len, tgt_seq_len),
diagonal=0,
).to(device)
tgt_src_mask_augmented = torch.zeros(
batch_size, tgt_seq_len, src_seq_len + 2, dtype=torch.bool, device=device
).scatter(2, mask_indices, 1)
tgt_src_mask = tgt_src_mask_augmented[:, :, 2:].repeat_interleave(num_heads, dim=0)
tgt_tgt_mask = (subsequent_mask(tgt_seq_len, device) == 0).repeat(
batch_size * num_heads, 1, 1
)
return tgt_tgt_mask, tgt_src_mask
| null |
1,746 |
# Copyright (C) 2018-2023 The NeoVintageous Team (NeoVintageous).
#
# This file is part of NeoVintageous.
#
# NeoVintageous is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NeoVintageous is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NeoVintageous. If not, see <https://www.gnu.org/licenses/>.
from collections import defaultdict
import glob
import os
import unittest
from sublime import active_window
from NeoVintageous.tests.unittest import Region
_path_to_test_specs = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
# NOTE
#
# Command tests are declared in a special text format in files with the
# .cmd-test extension. Several tests can be declared in the same file. This
# makes it easier to group tests.
#
# Special attention must be paid to whitespace: it is significant in tests.
_TEST_HEADER_DELIM = '***\n' # Comes after the header.
_TEST_DELIM = '\n---///---\n' # Delimits tests.
_TEST_RESULTS_DELIM = '\n---\n' # Separates the test declaration from the expected result.
_CONVERTERS = defaultdict(lambda: (lambda x: str(x))) # type: dict
_CONVERTERS['mode'] = str
_CONVERTERS['count'] = int
def _make_args(args):
arg_dict = {}
for a in args:
name, value = a.split(':', 1)
arg_dict[name] = _CONVERTERS[name](value)
return arg_dict
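# Example (sketch): tokens are "name:value" pairs converted via _CONVERTERS.
# >>> _make_args(['mode:mode_normal', 'count:3'])
# {'mode': 'mode_normal', 'count': 3}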
def _process_notation(text, sel_start_token='^', sel_end_token='$'):
"""
Process @text assuming it contains markers defining selections.
@text
Text that contains @sel_start_token's and @sel_end_token's to define
selection regions.
@sel_start_token
        Marks the start of a selection region. Removed from the text.
@sel_end_token
Marks the end of a selection region. Removed from the text.
Reversed selections can be defined too.
Returns (selections, processed_text), where `selections` are valid ST
ranges, and `processed_text` is @text without the special symbols.
"""
deletions = 0
start = None
selections = []
chars = []
pos = 0
while pos < len(text):
c = text[pos]
if c == sel_start_token:
if start == sel_start_token:
                raise ValueError('unexpected token %s at %d' % (c, pos))
if start is None:
start = pos - deletions
else:
selections.append(Region(start, pos - deletions)) # type: ignore[unreachable]
start = None
deletions += 1
elif c == sel_end_token:
if start == sel_end_token:
                raise ValueError('unexpected token %s at %d' % (c, pos))
if start is None:
start = pos - deletions
else:
selections.append(Region(start, pos - deletions))
start = None
deletions += 1
else:
chars.append(c)
pos += 1
if start is not None:
        raise ValueError('wrong format, orphan ^ at %d' % (start + deletions))
return selections, ''.join(chars)
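# Example (sketch): markers are stripped and their positions recorded, so
# 'a^bc$d' yields one region covering positions 1-3 of 'abcd':
# sels, text = _process_notation('a^bc$d')
# text == 'abcd'; sels[0] == Region(1, 3)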
class CommandTest(object):
def __init__(self, cmd_name, args, description, before_text, after_text, file_name, test_nr, options=None):
self.cmd_name = cmd_name
self.args = args
self.description = description
self.before_text = before_text
self.after_text = after_text
self.file_name = file_name
self.test_nr = test_nr
self.options = options
@property
def message(self):
return "Failure in File: {0} Test Nr.: {1} -- {2}".format(self.file_name, self.test_nr, self.description)
@staticmethod
def from_text(text, file_name, test_nr):
"""Create a test instance from a textual representation."""
header, body = text.split(_TEST_HEADER_DELIM, 1)
header, description = header.split('\n', 1)
description, options = CommandTest.process_description(description)
cmd_name, args = header.split(' ', 1)
args = _make_args(args.split())
assert 'mode' in args, 'all commands need to know the current mode'
before, after = body.split(_TEST_RESULTS_DELIM)
return CommandTest(cmd_name, args, description, before, after, file_name, test_nr, options)
@staticmethod
def process_description(text):
lines = text.split('\n')
description = lines
options_line = lines[0]
opts = {} # type: dict
if options_line.startswith('//options: '):
description = lines[1:]
raw_opts = options_line[11:].split()
opts = _make_args(raw_opts)
return '\n'.join(description), opts
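    # Example (sketch): an options line is split off from the description.
    # >>> CommandTest.process_description('//options: count:2\nmoves right')
    # ('moves right', {'count': 2})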
def run_with(self, runner):
before_sels, before_text = _process_notation(self.before_text)
runner.append(before_text)
runner.METHOD_NAME(before_sels)
view = runner.view
view.run_command(self.cmd_name, self.args)
after_sels, after_text = _process_notation(self.after_text)
runner.assertEqual(view.substr(Region(0, view.size())), after_text, self.message)
runner.assertEqual(list(view.sel()), after_sels, self.message)
class CommandTestCase(unittest.TestCase):
"""
    Runs tests based on cmd-test spec files (*.cmd-test).
Subclasses must implement setUp() and in it set self.path_to_test_specs.
"""
def get_motion_tests(self):
specs = self.get_tests("*.motion-test")
return specs
def get_action_tests(self):
specs = self.get_tests("*.cmd-test")
return specs
def get_tests(self, ext):
"""Yield `CommandTest`s found under the self.path_to_test_specs dir."""
specs = glob.glob(os.path.join(self.path_to_test_specs, ext + "-solo"))
if specs:
specs = specs[0:1]
else:
specs = glob.glob(os.path.join(self.path_to_test_specs, ext))
return specs
def iter_tests(self):
specs = self.get_motion_tests() + self.get_action_tests()
for spec_path in specs:
spec_path = os.path.abspath(spec_path)
content = None
with open(spec_path, 'rt') as f:
content = f.read()
tests = content.split(_TEST_DELIM)
for i, test in enumerate(tests):
if not test:
continue
yield CommandTest.from_text(test, spec_path, i)
def append(self, text):
self.view.run_command('append', {'characters': text}) # type: ignore[has-type]
def reset(self):
if getattr(self, "view", None):
self.view.close() # type: ignore[has-type]
self.view = active_window().new_file()
self.view.set_scratch(True)
def METHOD_NAME(self, sels):
"""
Enable adding selections to the buffer text using a minilanguage.
S = add empty sel before S and delete S
x = add empty sel before x
v = add sel from before the first 'v' to after the last contiguous 'v'
"""
self.view.sel().clear()
self.view.sel().add_all(sels)
class TestAllCommands(CommandTestCase):
def setUp(self):
self.path_to_test_specs = _path_to_test_specs
def test_all(self):
self.reset()
for test in self.iter_tests():
test.run_with(self)
self.reset()
if self.view.is_scratch():
self.view.close()
def tearDown(self):
if self.view.is_scratch():
self.view.close()
super().tearDown()
| null |
1,747 |
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import requests
import time
import urllib3
from requests import ConnectionError
API_METADATA_LOAD = 'metadata/load'
API_METADATA_UPDATE = 'metadata/update'
API_CALL_RETRIES_COUNT = 10
API_CALL_RETRIES_TIMEOUT_SEC = 60.0
class API(object):
def __init__(self, api_host_url, access_key):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self.api = api_host_url + '/pipeline/restapi'
self.__headers__ = {'Content-Type': 'application/json',
'Authorization': 'Bearer {}'.format(access_key)}
def get_url_for_method(self, method):
return '{}/{}'.format(self.api.strip('/'), method)
def get_headers(self):
return self.__headers__
def call(self, method, data=None, params=None, http_method=None, error_message=None, files=None):
retries = API_CALL_RETRIES_COUNT
while retries > 0:
try:
return self.call_plain(method, data, params, http_method, error_message, files)
except ConnectionError:
retries -= 1
print('{} is unreachable, waiting for a timeout and trying again'.format(self.api))
time.sleep(API_CALL_RETRIES_TIMEOUT_SEC)
raise RuntimeError('{} is unreachable and retries count exceeded!'.format(self.api))
def call_plain(self, method, data, params, http_method, error_message, files):
url = '{}/{}'.format(self.api.strip('/'), method)
if not http_method:
if data:
response = requests.post(url, data, headers=self.__headers__, verify=False)
else:
response = requests.get(url, headers=self.__headers__, verify=False)
else:
if http_method.lower() == 'get':
response = requests.get(url, headers=self.__headers__, params=params, verify=False)
elif http_method.lower() == 'put':
response = requests.put(url, data, headers=self.__headers__, params=params, verify=False, files=files)
elif http_method.lower() == 'post':
headers = {}
if files:
headers.update(self.__headers__)
headers.pop('Content-Type')
else:
headers = self.__headers__
response = requests.post(url, data, headers=headers, params=params, verify=False, files=files)
elif http_method.lower() == 'delete':
if data:
response = requests.delete(url, data=data, headers=self.__headers__, verify=False)
else:
response = requests.delete(url, headers=self.__headers__, params=params, verify=False)
else:
if data:
response = requests.post(url, data, headers=self.__headers__, verify=False)
else:
response = requests.get(url, headers=self.__headers__, verify=False)
        content_type = response.headers.get('Content-Type', '')
if content_type.startswith('application/json'):
response_data = json.loads(response.text)
message_text = error_message if error_message else 'Failed to fetch data from server'
if 'status' not in response_data:
raise RuntimeError('{}. Server responded with status: {}.'
                               .format(message_text, str(response.status_code)))
if response_data['status'] != 'OK':
raise RuntimeError('{}. Server responded with message: {}'.format(message_text, response_data['message']))
else:
return response_data
else:
return response.content
def load_entities_metadata(self, entities_ids, entity_class):
data = []
for entity_id in entities_ids:
data.append({'entityId': entity_id, 'entityClass': entity_class})
response = self.call(API_METADATA_LOAD, data=json.dumps(data), http_method='POST')
return self.METHOD_NAME(response, default_value=[])
def upload_metadata(self, metadata_entity):
self.call(API_METADATA_UPDATE, data=json.dumps(metadata_entity), http_method='POST')
@staticmethod
def to_json(obj):
return json.dumps(obj)
@staticmethod
def METHOD_NAME(response, default_value=None):
value = default_value
if 'payload' in response and response['payload']:
value = response['payload']
return value
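# Usage sketch (hypothetical endpoint, token and entity class):
# api = API('https://cloud.example.com', '<access-key>')
# metadata = api.load_entities_metadata([1, 2], 'PIPELINE')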
| null |
1,748 |
import functools
import socket
import sys
from typing import Tuple
from django.conf import settings
from django.core.cache import caches
from django.http import HttpRequest
from django_ratelimit import UNSAFE
from django_ratelimit.core import get_header
from django_ratelimit.decorators import ratelimit
from django_ratelimit.exceptions import Ratelimited
from redis import ConnectionError
def get_user_ip_from_cloudfront_headers(request: HttpRequest) -> str:
"""Make a good key to use for caching the request's IP
CloudFront provides a header that returns the user's IP and port. Weirdly,
the port seems to be random, so we need to strip it to make the user's IP
a consistent key.
So we go from something like:
96.23.39.106:51396
To:
96.23.39.106
:param request: The HTTP request from the user
:return: A simple key that can be used to throttle the user if needed.
"""
header = get_header(request, "CloudFront-Viewer-Address")
return header.split(":")[0]
def get_ip_for_ratelimiter(group: str, request: HttpRequest) -> str:
"""A wrapper to get the IP in a ratelimiter
:param group: Unused: The group key from the ratelimiter
:param request: The HTTP request from the user
:return: A simple key that can be used to throttle the user if needed.
"""
return get_user_ip_from_cloudfront_headers(request)
def get_path_to_make_key(group: str, request: HttpRequest) -> str:
"""Return a string representing the full path to the requested page. This
helper makes a good key to create a global limit to throttle requests.
:param group: Unused: The group key from the ratelimiter
:param request: The HTTP request from the user
:return: A key that can be used to throttle request to a single URL if needed.
"""
return request.path
ratelimiter_all_250_per_h = ratelimit(
key=get_ip_for_ratelimiter,
rate="250/h",
)
# Decorators can't easily be mocked, and we need to not trigger this decorator
# during tests or else the first test works and the rest are blocked. So,
# check if we're doing a test and adjust the decorator accordingly.
if "test" in sys.argv:
ratelimiter_all_2_per_m = lambda func: func
ratelimiter_unsafe_3_per_m = lambda func: func
ratelimiter_unsafe_10_per_m = lambda func: func
ratelimiter_unsafe_2000_per_h = lambda func: func
else:
ratelimiter_all_2_per_m = ratelimit(
key=get_ip_for_ratelimiter,
rate="2/m",
)
ratelimiter_unsafe_3_per_m = ratelimit(
key=get_ip_for_ratelimiter,
rate="3/m",
method=UNSAFE,
)
ratelimiter_unsafe_10_per_m = ratelimit(
key=get_ip_for_ratelimiter,
rate="10/m",
method=UNSAFE,
)
ratelimiter_unsafe_2000_per_h = ratelimit(
key=get_path_to_make_key,
rate="2000/h",
method=UNSAFE,
)
# See: https://www.bing.com/webmaster/help/how-to-verify-bingbot-3905dc26
# and: https://support.google.com/webmasters/answer/80553?hl=en
APPROVED_DOMAINS = [
"google.com",
"googlebot.com",
"search.msn.com",
"localhost", # For dev.
]
def ratelimit_deny_list(view):
"""A wrapper for the ratelimit function that adds an allowlist for approved
crawlers.
"""
ratelimited_view = ratelimiter_all_250_per_h(view)
@functools.wraps(view)
def wrapper(request, *args, **kwargs):
try:
return ratelimited_view(request, *args, **kwargs)
except Ratelimited as e:
if is_allowlisted(request):
return view(request, *args, **kwargs)
else:
raise e
except ConnectionError:
# Unable to connect to redis, let the view proceed this time.
return view(request, *args, **kwargs)
return wrapper
def get_host_from_IP(ip_address: str) -> str:
"""Get the host for an IP address by doing a reverse DNS lookup. Return
the value as a string.
"""
return socket.getfqdn(ip_address)
def get_ip_from_host(host: str) -> str:
"""Do a forward DNS lookup of the host found in step one."""
return socket.gethostbyname(host)
def host_is_approved(host: str) -> bool:
"""Check whether the domain is in our approved allowlist."""
return any(
[
host.endswith(approved_domain)
for approved_domain in APPROVED_DOMAINS
]
)
def verify_ip_address(ip_address: str) -> bool:
"""Do authentication checks for the IP address requesting the page."""
# First we do a rDNS lookup of the IP.
host = get_host_from_IP(ip_address)
# Then we check the returned host to ensure it's an approved crawler
if host_is_approved(host):
# If it's approved, do a forward DNS lookup to get the IP from the host.
# If that matches the original IP, we're good.
if ip_address == get_ip_from_host(host):
# Everything checks out!
return True
return False
def is_allowlisted(request: HttpRequest) -> bool:
"""Checks if the IP address is allowlisted due to belonging to an approved
crawler.
Returns True if so, else False.
"""
cache_name = getattr(settings, "RATELIMIT_USE_CACHE", "default")
cache = caches[cache_name]
allowlist_cache_prefix = "rl:allowlist"
ip_address = get_user_ip_from_cloudfront_headers(request)
if ip_address is None:
return False
allowlist_key = f"{allowlist_cache_prefix}:{ip_address}"
# Check if the ip address is in our allowlist.
if cache.get(allowlist_key):
return True
    # If not allowlisted, verify the IP address and add it to the cache for
    # future requests.
approved_crawler = verify_ip_address(ip_address)
if approved_crawler:
# Add the IP to our cache with a one week expiration date
a_week = 60 * 60 * 24 * 7
cache.set(allowlist_key, ip_address, a_week)
return approved_crawler
def METHOD_NAME(rate: str) -> Tuple[int, int]:
"""
Given the request rate string, return a two tuple of:
<allowed number of requests>, <period of time in seconds>
(Stolen from Django Rest Framework.)
"""
num, period = rate.split("/")
num_requests = int(num)
if len(period) > 1:
# It takes the form of a 5d, or 10s, or whatever
duration_multiplier = int(period[0:-1])
duration_unit = period[-1]
else:
duration_multiplier = 1
duration_unit = period[-1]
duration_base = {"s": 1, "m": 60, "h": 3600, "d": 86400}[duration_unit]
duration = duration_base * duration_multiplier
return num_requests, duration
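# Example (sketch): "10/5m" allows 10 requests per 300 seconds.
# >>> METHOD_NAME("10/5m")
# (10, 300)
# >>> METHOD_NAME("250/h")
# (250, 3600)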
| null |
1,749 |
# Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import functools
import logging
import os
import shutil
import tempfile
from guild import config
from guild import run as runlib
from guild import util
log = logging.getLogger("guild")
def path(*names):
names = [name for name in names if name]
return os.path.join(config.guild_home(), *names)
def runs_dir(deleted=False):
if deleted:
return trash_dir("runs")
return path("runs")
def trash_dir(name=None):
return path("trash", name)
def cache_dir(name=None):
return path("cache", name)
def pidfile(name):
return path("proc", name)
def logfile(name):
return path("log", name)
def remote_dir(name=None):
# Use directory containing user config to store remote info.
rest_path = [name] if name else []
config_path = config.user_config_path()
if config_path:
return os.path.join(os.path.dirname(config_path), "remotes", *rest_path)
return path("remotes", name)
def runs(root=None, sort=None, filter=None, force_root=False, base_runs=None):
filter = filter or (lambda _: True)
all_runs = (
_all_runs_f(root, force_root) if base_runs is None #
else lambda: base_runs
)
runs = [run for run in all_runs() if filter(run)]
if sort:
runs = sorted(runs, key=_run_sort_key(sort))
return runs
def _all_runs_f(root, force_root):
root = root or runs_dir()
if force_root:
return _default_all_runs_f(root)
return util.find_apply(
[
_zipfile_all_runs_f,
_runs_under_parent_f,
_default_all_runs_f,
],
root,
)
def _default_all_runs_f(root):
return lambda: _all_runs(root)
def _zipfile_all_runs_f(root):
if not root or not root.lower().endswith(".zip"):
return None
from . import run_zip_proxy
def f():
try:
return run_zip_proxy.all_runs(root)
except Exception as e:
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception("getting runs for zip file %s", root)
log.error("cannot read from %s: %s", root, e)
return []
return f
def _runs_under_parent_f(root):
runs_parent = os.getenv("GUILD_RUNS_PARENT")
if not runs_parent:
return None
log.debug("limitting to runs under parent %s", runs_parent)
return lambda: _runs_for_parent(runs_parent, root)
def _runs_for_parent(parent, root):
parent_path = os.path.join(root, parent)
try:
names = os.listdir(parent_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return []
else:
return _runs_for_parent_links(parent_path, names, root)
def _runs_for_parent_links(parent_path, names, runs_dir):
real_paths = [util.realpath(os.path.join(parent_path, name)) for name in names]
return [
runlib.for_dir(path) for path in real_paths
if METHOD_NAME(path, runs_dir)
]
def METHOD_NAME(path, runs_dir):
return util.compare_paths(os.path.dirname(path), runs_dir)
def run_filter(name, *args):
if name.startswith("!"):
name = name[1:]
maybe_negate = lambda f: lambda r: not f(r)
else:
maybe_negate = lambda f: f
if name == "true":
filter = lambda _: True
elif name == "attr":
name, expected = args
filter = lambda r: _run_attr(r, name) == expected
elif name == "all":
(filters,) = args
filter = lambda r: all((f(r) for f in filters))
elif name == "any":
(filters,) = args
filter = lambda r: any((f(r) for f in filters))
else:
raise ValueError(name)
return maybe_negate(filter)
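# Example (sketch, assuming runs expose a 'status' attribute): a leading "!"
# negates a filter; "all"/"any" combine lists of filters.
# completed = run_filter("attr", "status", "completed")
# not_completed = run_filter("!attr", "status", "completed")
# combined = run_filter("any", [completed, not_completed])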
def _all_runs(root):
return [runlib.Run(name, path) for name, path in _iter_dirs(root)]
def iter_run_dirs(root=None):
return _iter_dirs(root or runs_dir())
def _iter_dirs(root):
try:
names = os.listdir(root)
except OSError:
names = []
for name in names:
path = os.path.join(root, name)
if _opref_exists(path):
yield name, path
def _opref_exists(run_dir):
opref_path = os.path.join(run_dir, ".guild", "opref")
return os.path.exists(opref_path)
def _run_sort_key(sort):
return functools.cmp_to_key(lambda x, y: _run_cmp(x, y, sort))
def _run_cmp(x, y, sort):
for attr in sort:
attr_cmp = _run_attr_cmp(x, y, attr)
if attr_cmp != 0:
return attr_cmp
return 0
def _run_attr_cmp(x, y, attr):
if attr.startswith("-"):
attr = attr[1:]
rev = -1
else:
rev = 1
x_val = _run_attr(x, attr)
if x_val is None:
return -rev
y_val = _run_attr(y, attr)
if y_val is None:
return rev
return rev * ((x_val > y_val) - (x_val < y_val))
def _run_attr(run, name):
if name in runlib.Run.__properties__:
return getattr(run, name)
return run.get(name)
def delete_runs(runs, permanent=False):
for run in runs:
src = run.dir
if permanent:
_delete_run(src)
else:
dest = os.path.join(runs_dir(deleted=True), run.id)
_move(src, dest)
def purge_runs(runs):
for run in runs:
_delete_run(run.dir)
def _delete_run(src):
assert src and src != os.path.sep, src
assert src.startswith(runs_dir()) or src.startswith(runs_dir(deleted=True)), src
log.debug("deleting %s", src)
shutil.rmtree(src)
def _move(src, dest):
util.ensure_dir(os.path.dirname(dest))
log.debug("moving %s to %s", src, dest)
if os.path.exists(dest):
_move_to_backup(dest)
shutil.move(src, dest)
def _move_to_backup(path):
dir = os.path.dirname(path)
prefix = f"{os.path.basename(path)}_"
backup = tempfile.NamedTemporaryFile(prefix=prefix, dir=dir, delete=True)
log.warning("%s exists, moving to %s", path, backup.name)
backup.close()
shutil.move(path, backup.name)
def restore_runs(runs):
for run in runs:
src = os.path.join(run.dir)
dest = os.path.join(runs_dir(), run.id)
if util.compare_paths(src, dest):
log.warning("%s is already restored, skipping", run.id)
continue
_move(src, dest)
def find_runs(run_id_prefix, root=None):
root = root or runs_dir()
return (
(name, path) for name, path in _iter_dirs(root)
if name.startswith(run_id_prefix)
)
def get_run(run_id, root=None):
root = root or runs_dir()
path = os.path.join(root, run_id)
if os.path.exists(path):
return runlib.Run(run_id, path)
raise LookupError(run_id)
| null |
1,750 |
# coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic import Extra, BaseModel, Field, StrictStr, conint, constr, validator
from lightly.openapi_generated.swagger_client.models.docker_worker_config import DockerWorkerConfig
class DockerWorkerConfigData(BaseModel):
"""
DockerWorkerConfigData
"""
id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
version: Optional[StrictStr] = None
config: DockerWorkerConfig = Field(...)
config_orig: Optional[DockerWorkerConfig] = Field(None, alias="configOrig")
created_at: Optional[conint(strict=True, ge=0)] = Field(None, alias="createdAt", description="unix timestamp in milliseconds")
__properties = ["id", "version", "config", "configOrig", "createdAt"]
@validator('id')
def METHOD_NAME(cls, value):
"""Validates the regular expression"""
if not re.match(r"^[a-f0-9]{24}$", value):
raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
return value
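    # Example (sketch): "5f4d0e2a9b1c3d4e5f6a7b8c" passes validation, while
    # shorter or uppercase ids raise ValueError.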
class Config:
"""Pydantic configuration"""
allow_population_by_field_name = True
validate_assignment = True
use_enum_values = True
extra = Extra.forbid
def to_str(self, by_alias: bool = False) -> str:
"""Returns the string representation of the model"""
return pprint.pformat(self.dict(by_alias=by_alias))
def to_json(self, by_alias: bool = False) -> str:
"""Returns the JSON representation of the model"""
return json.dumps(self.to_dict(by_alias=by_alias))
@classmethod
def from_json(cls, json_str: str) -> DockerWorkerConfigData:
"""Create an instance of DockerWorkerConfigData from a JSON string"""
return cls.from_dict(json.loads(json_str))
def to_dict(self, by_alias: bool = False):
"""Returns the dictionary representation of the model"""
_dict = self.dict(by_alias=by_alias,
exclude={
},
exclude_none=True)
# override the default output from pydantic by calling `to_dict()` of config
if self.config:
_dict['config' if by_alias else 'config'] = self.config.to_dict(by_alias=by_alias)
# override the default output from pydantic by calling `to_dict()` of config_orig
if self.config_orig:
_dict['configOrig' if by_alias else 'config_orig'] = self.config_orig.to_dict(by_alias=by_alias)
return _dict
@classmethod
def from_dict(cls, obj: dict) -> DockerWorkerConfigData:
"""Create an instance of DockerWorkerConfigData from a dict"""
if obj is None:
return None
if not isinstance(obj, dict):
return DockerWorkerConfigData.parse_obj(obj)
# raise errors for additional fields in the input
for _key in obj.keys():
if _key not in cls.__properties:
raise ValueError("Error due to additional fields (not defined in DockerWorkerConfigData) in the input: " + str(obj))
_obj = DockerWorkerConfigData.parse_obj({
"id": obj.get("id"),
"version": obj.get("version"),
"config": DockerWorkerConfig.from_dict(obj.get("config")) if obj.get("config") is not None else None,
"config_orig": DockerWorkerConfig.from_dict(obj.get("configOrig")) if obj.get("configOrig") is not None else None,
"created_at": obj.get("createdAt")
})
return _obj
| null |
1,751 |
import re
import pytest
from barril.units import Scalar
from alfasim_sdk._internal.constants import EmulsionDropletSizeModelType
from alfasim_sdk._internal.constants import EmulsionInversionPointModelType
from alfasim_sdk._internal.constants import EmulsionRelativeViscosityModelType
from alfasim_sdk._internal.constants import HydrodynamicModelType
from alfasim_sdk._internal.constants import SolidsModelType
from alfasim_sdk._internal.context import EdgeInfo
from alfasim_sdk._internal.context import EmulsionModelInfo
from alfasim_sdk._internal.context import HydrodynamicModelInfo
from alfasim_sdk._internal.context import NodeInfo
from alfasim_sdk._internal.context import PhysicsOptionsInfo
from alfasim_sdk._internal.context import PipelineInfo
from alfasim_sdk._internal.context import PipelineSegmentInfo
def test_plugin_info():
from alfasim_sdk._internal.context import PluginInfo
error_msg = "'name' must be 'str' (got 1 that is a 'int')"
with pytest.raises(TypeError, match=re.escape(error_msg)):
PluginInfo(name=1, caption="Caption", enabled="True", models="Anything")
error_msg = "'enabled' must be <class 'bool'> (got 'True' that is a <class 'str'>)."
with pytest.raises(TypeError, match=re.escape(error_msg)):
PluginInfo(name="Acme", caption="Caption", enabled="True", models="Anything")
error_msg = (
"'models' must be <class 'list'> (got 'Anything' that is a <class 'str'>)."
)
with pytest.raises(TypeError, match=re.escape(error_msg)):
PluginInfo(name="Acme", caption="Caption", enabled=True, models="Anything")
error_msg = "'models' must be <class 'str'> (got 1 that is a <class 'int'>)."
with pytest.raises(TypeError, match=re.escape(error_msg)):
PluginInfo(name="Acme", caption="Caption", enabled=True, models=[1, 2, 3])
PluginInfo(name="Acme", caption="Caption", enabled=True, models=["1", "2"])
def test_pipeline_info():
pipeline_segment_info = PipelineSegmentInfo(
inner_diameter=Scalar("diameter", 0.15, "m"),
start_position=Scalar(0.0, "m"),
is_custom=True,
roughness=Scalar(0.0, "m"),
)
pipeline_info = PipelineInfo(
name="Foo",
edge_name="Foo 2",
segments=[pipeline_segment_info],
total_length=Scalar(0.0, "m"),
)
assert pipeline_info
def test_pipeline_segments():
pipeline_segment_info = PipelineSegmentInfo(
inner_diameter=Scalar("diameter", 0.15, "m"),
start_position=Scalar(0.0, "m"),
is_custom=True,
roughness=Scalar(0.0, "m"),
)
assert pipeline_segment_info
diameter_msg = "'inner_diameter' must be <class 'barril.units._scalar.Scalar'> (got 1 that is a <class 'int'>)."
position_msg = "'start_position' must be <class 'barril.units._scalar.Scalar'> (got 1 that is a <class 'int'>)."
is_custom_msg = (
"'is_custom' must be <class 'bool'> (got None that is a <class 'NoneType'>)."
)
roughness_msg = "'roughness' must be <class 'barril.units._scalar.Scalar'> (got 1 that is a <class 'int'>)."
with pytest.raises(TypeError, match=re.escape(diameter_msg)):
PipelineSegmentInfo(
inner_diameter=1, start_position=1, is_custom=None, roughness=1
)
with pytest.raises(TypeError, match=re.escape(position_msg)):
PipelineSegmentInfo(
inner_diameter=Scalar("diameter", 0.15, "m"),
start_position=1,
is_custom=None,
roughness=1,
)
with pytest.raises(TypeError, match=re.escape(is_custom_msg)):
PipelineSegmentInfo(
inner_diameter=Scalar("diameter", 0.15, "m"),
start_position=Scalar(0.0, "m"),
is_custom=None,
roughness=1,
)
with pytest.raises(TypeError, match=re.escape(roughness_msg)):
PipelineSegmentInfo(
inner_diameter=Scalar("diameter", 0.15, "m"),
start_position=Scalar(0.0, "m"),
is_custom=True,
roughness=1,
)
@pytest.mark.parametrize("class_with_info", [NodeInfo, EdgeInfo])
def METHOD_NAME(class_with_info):
assert class_with_info(name="Foo", number_of_phases_from_associated_pvt=1)
assert class_with_info(name="Foo", number_of_phases_from_associated_pvt=None)
# number_of_phases_from_associated_pvt must be int or None
with pytest.raises(TypeError):
class_with_info(name="Foo", number_of_phases_from_associated_pvt="1")
# name must be string and not empty
with pytest.raises(TypeError):
class_with_info(name=None, number_of_phases_from_associated_pvt=1)
# name must be string and not empty
with pytest.raises(ValueError):
class_with_info(name="", number_of_phases_from_associated_pvt=1)
def test_physics_option():
assert PhysicsOptionsInfo(
emulsion_model=EmulsionModelInfo(
enabled=True,
relative_viscosity_model=EmulsionRelativeViscosityModelType.Mooney1951a,
droplet_size_model=EmulsionDropletSizeModelType.Boxall2012,
inversion_point_model=EmulsionInversionPointModelType.BraunerUllmann2002,
),
solids_model=SolidsModelType.NoModel,
hydrodynamic_model=HydrodynamicModelInfo(
selected_base_type=HydrodynamicModelType.ThreeLayersGasOilWater,
phases=["1", "2"],
fields=["3", "4"],
layers=["5", "6"],
has_water_phase=True,
),
)
| null |
1,752 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
from typing import List
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.optim as optim
from reagent.reporting import CompoundReporter, ReporterBase
from reagent.training import MultiStageTrainer, ReAgentLightningModule
from torch.utils.data import DataLoader, TensorDataset
class DummyReporter(ReporterBase):
def __init__(self, name: str, expected_epochs: List[int]):
super().__init__({}, {})
self.name = name
self.expected_epochs = expected_epochs
self._log_count = 0
self._flush_count = 0
self._testing = False
def log(self, **kwargs) -> None:
self._log_count += 1
def METHOD_NAME(self, epoch: int):
if not self._testing:
assert epoch in self.expected_epochs, f"{epoch} {self.expected_epochs}"
self._flush_count += 1
class DummyTrainer(ReAgentLightningModule):
def __init__(
self,
name: str,
input_dim: int,
expected_epochs: List[int],
validation_keys: List[str],
test_keys: List[str],
):
super().__init__()
self.name = name
self.linear1 = nn.Linear(input_dim, 1)
self.linear2 = nn.Linear(input_dim, 1)
self.loss_fn = nn.BCEWithLogitsLoss()
self._call_count = {
"train": 0,
"validation": 0,
"test": 0,
}
self.expected_epochs = expected_epochs
self.validation_keys = validation_keys
self.test_keys = test_keys
def configure_optimizers(self):
return [
optim.SGD(self.linear1.parameters(), lr=1e2),
optim.SGD(self.linear2.parameters(), lr=1e2),
]
def on_test_start(self):
self.reporter._testing = True
def on_test_end(self):
self.reporter._testing = False
def train_step_gen(self, training_batch, batch_idx: int):
print(f"train_step_gen {self.name}")
assert (
self.current_epoch in self.expected_epochs
), f"{self.current_epoch} {self.expected_epochs}"
self._call_count["train"] += 1
x, label = training_batch
self.reporter.log()
y = self.linear1(x)
yield self.loss_fn(y, label)
y = self.linear2(x)
yield self.loss_fn(y, label)
def validation_step(self, batch, batch_idx: int):
print(f"validation_step {self.name}")
self._call_count["validation"] += 1
assert self.current_epoch in self.expected_epochs
return {k: torch.ones(2, 3) for k in self.validation_keys}
def validation_epoch_end(self, outputs):
print(f"validation_step_end {self.name}")
print(outputs)
for output in outputs:
assert set(output.keys()) == set(self.validation_keys)
def test_step(self, batch, batch_idx: int):
print(f"test_step {self.name}")
self._call_count["test"] += 1
return {k: torch.ones(2, 3) for k in self.test_keys}
def test_epoch_end(self, outputs):
print(f"test_epoch_end {self.name}")
print(outputs)
for output in outputs:
assert set(output.keys()) == set(self.test_keys)
def make_dataset(input_dim, size):
return TensorDataset(
torch.randn(size, input_dim),
torch.randint(0, 2, (size, 1), dtype=torch.float32),
)
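# Example (sketch): a 100-sample binary-classification dataset with 5 features.
# ds = make_dataset(input_dim=5, size=100)
# x, label = ds[0]  # x.shape == (5,), label.shape == (1,), label in {0., 1.}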
def _merge_report(reporters):
pass
class TestMultiStageTrainer(unittest.TestCase):
def test_multi_stage_trainer(self):
input_dim = 5
stage1 = DummyTrainer(
"stage1",
input_dim,
expected_epochs=[0, 1, 2],
validation_keys=["a", "b", "c"],
test_keys=["d", "e"],
)
stage2 = DummyTrainer(
"stage2",
input_dim,
expected_epochs=[3, 4, 5],
validation_keys=["x", "y", "z"],
test_keys=["u", "v"],
)
multi_stage_trainer = MultiStageTrainer(
[stage1, stage2],
epochs=[3, 3],
)
reporters = [
DummyReporter("stage1", expected_epochs=[0, 1, 2]),
DummyReporter("stage2", expected_epochs=[3, 4, 5]),
]
compound_reporter = CompoundReporter(reporters, _merge_report)
multi_stage_trainer.set_reporter(compound_reporter)
training_size = 100
validation_size = 20
train_dataloader = DataLoader(
make_dataset(input_dim, training_size), batch_size=5
)
validation_dataloader = DataLoader(
make_dataset(input_dim, validation_size),
batch_size=5,
)
trainer = pl.Trainer(max_epochs=6, min_epochs=6)
trainer.fit(multi_stage_trainer, train_dataloader, validation_dataloader)
test_size = 20
test_dataloader = DataLoader(
make_dataset(input_dim, test_size),
batch_size=5,
)
trainer.test(dataloaders=test_dataloader)
print(f"stage1 {stage1._call_count}")
print(f"stage2 {stage2._call_count}")
self.assertEqual(stage1._call_count["train"], 60)
        # It seems that lightning calls validation 2 times at the beginning
self.assertEqual(stage1._call_count["validation"], 14)
self.assertEqual(stage1._call_count["test"], 4)
self.assertEqual(stage2._call_count["train"], 60)
self.assertEqual(stage2._call_count["validation"], 12)
self.assertEqual(stage2._call_count["test"], 4)
for reporter, t in zip(reporters, [stage1, stage2]):
print(f"{reporter.name} {reporter._log_count} {reporter._flush_count}")
self.assertEqual(reporter._log_count, t._call_count["train"])
            # flush got called 3 times each in train & validation.
            # In stage1, there is an additional call to validation at the beginning.
self.assertEqual(reporter._flush_count, 8 if t == stage1 else 7)
| null |
1,753 |
import contextlib
import os
import shutil
import signal
import socket
import subprocess
import tempfile
import time
from eth_utils import (
is_checksum_address,
to_text,
)
from web3 import (
constants,
)
from web3.exceptions import (
TransactionNotFound,
)
# use same coinbase value as in `web3.py/tests/integration/common.py`
COINBASE = "0xdc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd"
COINBASE_PK = "0x58d23b55bc9cdce1f18c2500f40ff4ab7245df9a89505e9b1fa4851f623d241d"
KEYFILE_DATA = '{"address":"dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd","crypto":{"cipher":"aes-128-ctr","ciphertext":"52e06bc9397ea9fa2f0dae8de2b3e8116e92a2ecca9ad5ff0061d1c449704e98","cipherparams":{"iv":"aa5d0a5370ef65395c1a6607af857124"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"9fdf0764eb3645ffc184e166537f6fe70516bf0e34dc7311dea21f100f0c9263"},"mac":"4e0b51f42b865c15c485f4faefdd1f01a38637e5247f8c75ffe6a8c0eba856f6"},"id":"5a6124e0-10f1-4c1c-ae3e-d903eacb740a","version":3}' # noqa: E501
KEYFILE_PW = "web3py-test"
KEYFILE_FILENAME = "UTC--2017-08-24T19-42-47.517572178Z--dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd" # noqa: E501
RAW_TXN_ACCOUNT = "0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6"
UNLOCKABLE_PRIVATE_KEY = (
"0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01"
)
UNLOCKABLE_ACCOUNT = "0x12efdc31b1a8fa1a1e756dfd8a1601055c971e13"
UNLOCKABLE_ACCOUNT_PW = KEYFILE_PW
GENESIS_DATA = {
"config": {
"chainId": 131277322940537, # the string 'web3py' as an integer
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"istanbulBlock": 0,
"petersburgBlock": 0,
"berlinBlock": 0,
"londonBlock": 0,
"arrowGlacierBlock": 0,
"grayGlacierBlock": 0,
"shanghaiTime": 0,
},
"nonce": "0x0000000000000042",
"alloc": {
COINBASE: {"balance": "1000000000000000000000000000"},
UNLOCKABLE_ACCOUNT: {"balance": "1000000000000000000000000000"},
RAW_TXN_ACCOUNT: {"balance": "1000000000000000000000000000"},
"0000000000000000000000000000000000000001": {"balance": "1"},
"0000000000000000000000000000000000000002": {"balance": "1"},
"0000000000000000000000000000000000000003": {"balance": "1"},
"0000000000000000000000000000000000000004": {"balance": "1"},
"0000000000000000000000000000000000000005": {"balance": "1"},
"0000000000000000000000000000000000000006": {"balance": "1"},
},
"timestamp": "0x00",
"parentHash": constants.HASH_ZERO,
"extraData": "0x3535353535353535353535353535353535353535353535353535353535353535",
"gasLimit": "0x3b9aca00", # 1,000,000,000
"difficulty": "0x10000",
"mixhash": constants.HASH_ZERO,
"coinbase": COINBASE,
}
def ensure_path_exists(dir_path):
"""
Make sure that a path exists
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return True
return False
@contextlib.contextmanager
def tempdir():
dir_path = tempfile.mkdtemp()
try:
yield dir_path
finally:
shutil.rmtree(dir_path)
def get_geth_binary():
from geth.install import (
get_executable_path,
install_geth,
)
if "GETH_BINARY" in os.environ:
return os.environ["GETH_BINARY"]
elif "GETH_VERSION" in os.environ:
geth_version = os.environ["GETH_VERSION"]
_geth_binary = get_executable_path(geth_version)
if not os.path.exists(_geth_binary):
install_geth(geth_version)
assert os.path.exists(_geth_binary)
return _geth_binary
else:
return "geth"
def METHOD_NAME(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
METHOD_NAME(proc, 13)
if proc.poll() is None:
proc.terminate()
METHOD_NAME(proc, 5)
if proc.poll() is None:
proc.kill()
METHOD_NAME(proc, 2)
def wait_for_socket(ipc_path, timeout=30):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
@contextlib.contextmanager
def get_geth_process(
geth_binary, datadir, genesis_file_path, ipc_path, port, networkid, skip_init=False
):
if not skip_init:
init_datadir_command = (
geth_binary,
"--datadir",
datadir,
"init",
genesis_file_path,
)
print(" ".join(init_datadir_command))
subprocess.check_output(
init_datadir_command,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
run_geth_command = (
geth_binary,
"--datadir",
datadir,
"--ipcpath",
ipc_path,
"--nodiscover",
"--port",
port,
"--networkid",
networkid,
"--etherbase",
COINBASE[2:],
)
print(" ".join(run_geth_command))
try:
proc = get_process(run_geth_command)
yield proc
finally:
kill_proc_gracefully(proc)
output, errors = proc.communicate()
print(
"Geth Process Exited:\n"
f"stdout:{to_text(output)}\n\n"
f"stderr:{to_text(errors)}\n\n"
)
def get_process(run_command):
proc = subprocess.Popen(
run_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return proc
def mine_block(w3):
origin_block_number = w3.eth.block_number
start_time = time.time()
w3.geth.miner.start(1)
while time.time() < start_time + 120:
block_number = w3.eth.block_number
if block_number > origin_block_number:
w3.geth.miner.stop()
return block_number
else:
time.sleep(0.1)
else:
raise ValueError("No block mined during wait period")
def mine_transaction_hash(w3, txn_hash):
start_time = time.time()
w3.geth.miner.start(1)
while time.time() < start_time + 120:
try:
receipt = w3.eth.get_transaction_receipt(txn_hash)
except TransactionNotFound:
            time.sleep(0.1)
            continue
if receipt is not None:
w3.geth.miner.stop()
return receipt
else:
time.sleep(0.1)
else:
raise ValueError(
"Math contract deploy transaction not mined during wait period"
)
def deploy_contract(w3, name, factory):
name = name.upper()
w3.geth.personal.unlock_account(w3.eth.coinbase, KEYFILE_PW)
deploy_txn_hash = factory.constructor().transact({"from": w3.eth.coinbase})
print(f"{name}_CONTRACT_DEPLOY_HASH: {deploy_txn_hash}")
deploy_receipt = mine_transaction_hash(w3, deploy_txn_hash)
print(f"{name}_CONTRACT_DEPLOY_TRANSACTION_MINED")
contract_address = deploy_receipt["contractAddress"]
assert is_checksum_address(contract_address)
print(f"{name}_CONTRACT_ADDRESS: {contract_address}")
return deploy_receipt
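# Usage sketch (hypothetical genesis_file_path, ipc_path and port): initialize
# a datadir from the genesis file, start geth, then wait for its IPC socket.
# with tempdir() as datadir:
#     with get_geth_process(get_geth_binary(), datadir, genesis_file_path,
#                           ipc_path, "30303", "131277322940537") as proc:
#         wait_for_socket(ipc_path)
#         # ... connect a Web3 IPC provider to ipc_path and run tests ...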
| null |
1,754 |
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
""" crashme_test_management:
code related to the gathering / analysis / management of
the test cases
ie - collecting the list of tests in each suite, then
gathering additional, relevant information for crashme mode
"""
# imports
import os
import re
import sys
import imp
import lib.test_mgmt.test_management as test_management
class testCase:
"""Holds info on a single crashme test
"""
def __init__( self
, system_manager
, name=None
, fullname = None
, server_requirements=[[]]
, comment=None
, cnf_path=None
, request_dict=None
, skip_flag=False
, skip_reason=''
, expect_fail=False
, test_path = None
, debug=False ):
self.system_manager = system_manager
self.logging = self.system_manager.logging
self.skip_keys = ['system_manager','logging']
self.name = name
self.fullname = fullname
self.master_sh = None
self.comment = comment
self.server_requirements = server_requirements
self.cnf_path = cnf_path
self.server_requests = request_dict
self.skip_flag = skip_flag
self.skip_reason = skip_reason
self.expect_fail = expect_fail
self.test_path = test_path
self.disable = False
if debug:
self.system_manager.logging.debug_class(self)
def should_run(self):
if self.skip_flag or self.disable:
return 0
else:
return 1
class testManager(test_management.testManager):
"""Deals with scanning test directories, gathering test cases, and
collecting per-test information (opt files, etc) for use by the
test-runner
"""
def __init__( self, variables, system_manager):
super(testManager, self).__init__( variables, system_manager)
server_type = variables['defaultservertype']
        if server_type == 'mysql' or server_type == 'galera':
server_type = 'percona'
if variables['suitepaths']:
self.suitepaths = variables['suitepaths']
else:
self.suitepaths = [os.path.join(self.testdir,'%s_tests' %(server_type))]
if variables['suitelist'] is None:
self.suitelist = ['main']
else:
self.suitelist = variables['suitelist']
def process_suite(self,suite_dir):
"""Process a test suite.
Look for tests, which are nice clean python unittest files
"""
# We know this based on how we organize native test conf files
suite_name = os.path.basename(suite_dir)
self.system_manager.logging.verbose("Processing suite: %s" %(suite_name))
testlist = [os.path.join(suite_dir,test_file) for test_file in sorted(os.listdir(suite_dir)) if test_file.endswith('_test.py')]
# Search for specific test names
if self.desired_tests: # We have specific, named tests we want from the suite(s)
tests_to_use = []
for test in self.desired_tests:
if test.endswith('.py'):
pass
else:
test = test+'.py'
test = os.path.join(suite_dir,test)
if test in testlist:
tests_to_use.append(test)
testlist = tests_to_use
for test_case in testlist:
self.add_test(self.process_test_file( suite_name
, test_case
))
def get_server_reqs(self, module_file):
""" Code to handle extraction of server_requests & requirements
from unittest test modules
"""
module_name = os.path.basename(module_file).replace('.py','')
my_module = imp.load_source(module_name, module_file)
server_requirements = None
server_requests = None
try:
server_requirements = my_module.server_requirements
        except (AttributeError, NameError): pass
try:
server_requests = my_module.server_requests
        except (AttributeError, NameError): pass
return server_requirements, server_requests
def pretest_check(self, module_file):
""" Code to determine if there are any pre-test functions
/ status hints and use them for the testCase
"""
module_name = os.path.basename(module_file).replace('.py','')
my_module = imp.load_source(module_name, module_file)
skip_flag = False
skip_reason = ''
expect_fail = False
try:
skip_flag, skip_reason = my_module.skip_checks(self.system_manager)
        except (AttributeError, NameError):
pass
try:
expect_fail = my_module.expect_fail
        except (AttributeError, NameError): pass
return skip_flag, skip_reason, expect_fail
def process_test_file(self, suite_name, testfile):
""" We convert the info in a testfile into a testCase object """
# test_name = filename - .py...simpler
test_name = os.path.basename(testfile).replace('.py','')
test_comment = None
skip_flag, skip_reason, expect_fail = self.pretest_check(testfile)
server_requirements, server_requests = self.get_server_reqs(testfile)
return testCase( self.system_manager
, name = test_name
, fullname = "%s.%s" %(suite_name, test_name)
, server_requirements = server_requirements
, cnf_path = None
, request_dict = server_requests
, skip_flag = skip_flag
, skip_reason = skip_reason
, expect_fail = expect_fail
, test_path = testfile
, debug = self.debug )
def METHOD_NAME(self, test_case, test_status, output, exec_time):
""" Accept the results of an executed testCase for further
processing.
"""
if test_status not in self.executed_tests:
self.executed_tests[test_status] = [test_case]
else:
self.executed_tests[test_status].append(test_case)
# report
self.logging.test_report( test_case.fullname, test_status
, str(exec_time), output
, report_output= True)
| null |
1,755 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkess.endpoint import endpoint_data
class CreateScalingRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ess', '2014-08-28', 'CreateScalingRule','ess')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AlarmDimensions(self): # RepeatList
return self.get_query_params().get('AlarmDimension')
def set_AlarmDimensions(self, AlarmDimension): # RepeatList
for depth1 in range(len(AlarmDimension)):
if AlarmDimension[depth1].get('DimensionValue') is not None:
self.add_query_param('AlarmDimension.' + str(depth1 + 1) + '.DimensionValue', AlarmDimension[depth1].get('DimensionValue'))
if AlarmDimension[depth1].get('DimensionKey') is not None:
self.add_query_param('AlarmDimension.' + str(depth1 + 1) + '.DimensionKey', AlarmDimension[depth1].get('DimensionKey'))
def get_StepAdjustments(self): # RepeatList
return self.get_query_params().get('StepAdjustment')
def set_StepAdjustments(self, StepAdjustment): # RepeatList
for depth1 in range(len(StepAdjustment)):
if StepAdjustment[depth1].get('MetricIntervalUpperBound') is not None:
self.add_query_param('StepAdjustment.' + str(depth1 + 1) + '.MetricIntervalUpperBound', StepAdjustment[depth1].get('MetricIntervalUpperBound'))
if StepAdjustment[depth1].get('MetricIntervalLowerBound') is not None:
self.add_query_param('StepAdjustment.' + str(depth1 + 1) + '.MetricIntervalLowerBound', StepAdjustment[depth1].get('MetricIntervalLowerBound'))
if StepAdjustment[depth1].get('ScalingAdjustment') is not None:
self.add_query_param('StepAdjustment.' + str(depth1 + 1) + '.ScalingAdjustment', StepAdjustment[depth1].get('ScalingAdjustment'))
def get_ScalingGroupId(self): # String
return self.get_query_params().get('ScalingGroupId')
def set_ScalingGroupId(self, ScalingGroupId): # String
self.add_query_param('ScalingGroupId', ScalingGroupId)
def get_DisableScaleIn(self): # Boolean
return self.get_query_params().get('DisableScaleIn')
def set_DisableScaleIn(self, DisableScaleIn): # Boolean
self.add_query_param('DisableScaleIn', DisableScaleIn)
def get_InitialMaxSize(self): # Integer
return self.get_query_params().get('InitialMaxSize')
def set_InitialMaxSize(self, InitialMaxSize): # Integer
self.add_query_param('InitialMaxSize', InitialMaxSize)
def get_ScalingRuleName(self): # String
return self.get_query_params().get('ScalingRuleName')
def set_ScalingRuleName(self, ScalingRuleName): # String
self.add_query_param('ScalingRuleName', ScalingRuleName)
def METHOD_NAME(self): # Integer
return self.get_query_params().get('Cooldown')
def set_Cooldown(self, Cooldown): # Integer
self.add_query_param('Cooldown', Cooldown)
def get_PredictiveValueBehavior(self): # String
return self.get_query_params().get('PredictiveValueBehavior')
def set_PredictiveValueBehavior(self, PredictiveValueBehavior): # String
self.add_query_param('PredictiveValueBehavior', PredictiveValueBehavior)
def get_ScaleInEvaluationCount(self): # Integer
return self.get_query_params().get('ScaleInEvaluationCount')
def set_ScaleInEvaluationCount(self, ScaleInEvaluationCount): # Integer
self.add_query_param('ScaleInEvaluationCount', ScaleInEvaluationCount)
def get_ScalingRuleType(self): # String
return self.get_query_params().get('ScalingRuleType')
def set_ScalingRuleType(self, ScalingRuleType): # String
self.add_query_param('ScalingRuleType', ScalingRuleType)
def get_MetricName(self): # String
return self.get_query_params().get('MetricName')
def set_MetricName(self, MetricName): # String
self.add_query_param('MetricName', MetricName)
def get_PredictiveScalingMode(self): # String
return self.get_query_params().get('PredictiveScalingMode')
def set_PredictiveScalingMode(self, PredictiveScalingMode): # String
self.add_query_param('PredictiveScalingMode', PredictiveScalingMode)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_AdjustmentValue(self): # Integer
return self.get_query_params().get('AdjustmentValue')
def set_AdjustmentValue(self, AdjustmentValue): # Integer
self.add_query_param('AdjustmentValue', AdjustmentValue)
def get_EstimatedInstanceWarmup(self): # Integer
return self.get_query_params().get('EstimatedInstanceWarmup')
def set_EstimatedInstanceWarmup(self, EstimatedInstanceWarmup): # Integer
self.add_query_param('EstimatedInstanceWarmup', EstimatedInstanceWarmup)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_PredictiveTaskBufferTime(self): # Integer
return self.get_query_params().get('PredictiveTaskBufferTime')
def set_PredictiveTaskBufferTime(self, PredictiveTaskBufferTime): # Integer
self.add_query_param('PredictiveTaskBufferTime', PredictiveTaskBufferTime)
def get_AdjustmentType(self): # String
return self.get_query_params().get('AdjustmentType')
def set_AdjustmentType(self, AdjustmentType): # String
self.add_query_param('AdjustmentType', AdjustmentType)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_PredictiveValueBuffer(self): # Integer
return self.get_query_params().get('PredictiveValueBuffer')
def set_PredictiveValueBuffer(self, PredictiveValueBuffer): # Integer
self.add_query_param('PredictiveValueBuffer', PredictiveValueBuffer)
def get_ScaleOutEvaluationCount(self): # Integer
return self.get_query_params().get('ScaleOutEvaluationCount')
def set_ScaleOutEvaluationCount(self, ScaleOutEvaluationCount): # Integer
self.add_query_param('ScaleOutEvaluationCount', ScaleOutEvaluationCount)
def get_MinAdjustmentMagnitude(self): # Integer
return self.get_query_params().get('MinAdjustmentMagnitude')
def set_MinAdjustmentMagnitude(self, MinAdjustmentMagnitude): # Integer
self.add_query_param('MinAdjustmentMagnitude', MinAdjustmentMagnitude)
def get_TargetValue(self): # Float
return self.get_query_params().get('TargetValue')
def set_TargetValue(self, TargetValue): # Float
self.add_query_param('TargetValue', TargetValue)
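# Usage sketch (hypothetical identifiers and values): build a simple
# quantity-change scaling rule request.
# request = CreateScalingRuleRequest()
# request.set_ScalingGroupId('asg-xxxxxxxx')
# request.set_ScalingRuleName('scale-out-by-2')
# request.set_AdjustmentType('QuantityChangeInCapacity')
# request.set_AdjustmentValue(2)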
| null |
1,756 |
#
# junitxml: extensions to Python unittest to get output junitxml
# Copyright (C) 2009 Robert Collins <[email protected]>
#
# Copying permitted under the LGPL-3 licence, included with this library.
"""unittest compatible JUnit XML output."""
import datetime
import re
import time
import unittest
# same format as sys.version_info: "A tuple containing the five components of
# the version number: major, minor, micro, releaselevel, and serial. All
# values except releaselevel are integers; the release level is 'alpha',
# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
# releaselevel of 'dev' for unreleased under-development code.
#
# If the releaselevel is 'alpha' then the major/minor/micro components are not
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
__version__ = (0, 7, 0, 'alpha', 0)
def test_suite():
import junitxml.tests
return junitxml.tests.test_suite()
class LocalTimezone(datetime.tzinfo):
def __init__(self):
self._offset = None
    # Returning None from every method would be the minimal implementation,
    # but datetime arithmetic then breaks, so utcoffset computes a real offset.
def utcoffset(self, dt):
if self._offset is None:
t = 1260423030 # arbitrary, but doesn't handle dst very well
dt = datetime.datetime
self._offset = (dt.fromtimestamp(t) - dt.utcfromtimestamp(t))
return self._offset
def dst(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return None
def _error_name(eclass):
module = eclass.__module__
if module not in ("__main__", "builtins", "exceptions"):
return ".".join([module, eclass.__name__])
return eclass.__name__
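# Characters that may not appear in XML character data: most C0 control
# characters, lone surrogates, and the non-characters U+FFFE/U+FFFF.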
_non_cdata = "[\0-\b\x0B-\x1F\uD800-\uDFFF\uFFFE\uFFFF]+"
if "\\u" in _non_cdata:
_non_cdata = _non_cdata.decode("unicode-escape")
def _strip_invalid_chars(s, _sub=re.compile(_non_cdata, re.UNICODE).sub):
if not isinstance(s, unicode):
try:
s = s.decode("utf-8")
except UnicodeDecodeError:
s = s.decode("ascii", "replace")
return _sub("", s).encode("utf-8")
else:
def _strip_invalid_chars(s, _sub=re.compile(_non_cdata, re.UNICODE).sub):
return _sub("", s)
def _escape_content(s):
    return (_strip_invalid_chars(s)
        .replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace("]]>", "]]&gt;"))
def _escape_attr(s):
    return (_strip_invalid_chars(s)
        .replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace("]]>", "]]&gt;")
        .replace('"', "&quot;")
        .replace("\t", "&#x9;")
        .replace("\n", "&#xA;"))
class JUnitXmlResult(unittest.TestResult):
"""A TestResult which outputs JUnit compatible XML."""
def __init__(self, stream):
"""Create a JUnitXmlResult.
:param stream: A stream to write results to. Note that due to the
            nature of JUnit XML output, nothing will be written to the stream
until stopTestRun() is called.
"""
self.__super = super(JUnitXmlResult, self)
self.__super.__init__()
# GZ 2010-09-03: We have a problem if passed a text stream in Python 3
# as really we want to write raw UTF-8 to ensure that
# the encoding is not mangled later
self._stream = stream
self._results = []
self._set_time = None
self._test_start = None
self._run_start = None
self._tz_info = None
def startTestRun(self):
"""Start a test run."""
self._run_start = self._now()
def _get_tzinfo(self):
if self._tz_info is None:
self._tz_info = LocalTimezone()
return self._tz_info
def _now(self):
if self._set_time is not None:
return self._set_time
else:
return datetime.datetime.now(self._get_tzinfo())
def time(self, a_datetime):
self._set_time = a_datetime
if (self._run_start is not None and
self._run_start > a_datetime):
self._run_start = a_datetime
def METHOD_NAME(self, test):
self.__super.METHOD_NAME(test)
self._test_start = self._now()
def _duration(self, from_datetime):
try:
delta = self._now() - from_datetime
except TypeError:
n = self._now()
delta = datetime.timedelta(-1)
seconds = delta.days * 3600*24 + delta.seconds
return seconds + 0.000001 * delta.microseconds
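    # Each add* callback below first emits an *unclosed* <testcase ...> tag via
    # _test_case_string() and then finishes it with either '/>' or a child
    # element plus '</testcase>'.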
def _test_case_string(self, test):
duration = self._duration(self._test_start)
test_id = test.id()
# Split on the last dot not inside a parameter
class_end = test_id.rfind(".", 0, test_id.find("("))
if class_end == -1:
classname, name = "", test_id
else:
classname, name = test_id[:class_end], test_id[class_end+1:]
self._results.append('<testcase classname="%s" name="%s" '
'time="%0.3f"' % (_escape_attr(classname), _escape_attr(name), duration))
def stopTestRun(self):
"""Stop a test run.
This allows JUnitXmlResult to output the XML representation of the test
run.
"""
duration = self._duration(self._run_start)
self._stream.write('<testsuite errors="%d" failures="%d" name="" '
'tests="%d" time="%0.3f">\n' % (len(self.errors),
len(self.failures) + len(getattr(self, "unexpectedSuccesses", ())),
self.testsRun, duration))
self._stream.write(''.join(self._results))
self._stream.write('</testsuite>\n')
def addError(self, test, error):
self.__super.addError(test, error)
self._test_case_string(test)
self._results.append('>\n')
self._results.append('<error type="%s">%s</error>\n</testcase>\n' % (
_escape_attr(_error_name(error[0])),
_escape_content(self._exc_info_to_string(error, test))))
def addFailure(self, test, error):
self.__super.addFailure(test, error)
self._test_case_string(test)
self._results.append('>\n')
self._results.append('<failure type="%s">%s</failure>\n</testcase>\n' %
(_escape_attr(_error_name(error[0])),
_escape_content(self._exc_info_to_string(error, test))))
def addSuccess(self, test):
self.__super.addSuccess(test)
self._test_case_string(test)
self._results.append('/>\n')
def addSkip(self, test, reason):
try:
self.__super.addSkip(test, reason)
except AttributeError:
# Python < 2.7|3.1
pass
self._test_case_string(test)
self._results.append('>\n')
self._results.append('<skip>%s</skip>\n</testcase>\n'% _escape_attr(reason))
def addUnexpectedSuccess(self, test):
try:
self.__super.addUnexpectedSuccess(test)
except AttributeError:
# Python < 2.7|3.1
pass
self._test_case_string(test)
self._results.append('>\n')
self._results.append('<failure type="unittest.case._UnexpectedSuccess"/>\n</testcase>\n')
def addExpectedFailure(self, test, error):
try:
self.__super.addExpectedFailure(test, error)
except AttributeError:
# Python < 2.7|3.1
pass
self._test_case_string(test)
self._results.append('/>\n')
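# A minimal usage sketch (an assumption, not part of the original module):
# stream the results of a unittest run as JUnit XML to stdout.
#
#   import sys, unittest
#   suite = unittest.defaultTestLoader.loadTestsFromName("mypkg.tests")
#   result = JUnitXmlResult(sys.stdout)
#   result.startTestRun()
#   suite.run(result)
#   result.stopTestRun()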
| null |
1,757 |
""" Module contains test fixtures meant to aide in the testing of jobs and
tool evaluation. Such extensive "fixtures" are something of an anti-pattern
so use of this should be limitted to tests of very 'extensive' classes.
"""
import os.path
import shutil
import string
import tempfile
from collections import defaultdict
from typing import (
cast,
Optional,
)
import galaxy.datatypes.registry
import galaxy.model
from galaxy.app import UniverseApplication
from galaxy.app_unittest_utils.galaxy_mock import MockApp
from galaxy.tool_util.parser import get_tool_source
from galaxy.tools import create_tool_from_source
from galaxy.util.bunch import Bunch
datatypes_registry = galaxy.datatypes.registry.Registry()
datatypes_registry.load_datatypes()
galaxy.model.set_datatypes_registry(datatypes_registry)
class UsesApp:
def setup_app(self):
self.test_directory = tempfile.mkdtemp()
self.app = cast(UniverseApplication, MockApp())
self.app.config.new_file_path = os.path.join(self.test_directory, "new_files")
self.app.config.admin_users = "[email protected]"
def tear_down_app(self):
shutil.rmtree(self.test_directory)
# Simple tool with just one text parameter and output.
SIMPLE_TOOL_CONTENTS = """<tool id="${tool_id}" name="Test Tool" version="$version" profile="$profile">
    <command>echo "$param1" &lt; $out1</command>
<inputs>
<param type="text" name="param1" value="" />
</inputs>
<outputs>
<data name="out1" format="data" label="Output ($param1)" />
</outputs>
</tool>
"""
# A tool with data parameters (kind of like cat1), my favorite test tool :)
SIMPLE_CAT_TOOL_CONTENTS = """<tool id="${tool_id}" name="Test Tool" version="$version" profile="$profile">
    <command>cat "$param1" #for $r in $repeat# "$r.param2" #end for# &lt; $out1</command>
<inputs>
<param type="data" format="tabular" name="param1" value="" />
<repeat name="repeat1" label="Repeat 1">
<param type="data" format="tabular" name="param2" value="" />
</repeat>
</inputs>
<outputs>
<data name="out1" format="data" />
</outputs>
</tool>
"""
class MockActionI:
def execute(self, tool, trans, **kwds):
pass
class UsesTools(UsesApp):
tool_action: Optional[MockActionI] = None
def _init_tool(
self,
tool_contents=SIMPLE_TOOL_CONTENTS,
filename="tool.xml",
version="1.0",
profile="16.01",
tool_id="test_tool",
extra_file_contents=None,
extra_file_path=None,
tool_path=None,
):
if tool_path is None:
self.tool_file = os.path.join(self.test_directory, filename)
contents_template = string.Template(tool_contents)
tool_contents = contents_template.safe_substitute(dict(version=version, profile=profile, tool_id=tool_id))
self.__write_tool(tool_contents)
if extra_file_contents and extra_file_path:
self.__write_tool(extra_file_contents, path=os.path.join(self.test_directory, extra_file_path))
else:
self.tool_file = tool_path
return self.__setup_tool()
def METHOD_NAME(self, tool_file):
self.tool_file = tool_file
return self.__setup_tool()
def setup_app(self):
super().setup_app()
self.app.config.drmaa_external_runjob_script = ""
self.app.config.tool_secret = "testsecret"
self.app.config.track_jobs_in_database = False
def __setup_tool(self):
tool_source = get_tool_source(self.tool_file)
self.tool = create_tool_from_source(self.app, tool_source, config_file=self.tool_file)
if getattr(self, "tool_action", None):
self.tool.tool_action = self.tool_action
return self.tool
def __write_tool(self, contents, path=None):
path = path or self.tool_file
with open(path, "w") as out:
out.write(contents)
class MockContext:
def __init__(self, model_objects=None):
self.expunged_all = False
self.flushed = False
self.model_objects = model_objects or defaultdict(lambda: {})
self.created_objects = []
self.current = self
def expunge_all(self):
self.expunged_all = True
def query(self, clazz):
return MockQuery(self.model_objects.get(clazz))
def flush(self):
self.flushed = True
def add(self, object):
self.created_objects.append(object)
class MockQuery:
def __init__(self, class_objects):
self.class_objects = class_objects
def filter_by(self, **kwds):
return Bunch(first=lambda: None)
def get(self, id):
return self.class_objects.get(id, None)
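# Usage sketch (an assumption about typical use): a test case mixes in
# UsesTools to build a throwaway tool from one of the templates above, e.g.
#
#   class MyToolTestCase(UsesTools, unittest.TestCase):
#       def setUp(self):
#           self.setup_app()
#           self.tool = self._init_tool(SIMPLE_TOOL_CONTENTS)
#       def tearDown(self):
#           self.tear_down_app()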
__all__ = ("UsesApp",)
| null |
1,758 |
"""
HAR Formatter for REDbot.
"""
import datetime
import json
from typing import Any, Dict, List, Optional
from typing_extensions import TypedDict
from redbot import __version__
from redbot.formatter import Formatter
from redbot.message.headers import StrHeaderListType
from redbot.resource import HttpResource
class HarLogDict(TypedDict):
version: str
creator: Dict[str, str]
browser: Dict[str, str]
pages: List[Any]
entries: List[Dict[str, Any]]
class HarDict(TypedDict):
log: HarLogDict
class HarFormatter(Formatter):
"""
Format a HttpResource object (and any descendants) as HAR.
"""
can_multiple = True
name = "har"
media_type = "application/json"
def __init__(self, *args: Any, **kw: Any) -> None:
Formatter.__init__(self, *args, **kw)
self.har: HarDict = {
"log": {
"version": "1.1",
"creator": {"name": "REDbot", "version": __version__},
"browser": {"name": "REDbot", "version": __version__},
"pages": [],
"entries": [],
}
}
self.last_id = 0
def start_output(self) -> None:
pass
def status(self, status: str) -> None:
pass
def feed(self, sample: bytes) -> None:
pass
def finish_output(self) -> None:
"Fill in the template with RED's results."
if self.resource.response.complete:
page_id = self.add_page(self.resource)
self.add_entry(self.resource, page_id)
for linked_resource in [d[0] for d in self.resource.linked]:
# filter out incomplete responses
if linked_resource.response.complete:
self.add_entry(linked_resource, page_id)
self.output(json.dumps(self.har, indent=4))
def error_output(self, message: str) -> None:
self.output(message)
    def add_entry(self, resource: HttpResource, page_ref: Optional[int] = None) -> None:
entry = {
"startedDateTime": isoformat(resource.request.start_time),
"time": int(
(resource.response.complete_time - resource.request.start_time) * 1000
),
"_red_messages": self.format_notes(resource),
}
if page_ref:
entry["pageref"] = f"page{page_ref}"
request = {
"method": resource.request.method,
"url": resource.request.uri,
"httpVersion": "HTTP/1.1",
"cookies": [],
"headers": self.METHOD_NAME(resource.request.headers),
"queryString": [],
"headersSize": -1,
"bodySize": -1,
}
response = {
"status": resource.response.status_code,
"statusText": resource.response.status_phrase,
"httpVersion": f"HTTP/{resource.response.version}",
"cookies": [],
"headers": self.METHOD_NAME(resource.response.headers),
"content": {
"size": resource.response.decoded_len,
"compression": resource.response.decoded_len
- resource.response.payload_len,
"mimeType": resource.response.parsed_headers.get("content-type", ""),
},
"redirectURL": resource.response.parsed_headers.get("location", ""),
"headersSize": resource.response.header_length,
"bodySize": resource.response.payload_len,
}
cache: Dict[None, None] = {}
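        # Per the HAR spec, -1 marks timings that were not measured; only the
        # server wait and body receive phases come from REDbot's own clocks.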
timings = {
"dns": -1,
"connect": -1,
"blocked": 0,
"send": 0,
"wait": int(
(resource.response.start_time - resource.request.start_time) * 1000
),
"receive": int(
(resource.response.complete_time - resource.response.start_time) * 1000
),
}
entry.update(
{
"request": request,
"response": response,
"cache": cache,
"timings": timings,
}
)
self.har["log"]["entries"].append(entry)
def add_page(self, resource: HttpResource) -> int:
page_id = self.last_id + 1
page = {
"startedDateTime": isoformat(resource.request.start_time),
"id": f"page{page_id}",
"title": "",
"pageTimings": {"onContentLoad": -1, "onLoad": -1},
}
self.har["log"]["pages"].append(page)
return page_id
@staticmethod
def METHOD_NAME(hdrs: StrHeaderListType) -> List[Dict[str, str]]:
return [{"name": n, "value": v} for n, v in hdrs]
def format_notes(self, resource: HttpResource) -> List[Dict[str, str]]:
out = []
for note in resource.notes:
msg = {
"note_id": note.__class__.__name__,
"subject": note.subject,
"category": note.category.name,
"level": note.level.name,
"summary": note.show_summary(self.lang),
}
out.append(msg)
return out
def isoformat(timestamp: float) -> str:
return f"{datetime.datetime.utcfromtimestamp(timestamp).isoformat()}Z"
| null |
1,759 |
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.symbols.Symbol import Symbol
from slicc.symbols.Type import Type
class Func(Symbol):
def __init__(
self,
table,
ident,
name,
location,
return_type,
param_types,
param_strings,
body,
pairs,
):
super().__init__(table, ident, location, pairs)
self.return_type = return_type
self.param_types = param_types
self.param_strings = param_strings
self.body = body
self.isInternalMachineFunc = False
self.c_ident = ident
self.c_name = name
self.class_name = ""
def __repr__(self):
return ""
@property
def prototype(self):
if "external" in self:
return ""
return_type = self.return_type.c_ident
void_type = self.symtab.find("void", Type)
if "return_by_ref" in self and self.return_type != void_type:
return_type += "&"
elif "return_by_pointer" in self and self.return_type != void_type:
return_type += "*"
return f"{return_type} {self.c_name}({', '.join(self.param_strings)});"
def writeCodeFiles(self, path, includes):
return
def checkArguments(self, args):
if len(args) != len(self.param_types):
self.error(
"Wrong number of arguments passed to function : '%s'"
+ " Expected %d, got %d",
self.c_ident,
len(self.param_types),
len(args),
)
cvec = []
type_vec = []
for expr, expected_type in zip(args, self.param_types):
# Check the types of the parameter
actual_type, param_code = expr.inline(True)
if (
str(actual_type) != "OOD"
and str(actual_type) != str(expected_type)
and str(actual_type["interface"]) != str(expected_type)
):
expr.error(
f"Type mismatch: expected: {expected_type} actual: {actual_type}"
)
cvec.append(param_code)
type_vec.append(expected_type)
return cvec, type_vec
def METHOD_NAME(self):
"""This write a function of object Chip"""
if "external" in self:
return ""
code = self.symtab.codeFormatter()
# Generate function header
void_type = self.symtab.find("void", Type)
return_type = self.return_type.c_ident
if "return_by_ref" in self and self.return_type != void_type:
return_type += "&"
if "return_by_pointer" in self and self.return_type != void_type:
return_type += "*"
params = ", ".join(self.param_strings)
code(
"""
$return_type
${{self.class_name}}::${{self.c_name}}($params)
{
${{self.body}}
}
"""
)
return str(code)
__all__ = ["Func"]
| null |
1,760 |
# Copyright 2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was forked from https://github.com/marvis/pytorch-yolo2 ,
# licensed under the MIT License (see LICENSE.external for more details).
from utils import *
import numpy as np
def METHOD_NAME(pred_boxes, target, anchors, num_anchors,
num_classes, nH, nW, coord_scale, noobject_scale,
object_scale, cls_scale, sil_thresh, seen):
nB = target.shape[0]
nA = num_anchors
nC = num_classes
anchor_step = len(anchors)//num_anchors
# The mask arrays `coord_mask`, `conf_mask` and `cls_mask` indicate not only
# whether the specific bounding box is evaluated or not, but also are
# multiplied by loss coefficients `coord_scale`, `noobject_scale`,
# `object_scale` and `cls_scale`.
conf_mask = np.ones((nB, nA, nH, nW), dtype=np.float32) * noobject_scale
coord_mask, cls_mask, tx, ty, tw, th, tconf, tcls = [
np.zeros((nB, nA, nH, nW), dtype=np.float32) for _ in range(8)]
nAnchors = nA*nH*nW
nPixels = nH*nW
for b in range(nB):
cur_pred_boxes = np.transpose(pred_boxes[b*nAnchors:(b+1)*nAnchors])
cur_ious = np.zeros(nAnchors, dtype=np.float32)
for t in range(50):
if target[b][t*5+1] == 0:
break
gx = target[b][t*5+1]*nW
gy = target[b][t*5+2]*nH
gw = target[b][t*5+3]*nW
gh = target[b][t*5+4]*nH
cur_gt_boxes = np.array([[gx, gy, gw, gh]], dtype=np.float32)
cur_gt_boxes = np.repeat(cur_gt_boxes, nAnchors, axis=0)
cur_gt_boxes = np.transpose(cur_gt_boxes)
cur_ious = np.maximum(cur_ious, bbox_ious_numpy(
cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False))
conf_mask_b = conf_mask[b]
conf_mask_b_orig_shape = conf_mask_b.shape
conf_mask_b = np.reshape(conf_mask_b, -1)
conf_mask_b[cur_ious > sil_thresh] = 0
conf_mask_b = np.reshape(conf_mask_b, conf_mask_b_orig_shape)
conf_mask[b] = conf_mask_b
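    # Warm-up phase: for the first 12800 images seen, softly pull predicted
    # box centres toward a prior (the cell centre, or per-anchor offsets when
    # anchor_step == 4) using a small coordinate weight.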
if seen < 12800:
if anchor_step == 4:
tx = anchors.reshape((nA, anchor_step))[
:, 2].reshape((1, nA, 1, 1))
for t_axis, t_n in zip([0, 2, 3], [nB, nH, nW]):
tx = np.repeat(tx, t_n, axis=t_axis)
            ty = anchors.reshape((num_anchors, anchor_step))[
                :, 3].reshape((1, nA, 1, 1))
            for t_axis, t_n in zip([0, 2, 3], [nB, nH, nW]):
                ty = np.repeat(ty, t_n, axis=t_axis)
else:
tx.fill(0.5)
ty.fill(0.5)
tw.fill(0.0)
th.fill(0.0)
coord_mask.fill(0.01)
nGT = 0
nCorrect = 0
avg_iou = 0.0
count = 0
for b in range(nB):
for t in range(50):
if target[b][t*5+1] == 0:
break
nGT = nGT + 1
best_iou = 0.0
best_n = -1
min_dist = 10000
gx = target[b][t*5+1] * nW
gy = target[b][t*5+2] * nH
gi = int(gx)
gj = int(gy)
truthw = target[b][t*5+3]
truthh = target[b][t*5+4]
gw = truthw * nW
gh = truthh * nH
gt_box = [0, 0, gw, gh]
for n in range(nA):
aw = anchors[anchor_step*n]
ah = anchors[anchor_step*n+1]
anchor_box = [0, 0, aw, ah]
iou = bbox_iou_numpy(anchor_box, gt_box, x1y1x2y2=False)
if anchor_step == 4:
ax = anchors[anchor_step*n+2]
ay = anchors[anchor_step*n+3]
dist = pow(((gi+ax) - gx), 2) + pow(((gj+ay) - gy), 2)
if iou > best_iou:
best_iou = iou
best_n = n
elif anchor_step == 4 and iou == best_iou and dist < min_dist:
best_iou = iou
best_n = n
min_dist = dist
gt_box = [gx, gy, gw, gh]
pred_box = pred_boxes[b*nAnchors+best_n*nPixels+gj*nW+gi]
coord_mask[b][best_n][gj][gi] = coord_scale * (2 - truthw * truthh)
cls_mask[b][best_n][gj][gi] = cls_scale
conf_mask[b][best_n][gj][gi] = object_scale
tx[b][best_n][gj][gi] = target[b][t*5+1] * nW - gi
ty[b][best_n][gj][gi] = target[b][t*5+2] * nH - gj
tw[b][best_n][gj][gi] = np.log(gw/anchors[anchor_step*best_n])
th[b][best_n][gj][gi] = np.log(gh/anchors[anchor_step*best_n+1])
iou = bbox_iou_numpy(gt_box, pred_box, x1y1x2y2=False) # best_iou
tconf[b][best_n][gj][gi] = iou
tcls[b][best_n][gj][gi] = target[b][t*5]
if iou > 0.5:
nCorrect = nCorrect + 1
avg_iou += iou
count += 1
return nGT, nCorrect, avg_iou / count, coord_mask[:, :, None], conf_mask[:, :, None], cls_mask[:, :, None], np.stack((tx, ty, tw, th), axis=2), tconf[:, :, None], tcls[:, :, None]
| null |
1,761 |
from __future__ import annotations
import logging
import time
from decimal import Decimal
from typing import Final, Iterable, Tuple
import attrs
from aiohttp import web
from aiotools import apartial
from ai.backend.common import redis_helper
from ai.backend.common.defs import REDIS_RLIM_DB
from ai.backend.common.logging import BraceStyleAdapter
from ai.backend.common.types import RedisConnectionInfo
from .context import RootContext
from .exceptions import RateLimitExceeded
from .types import CORSOptions, WebMiddleware, WebRequestHandler
log = BraceStyleAdapter(logging.getLogger(__spec__.name)) # type: ignore[name-defined]
_time_prec: Final = Decimal("1e-3") # msec
_rlim_window: Final = 60 * 15
# We implement rate limiting using a rolling counter, which prevents
# last-minute and first-minute bursts between the intervals.
_rlim_script = """
local access_key = KEYS[1]
local now = tonumber(ARGV[1])
local window = tonumber(ARGV[2])
local request_id = tonumber(redis.call('INCR', '__request_id'))
if request_id >= 1e12 then
redis.call('SET', '__request_id', 1)
end
if redis.call('EXISTS', access_key) == 1 then
redis.call('ZREMRANGEBYSCORE', access_key, 0, now - window)
end
redis.call('ZADD', access_key, now, tostring(request_id))
redis.call('EXPIRE', access_key, window)
return redis.call('ZCARD', access_key)
"""
@web.middleware
async def rlim_middleware(
app: web.Application,
request: web.Request,
handler: WebRequestHandler,
) -> web.StreamResponse:
# This is a global middleware: request.app is the root app.
app_ctx: PrivateContext = app["ratelimit.context"]
now = Decimal(time.time()).quantize(_time_prec)
rr = app_ctx.redis_rlim
if request["is_authorized"]:
rate_limit = request["keypair"]["rate_limit"]
access_key = request["keypair"]["access_key"]
ret = await redis_helper.execute_script(
rr,
"ratelimit",
_rlim_script,
[access_key],
[str(now), str(_rlim_window)],
)
if ret is None:
remaining = rate_limit
else:
rolling_count = int(ret)
if rolling_count > rate_limit:
raise RateLimitExceeded
remaining = rate_limit - rolling_count
response = await handler(request)
response.headers["X-RateLimit-Limit"] = str(rate_limit)
response.headers["X-RateLimit-Remaining"] = str(remaining)
response.headers["X-RateLimit-Window"] = str(_rlim_window)
return response
else:
# No checks for rate limiting for non-authorized queries.
response = await handler(request)
response.headers["X-RateLimit-Limit"] = "1000"
response.headers["X-RateLimit-Remaining"] = "1000"
response.headers["X-RateLimit-Window"] = str(_rlim_window)
return response
@attrs.define(slots=True, auto_attribs=True, METHOD_NAME=False)
class PrivateContext:
redis_rlim: RedisConnectionInfo
redis_rlim_script: str
async def METHOD_NAME(app: web.Application) -> None:
root_ctx: RootContext = app["_root.context"]
app_ctx: PrivateContext = app["ratelimit.context"]
app_ctx.redis_rlim = redis_helper.get_redis_object(
root_ctx.shared_config.data["redis"], db=REDIS_RLIM_DB
)
app_ctx.redis_rlim_script = await redis_helper.execute(
app_ctx.redis_rlim, lambda r: r.script_load(_rlim_script)
)
async def shutdown(app: web.Application) -> None:
app_ctx: PrivateContext = app["ratelimit.context"]
await redis_helper.execute(app_ctx.redis_rlim, lambda r: r.flushdb())
await app_ctx.redis_rlim.close()
def create_app(
default_cors_options: CORSOptions,
) -> Tuple[web.Application, Iterable[WebMiddleware]]:
app = web.Application()
app["api_versions"] = (1, 2, 3, 4)
app["ratelimit.context"] = PrivateContext()
app.on_startup.append(METHOD_NAME)
app.on_shutdown.append(shutdown)
# middleware must be wrapped by web.middleware at the outermost level.
return app, [web.middleware(apartial(rlim_middleware, app))]
| null |
1,762 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmpaas.endpoint import endpoint_data
class UploadMcubeMiniPackageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mPaaS', '2019-08-21', 'UploadMcubeMiniPackage')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AutoInstall(self):
return self.get_body_params().get('AutoInstall')
def set_AutoInstall(self,AutoInstall):
self.add_body_params('AutoInstall', AutoInstall)
def get_InstallType(self):
return self.get_body_params().get('InstallType')
def set_InstallType(self,InstallType):
self.add_body_params('InstallType', InstallType)
def get_OnexFlag(self):
return self.get_body_params().get('OnexFlag')
def set_OnexFlag(self,OnexFlag):
self.add_body_params('OnexFlag', OnexFlag)
def get_EnableOptionMenu(self):
return self.get_body_params().get('EnableOptionMenu')
def set_EnableOptionMenu(self,EnableOptionMenu):
self.add_body_params('EnableOptionMenu', EnableOptionMenu)
def get_H5Version(self):
return self.get_body_params().get('H5Version')
def set_H5Version(self,H5Version):
self.add_body_params('H5Version', H5Version)
def get_EnableTabBar(self):
return self.get_body_params().get('EnableTabBar')
def set_EnableTabBar(self,EnableTabBar):
self.add_body_params('EnableTabBar', EnableTabBar)
def get_UserId(self):
return self.get_body_params().get('UserId')
def set_UserId(self,UserId):
self.add_body_params('UserId', UserId)
def get_Uuid(self):
return self.get_body_params().get('Uuid')
def set_Uuid(self,Uuid):
self.add_body_params('Uuid', Uuid)
def get_ResourceFileUrl(self):
return self.get_body_params().get('ResourceFileUrl')
def set_ResourceFileUrl(self,ResourceFileUrl):
self.add_body_params('ResourceFileUrl', ResourceFileUrl)
def get_H5Id(self):
return self.get_body_params().get('H5Id')
def set_H5Id(self,H5Id):
self.add_body_params('H5Id', H5Id)
def get_ExtendInfo(self):
return self.get_body_params().get('ExtendInfo')
def set_ExtendInfo(self,ExtendInfo):
self.add_body_params('ExtendInfo', ExtendInfo)
def get_MainUrl(self):
return self.get_body_params().get('MainUrl')
def set_MainUrl(self,MainUrl):
self.add_body_params('MainUrl', MainUrl)
def get_ClientVersionMin(self):
return self.get_body_params().get('ClientVersionMin')
def set_ClientVersionMin(self,ClientVersionMin):
self.add_body_params('ClientVersionMin', ClientVersionMin)
def get_EnableKeepAlive(self):
return self.get_body_params().get('EnableKeepAlive')
def set_EnableKeepAlive(self,EnableKeepAlive):
self.add_body_params('EnableKeepAlive', EnableKeepAlive)
def get_Vhost(self):
return self.get_body_params().get('Vhost')
def set_Vhost(self,Vhost):
self.add_body_params('Vhost', Vhost)
def get_ClientVersionMax(self):
return self.get_body_params().get('ClientVersionMax')
def set_ClientVersionMax(self,ClientVersionMax):
self.add_body_params('ClientVersionMax', ClientVersionMax)
def get_PackageType(self):
return self.get_body_params().get('PackageType')
def set_PackageType(self,PackageType):
self.add_body_params('PackageType', PackageType)
def METHOD_NAME(self):
return self.get_body_params().get('WorkspaceId')
def set_WorkspaceId(self,WorkspaceId):
self.add_body_params('WorkspaceId', WorkspaceId)
def get_H5Name(self):
return self.get_body_params().get('H5Name')
def set_H5Name(self,H5Name):
self.add_body_params('H5Name', H5Name)
def get_Platform(self):
return self.get_body_params().get('Platform')
def set_Platform(self,Platform):
self.add_body_params('Platform', Platform)
def get_TenantId(self):
return self.get_body_params().get('TenantId')
def set_TenantId(self,TenantId):
self.add_body_params('TenantId', TenantId)
def get_ResourceType(self):
return self.get_body_params().get('ResourceType')
def set_ResourceType(self,ResourceType):
self.add_body_params('ResourceType', ResourceType)
def get_IconFileUrl(self):
return self.get_body_params().get('IconFileUrl')
def set_IconFileUrl(self,IconFileUrl):
self.add_body_params('IconFileUrl', IconFileUrl)
def get_AppId(self):
return self.get_body_params().get('AppId')
def set_AppId(self,AppId):
		self.add_body_params('AppId', AppId)
| null |
1,763 |
from abc import abstractmethod
from dataclasses import dataclass
from typing import List
import numpy as np
from scipy.stats import t # type: ignore
from gbstats.shared.models import (
FrequentistTestResult,
Statistic,
Uplift,
)
from gbstats.shared.tests import BaseABTest
@dataclass
class FrequentistConfig:
alpha: float = 0.05
test_value: float = 0
@dataclass
class SequentialConfig(FrequentistConfig):
sequential_tuning_parameter: float = 5000
class TTest(BaseABTest):
def __init__(
self,
stat_a: Statistic,
stat_b: Statistic,
config: FrequentistConfig = FrequentistConfig(),
):
"""Base class for one- and two-sided T-Tests with unequal variance.
All values are with respect to relative effects, not absolute effects.
A result prepared for integration with the stats runner can be
generated by calling `.compute_result()`
Args:
stat_a (Statistic): the "control" or "baseline" statistic
stat_b (Statistic): the "treatment" or "variation" statistic
"""
super().__init__(stat_a, stat_b)
self.alpha = config.alpha
self.test_value = config.test_value
@property
def variance(self) -> float:
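        # Delta-method variance of the relative lift
        # (mean_b - mean_a) / unadjusted_mean_a, assuming independent samples.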
return self.stat_b.variance / (
pow(self.stat_a.unadjusted_mean, 2) * self.stat_b.n
) + self.stat_a.variance * pow(self.stat_b.unadjusted_mean, 2) / (
pow(self.stat_a.unadjusted_mean, 4) * self.stat_a.n
)
@property
def point_estimate(self) -> float:
return (self.stat_b.mean - self.stat_a.mean) / self.stat_a.unadjusted_mean
@property
def METHOD_NAME(self) -> float:
return (self.point_estimate - self.test_value) / np.sqrt(self.variance)
@property
def dof(self) -> float:
# welch-satterthwaite approx
return pow(
self.stat_b.variance / self.stat_b.n + self.stat_a.variance / self.stat_a.n,
2,
) / (
pow(self.stat_b.variance, 2) / (pow(self.stat_b.n, 2) * (self.stat_b.n - 1))
+ pow(self.stat_a.variance, 2)
/ (pow(self.stat_a.n, 2) * (self.stat_a.n - 1))
)
@property
@abstractmethod
def p_value(self) -> float:
pass
@property
@abstractmethod
def confidence_interval(self) -> List[float]:
pass
def _default_output(self) -> FrequentistTestResult:
"""Return uninformative output when AB test analysis can't be performed
adequately
"""
return FrequentistTestResult(
expected=0,
ci=[0, 0],
p_value=1,
uplift=Uplift(
dist="normal",
mean=0,
stddev=0,
),
)
def compute_result(self) -> FrequentistTestResult:
"""Compute the test statistics and return them
for the main gbstats runner
Returns:
FrequentistTestResult -
note the values are with respect to percent uplift,
not absolute differences
"""
if self.stat_a.mean == 0:
return self._default_output()
if self.stat_a.unadjusted_mean == 0:
return self._default_output()
if self._has_zero_variance():
return self._default_output()
return FrequentistTestResult(
expected=self.point_estimate,
ci=self.confidence_interval,
p_value=self.p_value,
uplift=Uplift(
dist="normal",
mean=self.point_estimate,
stddev=np.sqrt(self.variance),
),
)
class TwoSidedTTest(TTest):
@property
def p_value(self) -> float:
return 2 * (1 - t.cdf(abs(self.METHOD_NAME), self.dof))
@property
def confidence_interval(self) -> List[float]:
width: float = t.ppf(1 - self.alpha / 2, self.dof) * np.sqrt(self.variance)
return [self.point_estimate - width, self.point_estimate + width]
class OneSidedTreatmentGreaterTTest(TTest):
@property
def p_value(self) -> float:
return 1 - t.cdf(self.METHOD_NAME, self.dof)
@property
def confidence_interval(self) -> List[float]:
width: float = t.ppf(1 - self.alpha, self.dof) * np.sqrt(self.variance)
return [self.point_estimate - width, np.inf]
class OneSidedTreatmentLesserTTest(TTest):
@property
def p_value(self) -> float:
return t.cdf(self.METHOD_NAME, self.dof)
@property
def confidence_interval(self) -> List[float]:
width: float = t.ppf(1 - self.alpha, self.dof) * np.sqrt(self.variance)
return [-np.inf, self.point_estimate - width]
class SequentialTwoSidedTTest(TTest):
def __init__(
self,
stat_a: Statistic,
stat_b: Statistic,
config: SequentialConfig = SequentialConfig(),
):
super().__init__(
stat_a,
stat_b,
FrequentistConfig(alpha=config.alpha, test_value=config.test_value),
)
self.sequential_tuning_parameter = config.sequential_tuning_parameter
@property
def confidence_interval(self) -> List[float]:
# eq 9 in Waudby-Smith et al. 2023 https://arxiv.org/pdf/2103.06476v7.pdf
N = self.stat_a.n + self.stat_b.n
rho = self.rho
s2 = self.variance * N
width: float = np.sqrt(s2) * np.sqrt(
(
(2 * (N * np.power(rho, 2) + 1))
* np.log(np.sqrt(N * np.power(rho, 2) + 1) / self.alpha)
/ (np.power(N * rho, 2))
)
)
return [self.point_estimate - width, self.point_estimate + width]
@property
def rho(self) -> float:
# eq 161 in https://arxiv.org/pdf/2103.06476v7.pdf
return np.sqrt(
(-2 * np.log(self.alpha) + np.log(-2 * np.log(self.alpha) + 1))
/ self.sequential_tuning_parameter
)
@property
def p_value(self) -> float:
# eq 155 in https://arxiv.org/pdf/2103.06476v7.pdf
N = self.stat_a.n + self.stat_b.n
# slight reparameterization for this quantity below
st2 = np.power(self.point_estimate - self.test_value, 2) * N / (self.variance)
tr2p1 = N * np.power(self.rho, 2) + 1
evalue = np.exp(np.power(self.rho, 2) * st2 / (2 * tr2p1)) / np.sqrt(tr2p1)
return min(1 / evalue, 1)
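# Usage sketch (an assumption, not part of the module): given two Statistic
# objects produced by the gbstats runner,
#
#   result = TwoSidedTTest(stat_a, stat_b).compute_result()
#   print(result.expected, result.ci, result.p_value)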
| null |
1,764 |
import socket
import urllib.request
import json
import random
import math
import sys
import logManager
import requests
from functions.colors import convert_rgb_xy, convert_xy, rgbBrightness
from time import sleep
from zeroconf import IPVersion, ServiceBrowser, ServiceStateChange, Zeroconf, ZeroconfServiceTypes
logging = logManager.logger.get_logger(__name__)
discovered_lights = []
Connections = {}
def on_mdns_discover(zeroconf, service_type, name, state_change):
global discovered_lights
if "wled" in name and state_change is ServiceStateChange.Added:
info = zeroconf.get_service_info(service_type, name)
if info:
addresses = ["%s" % (socket.inet_ntoa(addr))
for addr in info.addresses]
discovered_lights.append([addresses[0], name])
def discover(detectedLights, device_ips):
logging.info('<WLED> discovery started')
ip_version = IPVersion.V4Only
zeroconf = Zeroconf(ip_version=ip_version)
services = ["_http._tcp.local."]
browser = ServiceBrowser(zeroconf, services, handlers=[on_mdns_discover])
sleep(2)
if len(discovered_lights) == 0:
# Didn't find anything using mdns, trying device_ips
logging.info(
"<WLED> Nothing found using mDNS, trying device_ips method...")
for ip in device_ips:
try:
response = requests.get(
"http://" + ip + "/json/info", timeout=3)
if response.status_code == 200:
json_resp = json.loads(response.content)
if json_resp['brand'] == "WLED":
discovered_lights.append([ip, json_resp['name']])
except Exception as e:
logging.debug("<WLED> ip %s is unknown device", ip)
lights = []
for device in discovered_lights:
try:
x = WledDevice(device[0], device[1])
logging.info("<WLED> Found device: %s with %d segments" %
(device[1], x.segmentCount))
modelid = "LST002" # Gradient Strip
segmentid = 0
for _ in range(1, x.segmentCount+1):
lights.append({"protocol": "wled",
"name": x.name + "_seg" + str(segmentid),
"modelid": modelid,
"protocol_cfg": {
"ip": x.ip,
"ledCount": x.ledCount,
"mdns_name": device[1],
"mac": x.mac,
"segmentId": segmentid
}
})
segmentid = segmentid + 1
for light in lights:
detectedLights.append(light)
except:
break
def set_light(light, data):
ip = light.protocol_cfg['ip']
if ip in Connections:
c = Connections[ip]
else:
c = WledDevice(ip, light.protocol_cfg['mdns_name'])
Connections[ip] = c
if "lights" in data:
        # We ignore the segment count that hue provides atm
destructured_data = data["lights"][list(data["lights"].keys())[0]]
send_light_data(c, light, destructured_data)
else:
send_light_data(c, light, data)
def send_light_data(c, light, data):
state = {}
# Always turn on the segment and handle the on/off at light level
seg = {
"id": light.protocol_cfg['segmentId'],
"on": True
}
for k, v in data.items():
if k == "on":
# Handle on/off at light level
if v:
state["on"] = True
else:
state["on"] = False
elif k == "bri":
seg["bri"] = v+1
elif k == "ct":
kelvin = round(translateRange(v, 153, 500, 6500, 2000))
color = kelvinToRgb(kelvin)
seg["col"] = [[color[0], color[1], color[2]]]
elif k == "xy":
color = convert_xy(v[0], v[1], 255)
seg["col"] = [[color[0], color[1], color[2]]]
elif k == "alert" and v != "none":
state = c.getSegState(light.protocol_cfg['segmentId'])
c.setBriSeg(0, light.protocol_cfg['segmentId'])
sleep(0.6)
c.setBriSeg(state["bri"], light.protocol_cfg['segmentId'])
return
state["seg"] = [seg]
c.METHOD_NAME(state)
def get_light_state(light):
ip = light.protocol_cfg['ip']
if ip in Connections:
c = Connections[ip]
else:
c = WledDevice(ip, light.protocol_cfg['mdns_name'])
Connections[ip] = c
return c.getSegState(light.protocol_cfg['segmentId'])
def translateRange(value, leftMin, leftMax, rightMin, rightMax):
leftSpan = leftMax - leftMin
rightSpan = rightMax - rightMin
valueScaled = float(value - leftMin) / float(leftSpan)
return rightMin + (valueScaled * rightSpan)
def clamp(num, min_val, max_val):
return max(min(num, max_val), min_val)
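# Approximates RGB for a colour temperature in Kelvin; the curve-fit constants
# appear to follow Tanner Helland's widely used algorithm (an assumption).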
def kelvinToRgb(temp):
tmpKelvin = clamp(temp, 1000, 40000) / 100
r = 255 if tmpKelvin <= 66 else clamp(
329.698727446 * pow(tmpKelvin - 60, -0.1332047592), 0, 255)
g = clamp(99.4708025861 * math.log(tmpKelvin) - 161.1195681661, 0,
255) if tmpKelvin <= 66 else clamp(288.1221695283 * (pow(tmpKelvin - 60, -0.0755148492)), 0, 255)
if tmpKelvin >= 66:
b = 255
elif tmpKelvin <= 19:
b = 0
else:
b = clamp(138.5177312231 * math.log(tmpKelvin - 10) -
305.0447927307, 0, 255)
return [r, g, b]
class WledDevice:
def __init__(self, ip, mdns_name):
self.ip = ip
self.name = mdns_name.split(".")[0]
self.url = 'http://' + self.ip
self.ledCount = 0
self.mac = None
self.segmentCount = 1 # Default number of segments in WLED
self.segments = []
self.getInitialState()
def getInitialState(self):
self.state = self.getLightState()
self.getSegments()
self.getLedCount()
self.getMacAddr()
def getLedCount(self):
self.ledCount = self.state['info']['leds']['count']
def getMacAddr(self):
self.mac = ':'.join(self.state[
'info']['mac'][i:i+2] for i in range(0, 12, 2))
def getSegments(self):
self.segments = self.state['state']['seg']
self.segmentCount = len(self.segments)
def getLightState(self):
with urllib.request.urlopen(self.url + '/json') as resp:
data = json.loads(resp.read())
return data
def getSegState(self, seg):
state = {}
data = self.getLightState()['state']
seg = data['seg'][seg]
state['bri'] = seg['bri']
state['on'] = data['on'] # Get on/off at light level
state['bri'] = seg['bri']
# Weird division by zero when a color is 0
r = int(seg['col'][0][0])+1
g = int(seg['col'][0][1])+1
b = int(seg['col'][0][2])+1
state['xy'] = convert_rgb_xy(r, g, b)
state["colormode"] = "xy"
return state
def setRGBSeg(self, r, g, b, seg):
state = {"seg": [{"id": seg, "col": [[r, g, b]]}]}
self.METHOD_NAME(state)
def setOnSeg(self, on, seg):
state = {"seg": [{"id": seg, "on": on}]}
self.METHOD_NAME(state)
def setBriSeg(self, bri, seg):
state = {"seg": [{"id": seg, "bri": bri}]}
self.METHOD_NAME(state)
def METHOD_NAME(self, data):
req = urllib.request.Request(self.url + "/json")
req.add_header('Content-Type', 'application/json; charset=utf-8')
jsondata = json.dumps(data)
jsondataasbytes = jsondata.encode('utf-8')
req.add_header('Content-Length', len(jsondataasbytes))
response = urllib.request.urlopen(req, jsondataasbytes)
| null |
1,765 |
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils.pipeline_utils import *
import os
MAX_REPETITIONS = 200
def get_tool_info(tool, max_retry=100):
def METHOD_NAME():
api = os.environ['API']
token = os.environ['API_TOKEN']
command = [
'curl', '-H', 'Authorization: Bearer {}'.format(token), '-k', '-L', '{}/{}'.format(api.strip("/"), "tool/load?image={}".format(tool))
]
process = subprocess.Popen(command, stdout=subprocess.PIPE)
return process.wait(), ''.join(process.stdout.readlines())
rep_count = 1
code, result = METHOD_NAME()
    while rep_count < max_retry and code != 0:
        rep_count += 1
        code, result = METHOD_NAME()
if code == 0:
if 'payload' in result:
return json.loads(result)['payload']
raise RuntimeError("Can't load tool info from API")
def update_tool_info(tool, max_retry=100):
def curl_tool_update_api():
api = os.environ['API']
token = os.environ['API_TOKEN']
command = [
'curl', '-H', "Content-Type: application/json", '-X', 'POST', '-H', 'Authorization: Bearer {}'.format(token),
'-k', '-L', '{}/{}'.format(api.strip("/"), "tool/update"), '--data', json.dumps(tool)
]
process = subprocess.Popen(command, stdout=subprocess.PIPE)
return process.wait(), process.stdout.readlines()
rep_count = 1
code, result = curl_tool_update_api()
    while rep_count < max_retry and code != 0:
        rep_count += 1
        code, result = curl_tool_update_api()
if code != 0:
raise RuntimeError("Can't update tool info from API")
def run_test(tool, command, endpoints_structure, url_checker=None, check_access=True, friendly_url=None,
no_machine=False, spark=False, custom_dns_endpoints=0):
run_id, node_name = run(tool, command, no_machine=no_machine, spark=spark, friendly_url=friendly_url)
edge_services = get_edge_services()
    # calculate the number of endpoints that should be generated for the existing edges
number_of_endpoints = custom_dns_endpoints + (len(endpoints_structure) - custom_dns_endpoints) * len(edge_services)
try:
endpoints = get_endpoint_urls(run_id)
check_for_number_of_endpoints(endpoints, number_of_endpoints)
for endpoint in endpoints:
url = endpoint["url"]
name = endpoint["name"]
region = endpoint["region"]
pattern = endpoints_structure[name].format(run_id=run_id)
structure_is_fine = check_service_url_structure(url, pattern, checker=url_checker)
assert structure_is_fine, "service url: {}, has wrong format.".format(url)
is_accessible = not check_access or follow_service_url(url, 100)
assert is_accessible, "service url: {} : {} : {}, is not accessible.".format(name, region, url)
return run_id, node_name
finally:
stop_pipe_with_retry(run_id)
def get_edge_services(max_retry=100):
def curl_edge_api():
api = os.environ['API']
token = os.environ['API_TOKEN']
command = [
'curl', '-H', 'Authorization: Bearer {}'.format(token), '-k', '-L',
'{}/{}'.format(api.strip("/"), "edge/services")
]
process = subprocess.Popen(command, stdout=subprocess.PIPE)
return process.wait(), ''.join(process.stdout.readlines())
rep_count = 1
code, result = curl_edge_api()
    while rep_count < max_retry and code != 0:
        rep_count += 1
        code, result = curl_edge_api()
if code == 0:
if 'payload' in result:
return json.loads(result)['payload']
raise RuntimeError("Can't load edges info from API")
def run(image, command="echo {test_case}; sleep infinity", no_machine=False, spark=False, friendly_url=None,
test_case=None):
args = ["-id", "50",
"-pt", "on-demand",
"-cmd", command.format(test_case=test_case),
"-di", image, "-np"]
if friendly_url:
args.append("--friendly-url")
args.append(friendly_url)
args.append("CP_CAP_LIMIT_MOUNTS")
args.append('None')
if no_machine:
args.append("CP_CAP_DESKTOP_NM")
args.append('boolean?true')
if spark:
args.append("CP_CAP_SPARK")
args.append('boolean?true')
node_name = None
(run_id, _) = run_tool(*args)
try:
logging.info("Pipeline run with ID %s." % run_id)
wait_for_instance_creation(run_id, MAX_REPETITIONS)
logging.info("Instance %s created." % run_id)
node_state = wait_for_node_up(run_id, MAX_REPETITIONS)
node_name = get_node_name_from_cluster_state(node_state)
logging.info("Used node %s." % node_name)
wait_for_run_initialized(run_id, MAX_REPETITIONS)
wait_for_service_urls(run_id, MAX_REPETITIONS / 4)
logging.info("Pipeline %s has initialized successfully." % run_id)
except BaseException as e:
stop_pipe_with_retry(run_id)
raise e
return run_id, node_name
def check_for_number_of_endpoints(urls, number_of_endpoints):
assert len(urls) == number_of_endpoints, "Number of endpoints is not correct. Required: {}, actual: {}".format(number_of_endpoints, len(urls))
def check_service_url_structure(url, pattern, checker):
if checker is None:
return url.endswith(pattern)
return checker(url, pattern)
def follow_service_url(url, max_rep_count, check=lambda x: "HTTP/1.1 200" in x):
token = os.environ['API_TOKEN']
result = curl_service_url(url, token, check)
rep = 0
while rep < max_rep_count:
if result:
return result
sleep(5)
rep = rep + 1
result = curl_service_url(url, token, check)
return False
def curl_service_url(url, token, check):
command = ['curl', '-H', 'Authorization: Bearer {}'.format(token), '-k', '-L', '-s', '-I', url]
process = subprocess.Popen(command, stdout=subprocess.PIPE)
process.wait()
result = ''.join(process.stdout.readlines())
return check(result)
| null |
1,766 |
import argparse
import json
import os
import shutil
DESCRIPTION = (
"This script does two things. First you need to copy and paste your "
"generated hypercube scenes both debug and non-debug to a directory "
"called 'hypercube_scenes'. There are 4 args. '--change_file_names' "
"is for changing file names to be the Cell ID of the scene. "
"*Note* this will also delete the non-debug .json files for you. "
"The end result are files called something like A1, B2, C3 which "
"are all debug scenes.'--videos' will loop through all of the .json files "
"in the 'hypercube_scenes_directory' and make videos. It will output "
"the videos and metadata folders to a directory called "
"'hypercube_output'. *Note* by default the videos will only go "
"through the actions in the action_list. If you want your own actions "
"use the '--action_file' argument to input your own action_file that can "
"go through the actions in the action_list and do additional actions. "
"You also need to specifify the unity build path with the '--unity' "
"argument only if you are using the '--videos' argument. *Note* You can "
"combine '--change_file_names' and '-videos' to change the file names "
"and then make the videos, or can do each one indivudally if you "
"just need to change the file names or you just need to make videos."
)
class FileRenamerVideoCreator:
def __init__(self, files: bool, videos: bool,
unity: str, action_file: str):
self._files = files
self._videos = videos
self._unity_build_path = unity
self._action_file = action_file
self._actions = None
if not self._action_file:
self._action_file = "action_file.txt"
self._actions = ["Pass"] * 10
def run(self):
if self._files:
self._change_file_names()
if self._videos:
self._make_videos()
def _create_temp_action_file(self):
with open("action_file.txt", "w") as file:
for action in self._actions:
file.write(action + "\n")
def _delete_temp_action_file(self):
if os.path.exists("action_file.txt"):
os.remove("action_file.txt")
def _change_file_names(self):
dir_path = os.path.join(os.getcwd(), "hypercube_scenes")
for filename in os.listdir(dir_path):
if "debug" not in filename:
file_path = os.path.join(dir_path, filename)
os.remove(file_path)
continue
file_path = os.path.join(dir_path, filename)
with open(file_path, "r") as f:
data = json.load(f)
new_file_name = filename.split("_")[-2]
data["name"] = new_file_name
with open(file_path, "w") as f:
json.dump(data, f, indent=4, sort_keys=True)
new_file_path = os.path.join(dir_path, new_file_name + ".json")
os.rename(file_path, new_file_path)
def _make_videos(self):
if self._actions:
self._create_temp_action_file()
# Get a list of all files in the directory
files = os.listdir("./hypercube_scenes/")
# Loop through each file in the directory
for file_name in files:
command = (
f"python run_action_file.py --mcs_unity_build_file "
f"{self._unity_build_path} ./hypercube_scenes/{file_name} "
f"{self._action_file} --save-videos")
# Run the command
os.system(command)
            # Move the generated folder and video to the hypercube_output folder
folder_name = file_name.split(".")[0]
src_folder = folder_name
dst_folder = f"./hypercube_output/{folder_name}"
src_video = f"{folder_name}.mp4"
dst_video = f"./hypercube_output/{folder_name}.mp4"
shutil.move(src_folder, dst_folder)
shutil.move(src_video, dst_video)
if self._actions:
self._delete_temp_action_file()
def METHOD_NAME():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
'-f', '--change_file_names',
help='Change the file names to the Cell ID. Example A1, B2, C3..',
action='store_true')
parser.add_argument(
'-v',
'--videos',
help=("Make the videos and output them to the "
"'hypercube_output' directory. If the directory does not exist "
"it will be created."),
action='store_true')
parser.add_argument(
'-u', '--unity',
help='The unity build path for videos.')
parser.add_argument(
'-a',
'--action_file',
help=('Set the action file instead of having one generated. '
'The generated one is useful if there is an action_list '
'in the scene because the generated one will only go '
'through the action_list actions and then end the scene.'))
args = parser.parse_args()
FileRenamerVideoCreator(
args.change_file_names,
args.videos,
args.unity,
args.action_file
).run()
METHOD_NAME()
| null |
1,767 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class CreateTableRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'CreateTable')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def METHOD_NAME(self): # String
return self.get_query_params().get('Schema')
def set_Schema(self, Schema): # String
self.add_query_param('Schema', Schema)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Columnss(self): # RepeatList
return self.get_body_params().get('Columns')
def set_Columnss(self, Columns): # RepeatList
for depth1 in range(len(Columns)):
if Columns[depth1].get('SeqNumber') is not None:
self.add_body_params('Columns.' + str(depth1 + 1) + '.SeqNumber', Columns[depth1].get('SeqNumber'))
if Columns[depth1].get('IsPartitionCol') is not None:
self.add_body_params('Columns.' + str(depth1 + 1) + '.IsPartitionCol', Columns[depth1].get('IsPartitionCol'))
if Columns[depth1].get('ColumnNameCn') is not None:
self.add_body_params('Columns.' + str(depth1 + 1) + '.ColumnNameCn', Columns[depth1].get('ColumnNameCn'))
if Columns[depth1].get('Length') is not None:
self.add_body_params('Columns.' + str(depth1 + 1) + '.Length', Columns[depth1].get('Length'))
if Columns[depth1].get('Comment') is not None:
self.add_body_params('Columns.' + str(depth1 + 1) + '.Comment', Columns[depth1].get('Comment'))
if Columns[depth1].get('ColumnName') is not None:
self.add_body_params('Columns.' + str(depth1 + 1) + '.ColumnName', Columns[depth1].get('ColumnName'))
if Columns[depth1].get('ColumnType') is not None:
self.add_body_params('Columns.' + str(depth1 + 1) + '.ColumnType', Columns[depth1].get('ColumnType'))
def get_LifeCycle(self): # Integer
return self.get_query_params().get('LifeCycle')
def set_LifeCycle(self, LifeCycle): # Integer
self.add_query_param('LifeCycle', LifeCycle)
def get_Themess(self): # RepeatList
return self.get_body_params().get('Themes')
def set_Themess(self, Themes): # RepeatList
for depth1 in range(len(Themes)):
if Themes[depth1].get('ThemeLevel') is not None:
self.add_body_params('Themes.' + str(depth1 + 1) + '.ThemeLevel', Themes[depth1].get('ThemeLevel'))
if Themes[depth1].get('ThemeId') is not None:
self.add_body_params('Themes.' + str(depth1 + 1) + '.ThemeId', Themes[depth1].get('ThemeId'))
def get_LogicalLevelId(self): # Long
return self.get_query_params().get('LogicalLevelId')
def set_LogicalLevelId(self, LogicalLevelId): # Long
self.add_query_param('LogicalLevelId', LogicalLevelId)
def get_Endpoint(self): # String
return self.get_body_params().get('Endpoint')
def set_Endpoint(self, Endpoint): # String
self.add_body_params('Endpoint', Endpoint)
def get_EnvType(self): # Integer
return self.get_body_params().get('EnvType')
def set_EnvType(self, EnvType): # Integer
self.add_body_params('EnvType', EnvType)
def get_HasPart(self): # Integer
return self.get_query_params().get('HasPart')
def set_HasPart(self, HasPart): # Integer
self.add_query_param('HasPart', HasPart)
def get_TableName(self): # String
return self.get_query_params().get('TableName')
def set_TableName(self, TableName): # String
self.add_query_param('TableName', TableName)
def get_AppGuid(self): # String
return self.get_query_params().get('AppGuid')
def set_AppGuid(self, AppGuid): # String
self.add_query_param('AppGuid', AppGuid)
def get_ProjectId(self): # Long
return self.get_query_params().get('ProjectId')
def set_ProjectId(self, ProjectId): # Long
self.add_query_param('ProjectId', ProjectId)
def get_CategoryId(self): # Long
return self.get_query_params().get('CategoryId')
def set_CategoryId(self, CategoryId): # Long
self.add_query_param('CategoryId', CategoryId)
def get_Visibility(self): # Integer
return self.get_query_params().get('Visibility')
def set_Visibility(self, Visibility): # Integer
self.add_query_param('Visibility', Visibility)
def get_PhysicsLevelId(self): # Long
return self.get_query_params().get('PhysicsLevelId')
def set_PhysicsLevelId(self, PhysicsLevelId): # Long
self.add_query_param('PhysicsLevelId', PhysicsLevelId)
def get_OwnerId(self): # String
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # String
self.add_query_param('OwnerId', OwnerId)
def get_IsView(self): # Integer
return self.get_query_params().get('IsView')
def set_IsView(self, IsView): # Integer
self.add_query_param('IsView', IsView)
def get_ExternalTableType(self): # String
return self.get_query_params().get('ExternalTableType')
def set_ExternalTableType(self, ExternalTableType): # String
self.add_query_param('ExternalTableType', ExternalTableType)
def get_Location(self): # String
return self.get_query_params().get('Location')
def set_Location(self, Location): # String
self.add_query_param('Location', Location)
def get_Comment(self): # String
return self.get_query_params().get('Comment')
def set_Comment(self, Comment): # String
self.add_query_param('Comment', Comment)
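# --- Illustrative note (not part of the generated SDK code) ---
# A hedged sketch of how the RepeatList setters above flatten nested
# structures into indexed body parameters; the field values are made up.
#   request.set_Themess([
#       {'ThemeLevel': 1, 'ThemeId': 101},
#       {'ThemeLevel': 2, 'ThemeId': 202},
#   ])
#   # emits body parameters:
#   #   Themes.1.ThemeLevel = 1,  Themes.1.ThemeId = 101
#   #   Themes.2.ThemeLevel = 2,  Themes.2.ThemeId = 202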
| null |
1,768 |
""" Contains the core functionality of the lightly Python package. """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import os
from typing import List, Tuple
import numpy as np
import yaml
import lightly.cli as cli
from lightly.cli.embed_cli import _embed_cli
from lightly.cli.lightly_cli import _lightly_cli
from lightly.cli.train_cli import _train_cli
def METHOD_NAME(config_path):
"""Find path to yaml config file
Args:
config_path: (str) Path to config.yaml file
Returns:
Path to config.yaml if specified else default config.yaml
Raises:
ValueError: If the config_path is not None but doesn't exist
"""
if config_path is None:
dirname = os.path.dirname(cli.__file__)
config_path = os.path.join(dirname, "config/config.yaml")
if not os.path.exists(config_path):
raise ValueError("Config path {} does not exist!".format(config_path))
return config_path
def _load_config_file(config_path):
"""Load a yaml config file
Args:
config_path: (str) Path to config.yaml file
Returns:
Dictionary with configs from config.yaml
"""
Loader = yaml.FullLoader
with open(config_path, "r") as config_file:
cfg = yaml.load(config_file, Loader=Loader)
return cfg
def _add_kwargs(cfg, kwargs):
"""Add keyword arguments to config
Args:
cfg: (dict) Dictionary of configs from config.yaml
kwargs: (dict) Dictionary of keyword arguments
Returns:
Union of cfg and kwargs
"""
for key, item in kwargs.items():
if isinstance(item, dict):
if key in cfg:
cfg[key] = _add_kwargs(cfg[key], item)
else:
cfg[key] = item
else:
cfg[key] = item
return cfg
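# A hedged illustration of the recursive merge above (values made up):
#   _add_kwargs({'loader': {'batch_size': 64, 'num_workers': 4}},
#               {'loader': {'batch_size': 128}, 'seed': 1})
#   # -> {'loader': {'batch_size': 128, 'num_workers': 4}, 'seed': 1}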
def train_model_and_embed_images(
config_path: str = None, **kwargs
) -> Tuple[np.ndarray, List[int], List[str]]:
"""Train a self-supervised model and use it to embed images.
    First trains a model using _train_cli(),
    then embeds with _embed_cli().
All arguments passed to the CLI functions
can also be passed to this function (see below for an example).
Args:
config_path:
Path to config.yaml. If None, the default configs will be used.
**kwargs:
            Overwrite default configs by passing keyword arguments.
Returns:
Embeddings, labels, and filenames of the images.
Embeddings are of shape (n_samples, embedding_size)
len(labels) = len(filenames) = n_samples
Examples:
>>> import lightly
>>>
>>> # train a model and embed images with default configs
>>> embeddings, _, _ = lightly.train_model_and_embed_images(
>>> input_dir='path/to/data')
>>>
>>> # train a model and embed images with separate config file
>>> my_config_path = 'my/config/file.yaml'
>>> embeddings, _, _ = lightly.train_model_and_embed_images(
>>> input_dir='path/to/data', config_path=my_config_path)
>>>
>>> # train a model and embed images with default settings + overwrites
        >>> my_trainer = {'max_epochs': 10}
>>> embeddings, _, _ = lightly.train_model_and_embed_images(
>>> input_dir='path/to/data', trainer=my_trainer)
"""
config_path = METHOD_NAME(config_path)
config_args = _load_config_file(config_path)
config_args = _add_kwargs(config_args, kwargs)
checkpoint = _train_cli(config_args, is_cli_call=False)
config_args["checkpoint"] = checkpoint
embeddings, labels, filenames = _embed_cli(config_args, is_cli_call=False)
return embeddings, labels, filenames
def train_embedding_model(config_path: str = None, **kwargs):
"""Train a self-supervised model.
Calls the same function as lightly-train. All arguments passed to
lightly-train can also be passed to this function (see below for an
example).
Args:
config_path:
Path to config.yaml. If None, the default configs will be used.
**kwargs:
            Overwrite default configs by passing keyword arguments.
Returns:
Path to checkpoint of the trained embedding model.
Examples:
>>> import lightly
>>>
>>> # train a model with default configs
>>> checkpoint_path = lightly.train_embedding_model(
>>> input_dir='path/to/data')
>>>
>>> # train a model with separate config file
>>> my_config_path = 'my/config/file.yaml'
>>> checkpoint_path = lightly.train_embedding_model(
>>> input_dir='path/to/data', config_path=my_config_path)
>>>
>>> # train a model with default settings and overwrites: large batch
        >>> # sizes are beneficial for self-supervised training and more
>>> # workers speed up the dataloading process.
>>> my_loader = {
        >>>     'batch_size': 100,
        >>>     'num_workers': 8,
>>> }
>>> checkpoint_path = lightly.train_embedding_model(
>>> input_dir='path/to/data', loader=my_loader)
>>> # the command above is equivalent to:
>>> # lightly-train input_dir='path/to/data' loader.batch_size=100 loader.num_workers=8
"""
config_path = METHOD_NAME(config_path)
config_args = _load_config_file(config_path)
config_args = _add_kwargs(config_args, kwargs)
return _train_cli(config_args, is_cli_call=False)
def embed_images(checkpoint: str, config_path: str = None, **kwargs):
"""Embed images with a self-supervised model.
Calls the same function as lightly-embed. All arguments passed to
lightly-embed can also be passed to this function (see below for an
example).
Args:
checkpoint:
Path to the checkpoint file for the embedding model.
config_path:
Path to config.yaml. If None, the default configs will be used.
**kwargs:
            Overwrite default configs by passing keyword arguments.
Returns:
Embeddings, labels, and filenames of the images.
Examples:
>>> import lightly
>>> my_checkpoint_path = 'path/to/checkpoint.ckpt'
>>>
>>> # embed images with default configs
>>> embeddings, _, _ = lightly.embed_images(
>>> my_checkpoint_path, input_dir='path/to/data')
>>>
>>> # embed images with separate config file
>>> my_config_path = 'my/config/file.yaml'
>>> embeddings, _, _ = lightly.embed_images(
>>> my_checkpoint_path, input_dir='path/to/data', config_path=my_config_path)
>>>
>>> # embed images with default settings and overwrites: at inference,
>>> # we can use larger input_sizes because it requires less memory.
        >>> my_collate = {'input_size': 256}
>>> embeddings, _, _ = lightly.embed_images(
>>> my_checkpoint_path, input_dir='path/to/data', collate=my_collate)
>>> # the command above is equivalent to:
>>> # lightly-embed input_dir='path/to/data' collate.input_size=256
"""
config_path = METHOD_NAME(config_path)
config_args = _load_config_file(config_path)
config_args = _add_kwargs(config_args, kwargs)
config_args["checkpoint"] = checkpoint
return _embed_cli(config_args, is_cli_call=False)
| null |
1,769 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class ModifySecurityGroupEgressRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'ModifySecurityGroupEgressRule','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_NicType(self): # String
return self.get_query_params().get('NicType')
def set_NicType(self, NicType): # String
self.add_query_param('NicType', NicType)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_SourcePortRange(self): # String
return self.get_query_params().get('SourcePortRange')
def set_SourcePortRange(self, SourcePortRange): # String
self.add_query_param('SourcePortRange', SourcePortRange)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_DestPrefixListId(self): # String
return self.get_query_params().get('DestPrefixListId')
def set_DestPrefixListId(self, DestPrefixListId): # String
self.add_query_param('DestPrefixListId', DestPrefixListId)
def get_SecurityGroupId(self): # String
return self.get_query_params().get('SecurityGroupId')
def set_SecurityGroupId(self, SecurityGroupId): # String
self.add_query_param('SecurityGroupId', SecurityGroupId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_Policy(self): # String
return self.get_query_params().get('Policy')
def set_Policy(self, Policy): # String
self.add_query_param('Policy', Policy)
def get_Ipv6DestCidrIp(self): # String
return self.get_query_params().get('Ipv6DestCidrIp')
def set_Ipv6DestCidrIp(self, Ipv6DestCidrIp): # String
self.add_query_param('Ipv6DestCidrIp', Ipv6DestCidrIp)
def get_Ipv6SourceCidrIp(self): # String
return self.get_query_params().get('Ipv6SourceCidrIp')
def set_Ipv6SourceCidrIp(self, Ipv6SourceCidrIp): # String
self.add_query_param('Ipv6SourceCidrIp', Ipv6SourceCidrIp)
def get_PortRange(self): # String
return self.get_query_params().get('PortRange')
def set_PortRange(self, PortRange): # String
self.add_query_param('PortRange', PortRange)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_IpProtocol(self): # String
return self.get_query_params().get('IpProtocol')
def set_IpProtocol(self, IpProtocol): # String
self.add_query_param('IpProtocol', IpProtocol)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_SourceCidrIp(self): # String
return self.get_query_params().get('SourceCidrIp')
def set_SourceCidrIp(self, SourceCidrIp): # String
self.add_query_param('SourceCidrIp', SourceCidrIp)
def get_DestGroupId(self): # String
return self.get_query_params().get('DestGroupId')
def set_DestGroupId(self, DestGroupId): # String
self.add_query_param('DestGroupId', DestGroupId)
def METHOD_NAME(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_Priority(self): # String
return self.get_query_params().get('Priority')
def set_Priority(self, Priority): # String
self.add_query_param('Priority', Priority)
def get_DestGroupOwnerAccount(self): # String
return self.get_query_params().get('DestGroupOwnerAccount')
def set_DestGroupOwnerAccount(self, DestGroupOwnerAccount): # String
self.add_query_param('DestGroupOwnerAccount', DestGroupOwnerAccount)
def get_DestCidrIp(self): # String
return self.get_query_params().get('DestCidrIp')
def set_DestCidrIp(self, DestCidrIp): # String
self.add_query_param('DestCidrIp', DestCidrIp)
def get_DestGroupOwnerId(self): # Long
return self.get_query_params().get('DestGroupOwnerId')
def set_DestGroupOwnerId(self, DestGroupOwnerId): # Long
self.add_query_param('DestGroupOwnerId', DestGroupOwnerId)
def get_SecurityGroupRuleId(self): # String
return self.get_query_params().get('SecurityGroupRuleId')
def set_SecurityGroupRuleId(self, SecurityGroupRuleId): # String
self.add_query_param('SecurityGroupRuleId', SecurityGroupRuleId)
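# --- Illustrative usage (not part of the generated SDK code) ---
# A minimal sketch, assuming the standard aliyunsdkcore AcsClient API;
# the credentials, region, and rule values below are placeholders.
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = ModifySecurityGroupEgressRuleRequest()
#   request.set_SecurityGroupId('sg-xxxxxxxx')
#   request.set_SecurityGroupRuleId('sgr-xxxxxxxx')
#   request.set_Policy('accept')
#   request.set_IpProtocol('tcp')
#   request.set_PortRange('443/443')
#   request.set_DestCidrIp('0.0.0.0/0')
#   response = client.do_action_with_exception(request)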
| null |
1,770 |
##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import IECoreScene
import IECoreImage
import IECoreGL
IECoreGL.init( False )
import os.path
import os
import shutil
class CameraTest( unittest.TestCase ) :
def testPositioning( self ) :
# render a plane at z = 0 with the default camera
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "immediate" ) )
r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.join( os.path.dirname( __file__ ), "shaders" ) ) )
r.display( os.path.join( os.path.dirname( __file__ ), "output", "testCamera.tif" ), "tiff", "rgba", {} )
r.camera( "main", { "resolution" : IECore.V2iData( imath.V2i( 512 ) ), "projection" : IECore.StringData( "perspective" ) } )
r.worldBegin()
r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ) } )
IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -0.1 ), imath.V2f( 0.1 ) ) ).render( r )
r.worldEnd()
# check that nothing appears in the output image
i = IECore.Reader.create( os.path.join( os.path.dirname( __file__ ), "output", "testCamera.tif" ) ).read()
dimensions = i.dataWindow.size() + imath.V2i( 1 )
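		# index of the image's centre pixel in the flattened row-major channel data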
midpoint = dimensions.x * dimensions.y//2 + dimensions.x//2
self.assertEqual( i["G"][midpoint], 0 )
# render a plane at z = 0 with the camera moved back a touch to see it
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "immediate" ) )
r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.join( os.path.dirname( __file__ ), "shaders" ) ) )
r.display( os.path.join( os.path.dirname( __file__ ), "output", "testCamera.tif" ), "tiff", "rgba", {} )
r.transformBegin()
r.concatTransform( imath.M44f().translate( imath.V3f( 0, 0, 1 ) ) )
r.camera( "main", { "resolution" : IECore.V2iData( imath.V2i( 512 ) ), "projection" : IECore.StringData( "perspective" ) } )
r.transformEnd()
r.worldBegin()
r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ) } )
IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -0.1 ), imath.V2f( 0.1 ) ) ).render( r )
r.worldEnd()
# check that something appears in the output image
i = IECore.Reader.create( os.path.join( os.path.dirname( __file__ ), "output", "testCamera.tif" ) ).read()
dimensions = i.dataWindow.size() + imath.V2i( 1 )
midpoint = dimensions.x * dimensions.y//2 + dimensions.x//2
self.assertEqual( i["A"][midpoint], 1 )
def METHOD_NAME( self ) :
# render a red square at x==1, and a green one at y==1
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "immediate" ) )
r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.join( os.path.dirname( __file__ ), "shaders" ) ) )
r.display( os.path.join( os.path.dirname( __file__ ), "output", "testCamera.tif" ), "tiff", "rgba", {} )
r.transformBegin()
r.concatTransform( imath.M44f().translate( imath.V3f( 0, 0, 1 ) ) )
r.camera( "main", { "resolution" : IECore.V2iData( imath.V2i( 512 ) ) } )
r.transformEnd()
r.worldBegin()
r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ) } )
IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0.75, -0.25 ), imath.V2f( 1.25, 0.25 ) ) ).render( r )
r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( imath.Color3f( 0, 1, 0 ) ) } )
IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -0.25, 0.75 ), imath.V2f( 0.25, 1.25 ) ) ).render( r )
r.worldEnd()
# check we get the colors we'd expect where we expect them
i = IECore.Reader.create( os.path.join( os.path.dirname( __file__ ), "output", "testCamera.tif" ) ).read()
dimensions = i.dataWindow.size() + imath.V2i( 1 )
index = dimensions.x * dimensions.y//2 + dimensions.x - 1
self.assertEqual( i["A"][index], 1 )
self.assertAlmostEqual( i["R"][index], 1, 6 )
self.assertEqual( i["G"][index], 0 )
self.assertEqual( i["B"][index], 0 )
index = dimensions.x//2
self.assertEqual( i["A"][index], 1 )
self.assertEqual( i["R"][index], 0 )
self.assertAlmostEqual( i["G"][index], 1, 6 )
self.assertEqual( i["B"][index], 0 )
def setUp( self ) :
if not os.path.isdir( os.path.join( "test", "IECoreGL", "output" ) ) :
os.makedirs( os.path.join( "test", "IECoreGL", "output" ) )
def tearDown( self ) :
if os.path.isdir( os.path.join( "test", "IECoreGL", "output" ) ) :
shutil.rmtree( os.path.join( "test", "IECoreGL", "output" ) )
if __name__ == "__main__":
unittest.main()
| null |
1,771 |
# SPDX-License-Identifier: LGPL-2.1-or-later
import pytest
import libnmstate
from libnmstate.schema import Interface
from libnmstate.schema import InterfaceIPv4
from libnmstate.schema import InterfaceIPv6
from libnmstate.schema import InterfaceType
from ..testlib import cmdlib
from ..testlib import assertlib
IPV4_ADDRESS1 = "192.0.2.251"
IPV4_ADDRESS2 = "192.0.2.1"
IPV4_NET1 = "198.51.100.0/24"
IPV6_ADDRESS1 = "2001:db8:1::1"
IPV6_ADDRESS2 = "2001:db8:1::2"
IPV6_NET1 = "2001:db8:a::/64"
def test_get_applied_config_for_dhcp_state_with_dhcp_enabled_on_disk(eth1_up):
iface_state = eth1_up[Interface.KEY][0]
iface_name = iface_state[Interface.NAME]
cmdlib.exec_cmd(
f"nmcli c modify {iface_name} ipv4.method auto".split(), check=True
)
cmdlib.exec_cmd(
f"nmcli c modify {iface_name} ipv6.method auto".split(), check=True
)
assertlib.assert_state_match({Interface.KEY: [iface_state]})
@pytest.fixture
def eth1_up_with_auto_ip(eth1_up):
iface_name = eth1_up[Interface.KEY][0][Interface.NAME]
iface_state = {
Interface.NAME: iface_name,
Interface.IPV4: {
InterfaceIPv4.ENABLED: True,
InterfaceIPv4.DHCP: True,
},
Interface.IPV6: {
InterfaceIPv6.ENABLED: True,
InterfaceIPv6.DHCP: True,
InterfaceIPv6.AUTOCONF: True,
},
}
libnmstate.apply({Interface.KEY: [iface_state]})
yield iface_state
def test_get_applied_config_for_dhcp_state_with_dhcp_disabled_on_disk(
eth1_up_with_auto_ip,
):
iface_state = eth1_up_with_auto_ip
iface_name = iface_state[Interface.NAME]
cmdlib.exec_cmd(
f"nmcli c modify {iface_name} ipv4.method disabled".split(), check=True
)
cmdlib.exec_cmd(
f"nmcli c modify {iface_name} ipv6.method disabled".split(), check=True
)
assertlib.assert_state_match({Interface.KEY: [iface_state]})
@pytest.fixture
def METHOD_NAME():
cmdlib.exec_cmd("ip link set eth1 up".split(), check=True)
cmdlib.exec_cmd(
f"ip addr add {IPV4_ADDRESS1}/24 dev eth1 ".split(), check=True
)
cmdlib.exec_cmd(
f"ip -6 addr add {IPV6_ADDRESS1}/64 dev eth1 ".split(), check=True
)
cmdlib.exec_cmd(
f"ip route add {IPV4_NET1} via {IPV4_ADDRESS2} dev eth1 ".split(),
check=True,
)
cmdlib.exec_cmd(
f"ip -6 route add {IPV6_NET1} via {IPV6_ADDRESS2} dev eth1 ".split(),
check=True,
)
yield
cmdlib.exec_cmd("nmcli c down eth1".split())
cmdlib.exec_cmd("nmcli c del eth1".split())
def test_preserve_static_routes_created_by_iproute(
METHOD_NAME,
):
libnmstate.apply(
{
Interface.KEY: [
{
Interface.NAME: "eth1",
}
],
}
)
assert (
cmdlib.exec_cmd("nmcli -g ipv4.routes c show eth1".split())[1].strip()
== "198.51.100.0/24 192.0.2.1 0 table=254"
)
assert (
cmdlib.exec_cmd("nmcli -g ipv6.routes c show eth1".split())[1].strip()
== r"2001\:db8\:a\:\:/64 2001\:db8\:1\:\:2 1024 table=254"
)
@pytest.fixture
def eth1_up_with_nm_gateway(eth1_up):
desired_state = {
Interface.KEY: [
{
Interface.NAME: "eth1",
Interface.TYPE: InterfaceType.ETHERNET,
Interface.IPV4: {
InterfaceIPv4.ENABLED: True,
InterfaceIPv4.ADDRESS: [
{
InterfaceIPv4.ADDRESS_IP: IPV4_ADDRESS1,
InterfaceIPv4.ADDRESS_PREFIX_LENGTH: 24,
}
],
},
Interface.IPV6: {
InterfaceIPv6.ENABLED: True,
InterfaceIPv6.ADDRESS: [
{
InterfaceIPv6.ADDRESS_IP: IPV6_ADDRESS1,
InterfaceIPv6.ADDRESS_PREFIX_LENGTH: 64,
}
],
},
}
]
}
libnmstate.apply(desired_state)
cmdlib.exec_cmd(
f"nmcli c modify eth1 ipv4.gateway {IPV4_ADDRESS2}".split(),
check=True,
)
cmdlib.exec_cmd(
f"nmcli c modify eth1 ipv6.gateway {IPV6_ADDRESS2}".split(),
check=True,
)
cmdlib.exec_cmd(
"nmcli c up eth1".split(),
check=True,
)
yield
def test_switch_static_gateway_to_dhcp(eth1_up_with_nm_gateway):
libnmstate.apply(
{
Interface.KEY: [
{
Interface.NAME: "eth1",
Interface.IPV4: {
InterfaceIPv4.ENABLED: True,
InterfaceIPv4.DHCP: True,
},
Interface.IPV6: {
InterfaceIPv6.ENABLED: True,
InterfaceIPv6.DHCP: True,
InterfaceIPv6.AUTOCONF: True,
},
}
],
}
)
assert (
cmdlib.exec_cmd("nmcli -g ipv4.gateway c show eth1".split())[1].strip()
== ""
)
assert (
cmdlib.exec_cmd("nmcli -g ipv6.gateway c show eth1".split())[1].strip()
== ""
)
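# A hedged sketch of the desired-state pattern these tests exercise, outside
# of pytest; "eth1" is a placeholder interface name.
#   import libnmstate
#   from libnmstate.schema import Interface, InterfaceIPv4
#   libnmstate.apply({Interface.KEY: [{
#       Interface.NAME: "eth1",
#       Interface.IPV4: {InterfaceIPv4.ENABLED: True, InterfaceIPv4.DHCP: True},
#   }]})
#   current_state = libnmstate.show()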
| null |
1,772 |
#!/usr/bin/env python3
import cgi
import contextlib
import json
import os
import socketserver
import string
import sys
import typing as tp
from argparse import ArgumentParser
from http import HTTPStatus, server
from pathlib import Path
from pprint import pprint
from urllib import parse
from xonsh.built_ins import XSH
from xonsh.webconfig import tags as t
from xonsh.webconfig.file_writes import insert_into_xonshrc
from xonsh.webconfig.routes import Routes
RENDERERS: list[tp.Callable] = []
class XonshConfigHTTPRequestHandler(server.SimpleHTTPRequestHandler):
def _write_headers(self, *headers: "tuple[str, str]"):
for name, val in headers:
self.send_header(name, val)
self.end_headers()
def _write_data(self, data: "bytes|dict|str"):
if isinstance(data, bytes):
content_type = "text/html"
elif isinstance(data, dict):
content_type = "application/json"
data = json.dumps(data).encode()
else:
content_type = "text/html"
data = str(data).encode()
self._write_headers(
("Content-type", content_type),
("Content-Length", str(len(data))),
)
self.wfile.write(data)
self.wfile.flush()
def _send(
self,
data: "bytes|dict|str|None" = None,
status: "None|int" = None,
redirect: "str|None" = None,
):
status = status or (HTTPStatus.FOUND if redirect else HTTPStatus.OK)
self.send_response(status)
if data:
self._write_data(data)
elif redirect:
self._write_headers(
("Location", redirect),
)
def _read(self):
content_len = int(self.headers.get("content-length", 0))
return self.rfile.read(content_len)
def render_get(self, route):
try:
webconfig = Path(__file__).parent
except Exception:
            # in case __file__ is not defined (e.g. when running in a thread)
webconfig = Path.cwd()
path = webconfig / "index.html"
tmpl = string.Template(path.read_text())
navlinks = t.to_str(route.get_nav_links())
msgs = t.to_str(route.get_err_msgs())
body = t.to_str(route.get()) # type: ignore
data = tmpl.substitute(navlinks=navlinks, body=msgs + body)
return self._send(data)
def _get_route(self, method: str):
url = parse.urlparse(self.path)
route_cls = Routes.registry.get(url.path)
if route_cls and hasattr(route_cls, method):
params = parse.parse_qs(url.query)
return route_cls(url=url, params=params, xsh=XSH)
def do_GET(self) -> None:
route = self._get_route("get")
if route is not None:
return self.render_get(route)
return super().do_GET()
def _read_form(self):
ctype, pdict = cgi.parse_header(self.headers.get("content-type"))
# if ctype == "multipart/form-data":
# postvars = cgi.parse_multipart(self.rfile, pdict)
if ctype == "application/x-www-form-urlencoded":
return parse.parse_qs(self._read(), keep_blank_values=True)
return {}
def do_POST(self):
"""Reads post request body"""
route = self._get_route("post")
if route is not None:
# redirect after form submission
data = cgi.FieldStorage(
self.rfile,
headers=self.headers,
environ={"REQUEST_METHOD": "POST"},
keep_blank_values=True,
)
new_route = route.post(data) or route
return self._send(redirect=new_route.path)
post_body = self._read()
config = json.loads(post_body)
print("Web Config Values:")
pprint(config)
fname = insert_into_xonshrc(config)
print("Wrote out to " + fname)
self._send(b"received post request:<br>" + post_body)
def METHOD_NAME():
p = ArgumentParser("xonfig web")
p.add_argument(
"--no-browser",
"-n",
action="store_false",
dest="browser",
default=True,
help="don't open browser",
)
return p
def bind_server_to(
port: int = 8421, handler_cls=XonshConfigHTTPRequestHandler, browser=False
):
cls = socketserver.TCPServer
# cls = socketserver.ThreadingTCPServer # required ctrl+c twice ?
cls.allow_reuse_address = True
while port <= 9310:
try:
httpd = cls(("", port), handler_cls)
url = f"http://localhost:{port}"
print(f"Web config started at '{url}'. Hit Crtl+C to stop.")
if browser:
import webbrowser
webbrowser.open(url)
return httpd
except OSError:
            # unpack without shadowing the builtin name `type`
            _, value = sys.exc_info()[:2]
if "Address already in use" not in str(value):
raise
except KeyboardInterrupt:
break
port += 1
def serve(browser=False):
httpd = bind_server_to(browser=browser)
with contextlib.suppress(KeyboardInterrupt):
with httpd:
httpd.serve_forever()
def main(args=None):
from xonsh.main import setup
setup()
p = METHOD_NAME()
ns = p.parse_args(args=args)
webconfig_dir = os.path.dirname(__file__)
if webconfig_dir:
os.chdir(webconfig_dir)
serve(ns.browser)
if __name__ == "__main__":
# watchexec -r -e py -- python -m xonsh.webconfig --no-browser
main()
| null |
1,773 |
# coding: utf-8
"""
PKS
PKS API # noqa: E501
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AZ(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'cpi': 'str',
'cloud_properties': 'object'
}
attribute_map = {
'name': 'name',
'cpi': 'cpi',
'cloud_properties': 'cloud_properties'
}
def __init__(self, name=None, cpi=None, cloud_properties=None): # noqa: E501
"""AZ - a model defined in Swagger""" # noqa: E501
self._name = None
self._cpi = None
self._cloud_properties = None
self.discriminator = None
self.name = name
self.cpi = cpi
self.cloud_properties = cloud_properties
@property
def name(self):
"""Gets the name of this AZ. # noqa: E501
:return: The name of this AZ. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this AZ.
:param name: The name of this AZ. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def cpi(self):
"""Gets the cpi of this AZ. # noqa: E501
:return: The cpi of this AZ. # noqa: E501
:rtype: str
"""
return self._cpi
@cpi.setter
def cpi(self, cpi):
"""Sets the cpi of this AZ.
:param cpi: The cpi of this AZ. # noqa: E501
:type: str
"""
if cpi is None:
raise ValueError("Invalid value for `cpi`, must not be `None`") # noqa: E501
self._cpi = cpi
@property
def cloud_properties(self):
"""Gets the cloud_properties of this AZ. # noqa: E501
:return: The cloud_properties of this AZ. # noqa: E501
:rtype: object
"""
return self._cloud_properties
@cloud_properties.setter
def cloud_properties(self, cloud_properties):
"""Sets the cloud_properties of this AZ.
:param cloud_properties: The cloud_properties of this AZ. # noqa: E501
:type: object
"""
if cloud_properties is None:
raise ValueError("Invalid value for `cloud_properties`, must not be `None`") # noqa: E501
self._cloud_properties = cloud_properties
def METHOD_NAME(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.METHOD_NAME()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].METHOD_NAME())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AZ, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.METHOD_NAME())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AZ):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
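# A hedged usage sketch of the generated model; the field values are
# placeholders and no PKS API call is made here.
#   az = AZ(name='az-1', cpi='cpi-guid', cloud_properties={'datacenter': 'dc1'})
#   print(az.to_str())  # pretty-printed dict form of the model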
| null |
1,774 |
######################################################################
# BioSimSpace: Making biomolecular simulation a breeze!
#
# Copyright: 2017-2023
#
# Authors: Lester Hedges <[email protected]>
#
# BioSimSpace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BioSimSpace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BioSimSpace. If not, see <http://www.gnu.org/licenses/>.
#####################################################################
"""
This file provides a class that can be used to stub any
module that BioSimSpace fails to import. The class will
raise a ModuleNotFoundError with a clear instruction
to the user if any attempt is made to use a module that
has not been installed.
"""
__author__ = "Christopher Woods"
__email__ = "[email protected]"
__all__ = ["_module_stub", "_try_import", "_assert_imported", "_have_imported"]
_failed_modules = {}
class _ModuleStub:
def __init__(self, name: str, install_command: str):
self._name = name
if install_command is None:
self._install_command = f"conda install {name}"
else:
self._install_command = install_command
def __repr__(self):
return f"<stubmodule '{self._name}' from /could/not/be/imported>"
def __getattr__(self, key):
import BioSimSpace
message = (
f"Cannot continue as the module '{self._name}' "
"has not been installed. To continue, you "
"should install the module using the command "
f"'{self._install_command}'."
)
if BioSimSpace._isVerbose():
print(message)
raise ModuleNotFoundError(message)
def _module_stub(name: str, install_command: str = None):
"""
Return a ModuleStub that will raise a ModuleNotFoundError
if it is used in any way.
Parameters
----------
name : str
The name of the module being stubbed
install_command : str (optional)
The command used to install the module. If
this is not supplied, then it is assumed
to be 'conda install {name}'
Returns
-------
module : _ModuleStub
The stubbed module
"""
return _ModuleStub(name=name, install_command=install_command)
def METHOD_NAME(name: str, install_command: str = None):
"""
Try to import the module called 'name' and return
the resulting module. If this fails, catch the
error and instead return a _ModuleStub.
Parameters
----------
name : str
The name of the module being stubbed
install_command : str (optional)
The command used to install the module. If
this is not supplied, then it is assumed
to be 'conda install {name}'
Returns
-------
module : _ModuleStub | module
        The module if it loaded correctly, otherwise
a _ModuleStub for that module
"""
global _failed_modules
if name in _failed_modules:
return _failed_modules[name]
import importlib
try:
m = importlib.import_module(name)
except Exception as e:
m = _ModuleStub(name=name, install_command=install_command)
_failed_modules[name] = m
import BioSimSpace
if BioSimSpace._isVerbose():
print(f"Failed to import module {name}.")
print("Functionality that depends on this module will " "not be available.")
return m
def _assert_imported(module):
"""
Assert that the passed module has indeed been imported.
This will raise a ModuleNotFoundError if the module
has not been imported, and has instead been stubbed.
"""
if type(module) == _ModuleStub:
module.this_will_break()
def _have_imported(module) -> bool:
"""
Return whether or not the passed module has indeed
been imported (and thus is not stubbed).
"""
return type(module) != _ModuleStub
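# A hedged sketch of the intended usage pattern; the module name "mdtraj"
# and its install command are illustrative only.
#   _mdtraj = _try_import("mdtraj", install_command="conda install mdtraj")
#   if _have_imported(_mdtraj):
#       pass                       # real module: use it as normal
#   else:
#       _assert_imported(_mdtraj)  # raises ModuleNotFoundError with the
#                                  # install instructions from the stub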
| null |
1,775 |
# Copyright (c) 2017-2022 The Molecular Sciences Software Institute, Virginia Tech
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''
Reader for the gbasis format
'''
import re
from .. import lut, manip
from . import helpers
element_entry_re = re.compile(r'^([a-zA-Z]{1,3}):(.*):(.*)$')
shell_info_re = re.compile(r'^([a-zA-Z])\s+(\d+)\s+(\d+)$')
def METHOD_NAME(basis_lines):
'''Reads gbasis-formatted file data and converts it to a dictionary with the
usual BSE fields
GBASIS only supports electronic shells (no ecp)
Note that the gbasis format does not store all the fields we
have, so some fields are left blank
'''
basis_lines = helpers.prune_lines(basis_lines, '!#')
bs_data = {}
other_data = {}
    # The file just contains sections, each headed by
# a line looking like "Al:aug-cc-pV5+dZ:(21s13p6d4f3g2h) -> [8s7p6d4f3g2h]"
element_sections = helpers.partition_lines(basis_lines, element_entry_re.match, min_size=4)
found_basis = set()
for element_lines in element_sections:
# First line is element + basis name (already checked)
# Second is highest angular momentum
element_sym, basis_name, _ = helpers.parse_line_regex(element_entry_re, element_lines[0],
"Element entry: sym:basis:pattern")
element_Z = lut.element_Z_from_sym(element_sym, as_str=True)
# Add what basis we found
# We only support one basis per file
found_basis.add(basis_name)
element_data = manip.create_element_data(bs_data, element_Z, 'electron_shells')
max_am = helpers.parse_line_regex(r'^(\d+)$', element_lines[1], 'Highest AM')
# Split all the shells based on lines beginning with alpha character
shell_blocks = helpers.partition_lines(element_lines[2:], lambda x: x[0].isalpha())
# We know how many blocks there should be
if (max_am + 1) != len(shell_blocks):
raise RuntimeError("Different number of blocks for element {}. Expected {}, found {}".format(
element_sym, max_am + 1, len(shell_blocks)))
# Now loop over the blocks
found_am = []
for shell_lines in shell_blocks:
shell_am, nprim, ngen = helpers.parse_line_regex(shell_info_re, shell_lines[0], 'Shell: AM, nprim, ngen')
shell_am = lut.amchar_to_int(shell_am)
func_type = lut.function_type_from_am(shell_am, 'gto', 'spherical')
if len(shell_am) > 1:
raise RuntimeError("Fused AM not supported by gbasis")
if shell_am in found_am:
raise RuntimeError("Duplicate AM for element {}: AM {} already found".format(element_sym, shell_am[0]))
found_am.append(shell_am)
if shell_am[0] > max_am:
raise RuntimeError("Found AM greater than max AM: {}".format(shell_am[0]))
# Now parse the exponents & coefficients
exponents, coefficients = helpers.parse_primitive_matrix(shell_lines[1:], nprim=nprim, ngen=ngen)
shell = {
'function_type': func_type,
'region': '',
'angular_momentum': shell_am,
'exponents': exponents,
'coefficients': coefficients
}
element_data['electron_shells'].append(shell)
if len(found_basis) > 1:
raise RuntimeError("Multiple basis sets in a single file: " + str(found_basis))
return bs_data, other_data
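# A hedged illustration of the layout this reader expects, reconstructed from
# the regexes above; the primitives are the standard STO-3G hydrogen values,
# but the exact whitespace conventions are an assumption.
#   H:STO-3G:(3s) -> [1s]
#   0
#   s 3 1
#   3.42525091   0.15432897
#   0.62391373   0.53532814
#   0.16885540   0.44463454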
| null |
1,776 |
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
import numpy as np
import sys
import os
from collections import OrderedDict
# --- utils ---
from .utils import GetVariablesOnGraph
from .utils import GetCoefficientOnGraph
class ElasticWeightConsolidation:
def __init__(
self,
_y,
_out_FI_path=None,
_iter_num=100,
_apply_function_type_list=['Convolution'],
_calc_switch=False
):
# input
# _y : type=nn.Variable(), The generator output
# _out_FI_path : type=str, The Fisher Information weights result path
        # _iter_num : type=int, The number of iterations used to accumulate the Fisher Information.
# _apply_function_type_list : type=list of str, The function type names which EWC applies to.
        # _calc_switch : type=bool, Whether to force recalculation of the Fisher Information.
# [parameters]
self.y = _y
self.out_FI_path = _out_FI_path
self.iter_num = _iter_num
self.apply_function_type_list = _apply_function_type_list
# [variables]
self.FisherInformation_val_dict = None
self.coef_dict_for_FI = OrderedDict()
self.coef_dict_on_graph = None
self.FI_save_switch = True
# [hyper parameters]
self.FI_scope = 'FisherInformation'
# [preprocessing]
self.FisherInformation_val_dict = self.METHOD_NAME(
_calc_switch)
self._preprocessing()
def _preprocessing(self):
# --- all coefficients ---
GCG_class = GetCoefficientOnGraph()
self.y.visit(GCG_class)
self.coef_dict_on_graph = GCG_class.variables
# --- variables which EWC applies to ---
GVG_class = GetVariablesOnGraph()
self.y.visit(GVG_class)
for key in GVG_class.variables:
var = GVG_class.variables[key]
if var.parent.info.type_name in self.apply_function_type_list:
if len(var.parent.inputs) > 1:
for in_var in var.parent.inputs[1:]:
use_var = self._get_input_node(in_var)
if use_var is not None:
self.coef_dict_for_FI[use_var.name] = use_var
    def _get_input_node(self, _var, _already_read_list=None):
        # Use None as the default: a mutable default list would be shared
        # across calls and silently skip nodes visited in earlier calls.
        if _already_read_list is None:
            _already_read_list = []
if _var in self.coef_dict_on_graph.values():
return _var
else:
_already_read_list.append(_var)
if _var.parent is not None:
for in_var in _var.parent.inputs:
if in_var not in _already_read_list:
return self._get_input_node(in_var, _already_read_list)
def __call__(self, _out_var=None):
# input
# _out_var : type=nn.Variable(), The discriminator output
# --- self ---
# self.coef_dict : type=OrderedDict(), The coefficient dict of the synthesis network (This needs to be on the graph.)
# self.data_iterator : type=nnabla data iterator
# output
# loss : type=nn.Variable()
# --- Calculation of the Fisher Information ---
if _out_var is not None:
temp_need_grad = self.y.need_grad
self.y.need_grad = True
if len(self.FisherInformation_val_dict) == 0:
log_likelihood_var = F.log(F.sigmoid(_out_var))
for i in range(self.iter_num):
log_likelihood_var.forward(clear_no_need_grad=True)
self._zero_grad_all()
log_likelihood_var.backward(clear_buffer=True)
self._accumulate_grads()
sys.stdout.write(
'\rFisher Information Accumulating ... {}/{}'.format(i+1, self.iter_num))
sys.stdout.flush()
print('')
for key in self.FisherInformation_val_dict:
self.FisherInformation_val_dict[key] /= self.iter_num
self.y.need_grad = temp_need_grad
# --- make loss graph ---
loss = 0
for key in self.FisherInformation_val_dict:
key_source = key.replace(self.FI_scope + '/', '')
FI_var = nn.Variable.from_numpy_array(
self.FisherInformation_val_dict[key].copy())
FI_var.name = key
coef_source_var = nn.Variable.from_numpy_array(
self.coef_dict_for_FI[key_source].d.copy())
coef_source_var.name = key.replace(
self.FI_scope + '/', 'weight_source/')
loss += F.mean(FI_var *
(self.coef_dict_for_FI[key_source] - coef_source_var)**2)
# --- save Fisher Information ---
if self.FI_save_switch:
self._save_FisherInformation()
print('[ElasticWeightConsolidation] Success!')
return loss
def _save_FisherInformation(self):
if self.out_FI_path is not None:
os.makedirs(self.out_FI_path.replace(
self.out_FI_path.split(os.sep)[-1], ''), exist_ok=True)
np.savez(self.out_FI_path.replace('.npz', ''),
**self.FisherInformation_val_dict)
print(
'[ElasticWeightConsolidation] Save the calculated fisher information values to...')
print('[ElasticWeightConsolidation] {}'.format(self.out_FI_path))
def METHOD_NAME(self, _calc_switch):
# input
# _FI_path : type=string or None, Already calculated fisher information.
# output
# FI_dict : type=OrderedDict(), key=parameter name, value=np.ndarray
FI_dict = OrderedDict()
if self.out_FI_path is not None and os.path.isfile(self.out_FI_path) and not _calc_switch:
FI_dict = OrderedDict(np.load(self.out_FI_path))
self.FI_save_switch = False
print('[ElasticWeightConsolidation] Load EWC weights ... {}'.format(
self.out_FI_path))
return FI_dict
def _zero_grad_all(self):
for key in self.coef_dict_for_FI:
self.coef_dict_for_FI[key].g.fill(0)
def _accumulate_grads(self):
for key in self.coef_dict_for_FI:
if self.FI_scope + '/' + key not in self.FisherInformation_val_dict:
self.FisherInformation_val_dict[self.FI_scope +
'/' + key] = self.coef_dict_for_FI[key].g.copy()
else:
self.FisherInformation_val_dict[self.FI_scope +
'/' + key] += self.coef_dict_for_FI[key].g.copy()
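# A hedged sketch of the intended call pattern; the variable names are made
# up. The loss assembled in __call__ is a per-layer mean of
# FI_i * (w_i - w_i_source)**2, where FI_i is the log-likelihood gradient
# averaged over _iter_num forward/backward passes.
#   ewc = ElasticWeightConsolidation(generator_out,
#                                    _out_FI_path='./ewc_fisher.npz',
#                                    _iter_num=100)
#   ewc_loss = ewc(discriminator_out)  # add this term to the training loss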
| null |
1,777 |
# coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TextCNN model.
A standard 1D CNN model for sentence classification.
## References
[1]: Yoon Kim. Convolutional Neural Networks for Sentence Classification.
In _Empirical Methods in Natural Language Processing_, 2014.
https://www.aclweb.org/anthology/D14-1181/
"""
import tensorflow as tf
def METHOD_NAME(inputs,
vocab_size,
feature_size,
embed_size,
premade_embedding_arr=None):
"""Creates Embedding Layer.
Args:
inputs: (tf.Tensor) Input sentence in token indices format, shape
(batch_size, feature_size).
vocab_size: (int) Static size of vocabulary.
feature_size: (int) Static size of input feature.
embed_size: (int) Static size of hidden dimension of the embedding output.
premade_embedding_arr: (np.ndarray) Pre-made word embedding in numpy array
format, shape (vocab_size, embed_size).
Raises:
(ValueError): If shape of premade_embedding_arr is not
(vocab_size, embed_size).
Returns:
(tf.Tensor) shape (batch_size, feature_size, embed_size).
"""
# Make initializer.
if premade_embedding_arr is not None:
premade_vocab_size, premade_embed_size = premade_embedding_arr.shape
if premade_vocab_size != vocab_size or premade_embed_size != embed_size:
raise ValueError("'premade_embedding_arr' should have size ({}, {}). "
'Observed ({}, {})'.format(vocab_size, embed_size,
premade_vocab_size,
premade_embed_size))
embed_init = tf.keras.initializers.Constant(premade_embedding_arr)
else:
embed_init = tf.keras.initializers.RandomUniform(minval=-1, maxval=1)
# Define layer.
embedding_layer = tf.keras.layers.Embedding(
vocab_size,
embed_size,
input_length=feature_size,
embeddings_initializer=embed_init,
name='embedding')
return embedding_layer(inputs)
def conv_pooled_block(inputs, num_filters, filter_size, feature_size,
embed_size):
"""Creates convolution layer with max pooling.
Args:
inputs: (tf.Tensor) Input tensor, shape (batch_size, feature_size,
embed_size).
num_filters: (int) Number of filters to apply to input.
filter_size: (int) Static size of the convolutional filter.
feature_size: (int) Static size of input feature.
embed_size: (int) Static size of hidden dimension of the text embedding.
Returns:
(tf.Tensor) shape (batch_size, 1, num_filters).
"""
filter_shape = (filter_size, embed_size)
max_pool_shape = (feature_size - filter_size + 1, 1)
conv_layer = tf.keras.layers.Conv2D(
num_filters,
filter_shape,
strides=(1, 1),
padding='valid',
data_format='channels_last',
activation='relu',
kernel_initializer='glorot_normal',
bias_initializer=tf.keras.initializers.constant(0.1),
name='convolution_{:d}'.format(filter_size))
# Max pooling over sentence positions for each filter.
maxpool_layer = tf.keras.layers.MaxPool2D(
pool_size=max_pool_shape,
strides=(1, 1),
padding='valid',
data_format='channels_last',
name='max_pooling_{:d}'.format(filter_size))
conv = conv_layer(inputs)
return maxpool_layer(conv)
def textcnn(filter_sizes,
num_filters,
num_classes,
feature_size,
vocab_size,
embed_size,
dropout_rate,
l2,
premade_embedding_arr=None):
"""Builds TextCNN model.
Args:
filter_sizes: (list) A list specifying the sizes of the convolutional
filters.
num_filters: (int) Number of filters to apply to input.
num_classes: (int) Number of output classes.
feature_size: (int) Static size of input feature.
vocab_size: (int) Static size of vocabulary.
embed_size: (int) Static size of hidden dimension of the embedding output.
dropout_rate: (float) Fraction of the convolutional output units to drop.
    l2: (float) Strength of L2 regularization for weights in the output dense
layer.
premade_embedding_arr: (np.ndarray) Pre-made word embedding in numpy array
format, shape (vocab_size, embed_size).
Returns:
(tf.keras.Model) The TextCNN model.
"""
inputs = tf.keras.Input(shape=(feature_size,))
# Prepare word embedding for convolutional layers.
embed = METHOD_NAME(
inputs,
vocab_size,
feature_size,
embed_size,
premade_embedding_arr=premade_embedding_arr)
embed = tf.keras.layers.Reshape((feature_size, embed_size, 1),
name='add_channel')(
embed)
# Evaluate and gather conv layer output for each filter size.
pool_outputs = []
for filter_size in filter_sizes:
pool = conv_pooled_block(embed, num_filters, filter_size, feature_size,
embed_size)
pool_outputs.append(pool)
pool_outputs = tf.keras.layers.concatenate(
pool_outputs, axis=-1, name='concatenate')
# Flatten and apply dropout.
flat_outputs = tf.keras.layers.Flatten(
data_format='channels_last', name='flatten')(
pool_outputs)
flat_outputs = tf.keras.layers.Dropout(
dropout_rate, name='dropout')(
flat_outputs)
# Dense output.
dense_output_layer = tf.keras.layers.Dense(
num_classes,
activation=None,
kernel_initializer='glorot_normal',
bias_initializer=tf.keras.initializers.constant(0.1),
kernel_regularizer=tf.keras.regularizers.l2(l2),
bias_regularizer=tf.keras.regularizers.l2(l2),
name='dense')
outputs = dense_output_layer(flat_outputs)
return tf.keras.Model(inputs=inputs, outputs=outputs)
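# A hedged sketch of building the model; the hyperparameters below are
# illustrative, not a tuned configuration.
#   model = textcnn(filter_sizes=[3, 4, 5],
#                   num_filters=128,
#                   num_classes=2,
#                   feature_size=100,
#                   vocab_size=10000,
#                   embed_size=300,
#                   dropout_rate=0.5,
#                   l2=1e-4)
#   model.summary()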
| null |
1,778 |
# coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PSL utils to load PSL constraint model."""
from typing import Any, Dict, List, Sequence
import tensorflow as tf
from psl import data as data_utils # local file import from experimental.language_structure
from psl import psl_model # local file import from experimental.language_structure
from psl import psl_model_dstc_synthetic # local file import from experimental.language_structure
from psl import psl_model_multiwoz # local file import from experimental.language_structure
from vrnn import data_preprocessor as preprocessor # local file import from experimental.language_structure
_INPUT_ID_NAME = preprocessor.INPUT_ID_NAME
_MULTIWOZ_SYNTH = 'multiwoz_synth'
_SGD_SYNTH = 'sgd_synth'
_SGD = 'sgd'
_SGD_DOMAIN_ADAPATION = 'sgd_domain_adapation'
def get_psl_model(dataset: str, rule_names: List[str],
rule_weights: List[float], **kwargs) -> psl_model.PSLModel:
"""Constraints PSL constraint model."""
psl_model_cls_map = {
_MULTIWOZ_SYNTH: psl_model_multiwoz.PSLModelMultiWoZ,
_SGD_SYNTH: psl_model_dstc_synthetic.PSLModelDSTCSynthetic,
_SGD: psl_model_dstc_synthetic.PSLModelDSTCSynthetic,
_SGD_DOMAIN_ADAPATION: psl_model_dstc_synthetic.PSLModelDSTCSynthetic,
}
if dataset in psl_model_cls_map:
psl_model_cls = psl_model_cls_map[dataset]
return psl_model_cls(rule_weights, rule_names, **kwargs)
raise ValueError('Supported PSL constraint for dataset {}, found {}.'.format(
', '.join(psl_model_cls_map.keys()), dataset))
def _get_keyword_ids_per_class(dataset: str, config: Dict[str, Any],
vocab: Sequence[str]) -> Sequence[Sequence[int]]:
"""Gets keyword ids for each class in the PSL constraint model."""
vocab_mapping = {word: word_id for word_id, word in enumerate(vocab)}
keyword_ids_per_class = []
if dataset == _MULTIWOZ_SYNTH:
keys = [
'accept_words', 'cancel_words', 'end_words', 'greet_words',
'info_question_words', 'insist_words', 'slot_question_words'
]
for key in keys:
keyword_ids = [
vocab_mapping[word] for word in config[key] if word in vocab_mapping
]
keyword_ids_per_class.append(keyword_ids)
return keyword_ids_per_class
def _create_psl_features(
user_utterance_ids: tf.Tensor, system_utterance_ids: tf.Tensor,
config: Dict[str, Any], dataset: str,
keyword_ids_per_class: Sequence[Sequence[int]]) -> tf.Tensor:
"""Creates features for PSL constraint model."""
  # Note: the tuple comma matters here; `(_MULTIWOZ_SYNTH)` is a plain string,
  # so `not in` would perform a substring test instead of a membership test.
  if dataset not in (_MULTIWOZ_SYNTH,):
return tf.concat([user_utterance_ids, system_utterance_ids], axis=-1)
features = data_utils.create_features(
user_utterance_ids,
system_utterance_ids,
keyword_ids_per_class,
      check_keyword_by_utterance=dataset == _SGD_SYNTH,
include_keyword_value=config['includes_word'],
exclude_keyword_value=config['excludes_word'],
pad_utterance_mask_value=config['pad_utterance_mask'],
utterance_mask_value=config['utterance_mask'],
last_utterance_mask_value=config['last_utterance_mask'])
return features
def psl_feature_mixin(fn: Any, dataset: str, psl_config: Dict[str, Any],
vocab: Sequence[str]):
"""Creates PSL feature generation mixin.
Args:
fn: dataset processing function converting the dataset into VRNN features.
dataset: dataset name.
psl_config: PSL config to create features.
vocab: vocabulary list.
Returns:
decorated `fn` to include PSL input features generation.
"""
keyword_ids_per_class = _get_keyword_ids_per_class(dataset, psl_config, vocab)
def _run(inputs: Sequence[tf.Tensor]):
encoder_input_1, encoder_input_2 = inputs[:2]
psl_inputs = _create_psl_features(encoder_input_1[_INPUT_ID_NAME],
encoder_input_2[_INPUT_ID_NAME],
psl_config, dataset,
keyword_ids_per_class)
return (*inputs, psl_inputs)
return lambda inputs: _run(fn(inputs))
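# A hedged sketch of wiring the mixin into a tf.data pipeline; `process_fn`,
# `psl_config`, `vocab`, and `dataset` are placeholders.
#   wrapped_fn = psl_feature_mixin(process_fn, _MULTIWOZ_SYNTH, psl_config, vocab)
#   dataset = dataset.map(wrapped_fn)  # each element gains a psl_inputs tensor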
def _copy_model_weights(weights: List[tf.Tensor]) -> List[tf.Tensor]:
"""Copies a list of model weights."""
weights_copy = []
for layer in weights:
weights_copy.append(tf.identity(layer))
return weights_copy
def METHOD_NAME(model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer, model_inputs: Any,
get_logits_fn: Any, psl_constraint: psl_model.PSLModel,
psl_inputs: tf.Tensor, grad_steps: int,
alpha: float) -> tf.Tensor:
"""Test step for gradient based weight updates.
Args:
model: keras model generating the logits
optimizer: keras optimizer
model_inputs: model input features
get_logits_fn: the function deriving the logits from the model outputs.
    psl_constraint: differentiable PSL constraints
psl_inputs: psl input features
grad_steps: number of gradient steps taken to try and satisfy the
constraints
alpha: parameter to determine how important it is to keep the constrained
weights close to the trained unconstrained weights
Returns:
    Logits after satisfying constraints.
"""
@tf.function
def test_step(model_inputs: Any, psl_inputs: tf.Tensor,
weights: Sequence[tf.Tensor]):
"""Update weights by satisfing test constraints."""
with tf.GradientTape() as tape:
model_outputs = model(model_inputs, training=False)
logits = get_logits_fn(model_outputs)
constraint_loss = psl_constraint.compute_loss(psl_inputs, logits)
weight_loss = tf.reduce_sum([
tf.reduce_mean(tf.math.squared_difference(w, w_h))
for w, w_h in zip(weights, model.trainable_weights)
])
loss = constraint_loss + alpha * weight_loss
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
weights_copy = _copy_model_weights(model.trainable_weights)
for _ in tf.range(tf.cast(grad_steps, dtype=tf.int32)):
test_step(model_inputs, psl_inputs, weights=weights_copy)
model_outputs = model(model_inputs)
logits = get_logits_fn(model_outputs)
for var, weight in zip(model.trainable_variables, weights_copy):
var.assign(weight)
return logits
| null |
1,779 |
from typing import Any, Dict, List, Optional, Tuple
from boa3.internal.model.builtin.method.builtinmethod import IBuiltinMethod
from boa3.internal.model.expression import IExpression
from boa3.internal.model.type.collection.sequence.sequencetype import SequenceType
from boa3.internal.model.type.collection.sequence.tupletype import TupleType
from boa3.internal.model.type.itype import IType
from boa3.internal.model.variable import Variable
from boa3.internal.neo.vm.opcode import OpcodeHelper
from boa3.internal.neo.vm.opcode.Opcode import Opcode
class MaxMethod(IBuiltinMethod):
def __init__(self, arg_value: Optional[IType] = None):
from boa3.internal.model.type.type import Type
identifier = 'max'
self._allowed_types = [Type.int, Type.str, Type.bytes]
default_type = Type.int
if not self._is_valid_type(arg_value):
arg_value = default_type
args: Dict[str, Variable] = {
'args1': Variable(arg_value),
'args2': Variable(arg_value)
}
vararg = ('values', Variable(arg_value))
super().__init__(identifier, args, return_type=arg_value, vararg=vararg)
def _is_valid_type(self, arg_type: Optional[IType]) -> bool:
return (isinstance(arg_type, IType) and
any(allowed_type.is_type_of(arg_type) for allowed_type in self._allowed_types))
@property
def identifier(self) -> str:
from boa3.internal.model.type.type import Type
if self._arg_values.type is Type.int:
return self._identifier
return '-{0}_from_{1}'.format(self._identifier, self._arg_values.type._identifier)
@property
def _arg_values(self) -> Variable:
return self._vararg[1]
def validate_parameters(self, *params: IExpression) -> bool:
if len(params) != 1:
return False
if not isinstance(params[0], IExpression):
return False
return isinstance(params[0].type, SequenceType)
@property
def _opcode(self) -> List[Tuple[Opcode, bytes]]:
from boa3.internal.compiler.codegenerator import get_bytes_count
from boa3.internal.neo.vm.type.Integer import Integer
jmp_place_holder = (Opcode.JMP, b'\x01')
verify_number_of_parameters = [ # verifies if the stack has 2 or 3 items
(Opcode.DEPTH, b''),
(Opcode.PUSH2, b''),
(Opcode.JMPEQ, b''),
jmp_place_holder
]
if_n_parameters_gt_2 = [ # if number of items in stack is 3 (2 ints and one tuple)
(Opcode.REVERSE3, b''),
(Opcode.UNPACK, b''),
(Opcode.INC, b''),
(Opcode.INC, b''),
jmp_place_holder # skips the next block of instructions
]
if_n_parameters_eq_2 = [ # if number of items in stack is 2 (2 ints)
(Opcode.PUSH2, b'')
]
jmp_n_parameters_eq_2 = OpcodeHelper.get_jump_and_data(Opcode.JMP, get_bytes_count(if_n_parameters_eq_2), True)
if_n_parameters_gt_2[-1] = jmp_n_parameters_eq_2
jmp_n_parameters_gt_2 = OpcodeHelper.get_jump_and_data(Opcode.JMPEQ, get_bytes_count(if_n_parameters_gt_2))
verify_number_of_parameters[-1] = jmp_n_parameters_gt_2
repack_array = [ # pack all the arguments in the array
(Opcode.PACK, b''),
]
is_int_initialize = [ # puts the last array element as the max value
(Opcode.DUP, b''), # index = len(array) - 1
(Opcode.SIZE, b''),
(Opcode.DEC, b''),
(Opcode.OVER, b''),
(Opcode.OVER, b''),
(Opcode.PICKITEM, b''), # max = array[index]
]
is_int_while = [ # this will get the next number in the array and compare it with the current max
(Opcode.SWAP, b''), # index--
(Opcode.DEC, b''),
(Opcode.SWAP, b''),
(Opcode.PUSH2, b''),
(Opcode.PICK, b''),
(Opcode.PUSH2, b''),
(Opcode.PICK, b''), # max = max if max > array[index] else array[index]
(Opcode.PICKITEM, b''),
]
is_int_while.extend(self._compare_values())
is_int_while.extend([
(Opcode.OVER, b''),
(Opcode.SIGN, b'')
# if index != 0: go back to index--
# else go to the end
])
jmp_back_to_while_statement = (Opcode.JMPIF, Integer(-get_bytes_count(is_int_while)).to_byte_array(signed=True))
is_int_while.append(jmp_back_to_while_statement)
clean_stack = [ # removes everything but max
(Opcode.REVERSE3, b''),
(Opcode.DROP, b''),
(Opcode.DROP, b''),
]
return (
verify_number_of_parameters +
if_n_parameters_gt_2 +
if_n_parameters_eq_2 +
repack_array +
is_int_initialize +
is_int_while +
clean_stack
)
def _compare_values(self) -> List[Tuple[Opcode, bytes]]:
return [(Opcode.MAX, b'')]
@property
def _args_on_stack(self) -> int:
return len(self.args)
@property
def _body(self) -> Optional[str]:
return
def METHOD_NAME(self, value: Any) -> IBuiltinMethod:
if isinstance(value, list) and len(value) > 0:
value = value[0]
if isinstance(value, TupleType):
value = value.value_type
if type(value) == type(self._arg_values.type):
return self
from boa3.internal.model.builtin.method.maxbytestringmethod import MaxByteStringMethod
from boa3.internal.model.builtin.method.maxintmethod import MaxIntMethod
from boa3.internal.model.type.type import Type
if Type.str.is_type_of(value) or Type.bytes.is_type_of(value):
return MaxByteStringMethod(value)
return MaxIntMethod(value)
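# Illustrative sketch (not part of the compiler): the METHOD_NAME hook above
# re-binds the generic builtin to a type-specialized implementation. Type is
# the same helper already imported inside this class; the expected class name
# is an assumption based on the branches above.
if __name__ == '__main__':
    from boa3.internal.model.type.type import Type
    generic_max = MaxMethod()                       # defaults to int arguments
    specialized = generic_max.METHOD_NAME(Type.str)
    print(type(specialized).__name__)               # expected: MaxByteStringMethod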
| null |
1,780 |
######################################################################
# BioSimSpace: Making biomolecular simulation a breeze!
#
# Copyright: 2017-2023
#
# Authors: Lester Hedges <[email protected]>
#
# BioSimSpace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BioSimSpace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BioSimSpace. If not, see <http://www.gnu.org/licenses/>.
#####################################################################
"""An angle type."""
__author__ = "Lester Hedges"
__email__ = "[email protected]"
__all__ = ["Angle"]
from sire.legacy import Units as _SireUnits
from ._type import Type as _Type
class Angle(_Type):
"""An angle type."""
# A list of the supported Sire unit names.
_sire_units = ["radian", "degree"]
# Dictionary of allowed units.
_supported_units = {"RADIAN": _SireUnits.radian, "DEGREE": _SireUnits.degree}
# Map unit abbreviations to the full name.
_abbreviations = {"R": "RADIAN", "D": "DEGREE"}
# Print format.
_print_format = {"RADIAN": "radian", "DEGREE": "degree"}
# Documentation strings.
_doc_strings = {"RADIAN": "An angle in radians.", "DEGREE": "An angle in degrees."}
# Default unit, used to avoid issues when printing configargparse help.
_default_unit = "RADIAN"
# The dimension mask:
# Angle, Charge, Length, Mass, Quantity, Temperature, Time
_dimensions = (1, 0, 0, 0, 0, 0, 0)
def __init__(self, *args):
"""
Constructor.
``*args`` can be a value and unit, or a string representation
of the angle, e.g. "3 radians".
Parameters
----------
value : float
The value.
unit : str
The unit.
string : str
A string representation of the angle.
Examples
--------
Create an object representing an angle of 3.14 radians then
print the angle in degrees.
>>> import BioSimSpace as BSS
>>> angle = BSS.Types.Angle(3.14, "R")
>>> print(angle.degrees())
The same as above, except passing a string representation of the
angle to the constructor.
>>> import BioSimSpace as BSS
>>> angle = BSS.Types.Angle("3.14 R")
>>> print(angle.degrees())
The string matching is extremely flexible, so all of the following
would be valid arguments: "3.14 R", "3.14 radians", "314e-2 Radians".
"""
# Call the base class constructor.
super().__init__(*args)
def __str__(self):
"""Return a human readable string representation of the object."""
abbrev = self._print_format[self._unit]
if self._value != 1:
if abbrev[-1] != "s":
abbrev = abbrev + "s"
if abs(self._value) > 1e4 or abs(self._value) < 1e-4:
return "%.4e %s" % (self._value, abbrev)
else:
return "%5.4f %s" % (self._value, abbrev)
def METHOD_NAME(self):
"""
Return the angle in radians.
Returns
-------
angle : :class:`Angle <BioSimSpace.Types.Angle>`
The angle in radians.
"""
return Angle(
(self._value * self._supported_units[self._unit]).to(_SireUnits.radian),
"RADIAN",
)
def degrees(self):
"""
Return the angle in degrees.
Returns
-------
angle : :class:`Angle <BioSimSpace.Types.Angle>`
The angle in degrees.
"""
return Angle(
(self._value * self._supported_units[self._unit]).to(_SireUnits.degree),
"DEGREE",
)
def _to_default_unit(self, mag=None):
"""
Internal method to return an object of the same type in the default unit.
Parameters
----------
mag : float
The value (optional).
Returns
-------
angle : :class:`Angle <BioSimSpace.Types.Angle>`
The angle in the default unit of radians.
"""
if mag is None:
return self.METHOD_NAME()
else:
return Angle(mag, "RADIAN")
def _convert_to(self, unit):
"""
Return the angle in a different unit.
Parameters
----------
unit : str
The unit to convert to.
Returns
-------
angle : :class:`Angle <BioSimSpace.Types.Angle>`
The angle in the specified unit.
"""
if unit == "RADIAN":
return self.METHOD_NAME()
elif unit == "DEGREE":
return self.degrees()
else:
raise ValueError(
"Supported units are: '%s'" % list(self._supported_units.keys())
)
def _validate_unit(self, unit):
"""Validate that the unit are supported."""
# Strip whitespace and convert to upper case.
unit = unit.replace(" ", "").upper()
# Strip any "S" characters.
unit = unit.replace("S", "")
# Strip "EGREE".
unit = unit.replace("EGREE", "")
# Strip "EG".
unit = unit.replace("EG", "")
# Strip "ADIAN".
unit = unit.replace("ADIAN", "")
# Strip "AD".
unit = unit.replace("AD", "")
# Check that the unit is supported.
if unit in self._supported_units:
return unit
elif unit in self._abbreviations:
return self._abbreviations[unit]
else:
raise ValueError(
"Supported units are: '%s'" % list(self._supported_units.keys())
)
@staticmethod
def _to_sire_format(unit):
"""
Reformat the unit string so it adheres to the Sire unit formatting.
Parameters
----------
unit : str
A string representation of the unit.
Returns
-------
sire_unit : str
The unit string in Sire compatible format.
"""
# First, handle plurals and abbreviations.
unit = unit.replace("radians", "rad")
unit = unit.replace("radian", "rad")
unit = unit.replace("rads", "rad")
# Now convert back to correct format.
unit = unit.replace("rad", "radian")
# Convert powers. (Limited selection, for now.)
unit = unit.replace("radian2", "(radian*radian)")
unit = unit.replace("radian3", "(radian*radian*radian)")
unit = unit.replace("degree2", "(degree*degree)")
unit = unit.replace("degree3", "(degree*degree*degree)")
unit = unit.replace("radian-1", "(1/(radian))")
unit = unit.replace("radian-2", "(1/(radian*radian))")
unit = unit.replace("radian-3", "(1/(radian*radian*radian))")
unit = unit.replace("degree-1", "(1/(degree))")
unit = unit.replace("degree-2", "(1/(degree*degree))")
unit = unit.replace("degree-3", "(1/(degree*degree*degree))")
return unit
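# A short usage sketch (illustrative addition, mirroring the class docstring):
# METHOD_NAME above plays the role of a radians() accessor, complementing
# degrees().
if __name__ == "__main__":
    angle = Angle(180, "D")
    print(angle)                # 180.0000 degrees
    print(angle.METHOD_NAME())  # ~3.1416 radians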
| null |
1,781 |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Tests the text output of Google C++ Mocking Framework.
To update the golden file:
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
from io import open # pylint: disable=redefined-builtin, g-importing-member
import os
import re
import sys
from googlemock.test import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveReportHeaderAndFooter(output):
"""Removes Google Test result report's header and footer from the output."""
output = re.sub(r'.*gtest_main.*\n', '', output)
output = re.sub(r'\[.*\d+ tests.*\n', '', output)
output = re.sub(r'\[.* test environment .*\n', '', output)
output = re.sub(r'\[=+\] \d+ tests .* ran.*', '', output)
output = re.sub(r'.* FAILED TESTS\n', '', output)
return output
def RemoveLocations(output):
"""Removes all file location info from a Google Test program's output.
Args:
output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\:', 'FILE:#:', output)
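# For example (illustrative inputs):
#   'src/foo_test.cc:123: Failure'   becomes 'FILE:#: Failure'
#   'src\foo_test.cc(123): Failure'  becomes 'FILE:#: Failure'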
def NormalizeErrorMarker(output):
"""Normalizes the error marker, which is different on Windows vs on Linux."""
return re.sub(r' error: ', ' Failure\n', output)
def RemoveMemoryAddresses(output):
"""Removes memory addresses from the test output."""
return re.sub(r'@\w+', '@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
"""Removes the test names of leaked mock objects from the test output."""
return re.sub(r'\(used in test .+\) ', '', output)
def GetLeakyTests(output):
"""Returns a list of test names that leak mock objects."""
# findall() returns a list of all matches of the regex in output.
# For example, if '(used in test FooTest.Bar)' is in output, the
# list will contain 'FooTest.Bar'.
return re.findall(r'\(used in test (.+)\)', output)
def GetNormalizedOutputAndLeakyTests(output):
"""Normalizes the output of gmock_output_test_.
Args:
output: The test output.
Returns:
A tuple (the normalized test output, the list of test names that have
leaked mocks).
"""
output = ToUnixLineEnding(output)
output = RemoveReportHeaderAndFooter(output)
output = NormalizeErrorMarker(output)
output = RemoveLocations(output)
output = RemoveMemoryAddresses(output)
return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))
def METHOD_NAME(cmd):
"""Runs a command in a sub-process, and returns its STDOUT in a string."""
return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
"""Runs a command and returns its normalized output and a list of leaky tests.
Args:
cmd: the shell command.
"""
# Disables exception pop-ups on Windows.
os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
return GetNormalizedOutputAndLeakyTests(METHOD_NAME(cmd))
class GMockOutputTest(gmock_test_utils.TestCase):
def testOutput(self):
(output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'rb')
golden = golden_file.read().decode('utf-8')
golden_file.close()
# The normalized output should match the golden file.
self.assertEqual(golden, output)
# The raw output should contain 2 leaked mock object errors for
# test GMockOutputTest.CatchesLeakedMocks.
self.assertEqual(['GMockOutputTest.CatchesLeakedMocks',
'GMockOutputTest.CatchesLeakedMocks'],
leaky_tests)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
(output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'wb')
# output is a str but the golden file is opened in binary mode, so encode it.
golden_file.write(output.encode('utf-8'))
golden_file.close()
# Suppress the error "googletest was imported but a call to its main()
# was never detected."
os._exit(0)
else:
gmock_test_utils.Main()
| null |
1,782 |
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Actor.py: Contains the actor class.
import GemRB
import GUICommon
from GUIDefines import *
from ie_stats import *
#this import is primarily for the tables
import CommonTables
##############################################################################
## GLOBALS TO BE INITIALIZED ONCE
##############################################################################
dualswap = None
classcount = None
levelslots = [IE_LEVEL, IE_LEVEL2, IE_LEVEL3]
class Actor:
"""Holds information of a PC."""
def __init__ (self, pc):
"""Load up basic information."""
#setup our basic Actor
self.METHOD_NAME (pc)
#setup globals if they are blank
if dualswap is None:
self.__setup_globals ()
def __setup_globals (self):
"""Initializes all globals used for quick referencing.
Will only be called by the first Actor created."""
global classcount, dualswap
classcount = CommonTables.Classes.GetRowCount ()
dualswap = [0]*classcount
for i in range(classcount):
rowname = CommonTables.Classes.GetRowName(i)
classid = CommonTables.Classes.GetValue (rowname, "ID")
classnames = rowname.split("_")
#set the MC_WAS_ID of the first class
if len(classnames) == 2:
dualswap[classid-1] = CommonTables.Classes.GetValue (rowname, "MC_WAS_ID")
def Classes (self):
"""Returns a list with all the class IDs."""
if self.__classes is None:
#already reversed in ClassNames
self.__classes = [CommonTables.Classes.GetValue (name, "ID", GTV_INT) for name in self.ClassNames()]
return self.__classes
def ClassNames (self):
"""Returns a list will all the class names."""
if self.__classnames is None:
self.__classnames = GUICommon.GetClassRowName (self.classid, "class").split("_")
if self.IsDualSwap():
self.__classnames.reverse()
return self.__classnames
def ClassTitle (self):
"""Returns the class title as a displayable string."""
if self.__classtitle is not None:
return self.__classtitle
self.__classtitle = GemRB.GetPlayerStat (self.pc, IE_TITLE1)
self.ClassNames()
if self.__classtitle == 0:
if self.multiclass and self.isdual == 0:
self.__classtitle = CommonTables.Classes.GetValue ("_".join(self.__classnames), "CAP_REF", GTV_REF)
elif self.isdual:
# first (previous) kit or class of the dual class
self.Classes()
if self.KitIndex():
self.__classtitle = CommonTables.KitList.GetValue (self.__kitindex, 2, GTV_REF)
else:
self.__classtitle = CommonTables.Classes.GetValue (self.__classnames[1], "CAP_REF", GTV_REF)
self.__classtitle = self.__classtitle + " / " + \
CommonTables.Classes.GetValue (self.__classnames[0], "CAP_REF", GTV_REF)
else: # ordinary class or kit
if self.KitIndex():
self.__classtitle = CommonTables.KitList.GetValue (self.__kitindex, 2, GTV_REF)
else:
self.__classtitle = CommonTables.Classes.GetValue ("_".join(self.__classnames), "CAP_REF", GTV_REF)
if self.__classtitle == "*":
self.__classtitle = 0
return self.__classtitle
def IsDualSwap (self):
"""Returns true if IE_LEVEL is opposite of expectations."""
if self.__dualswap is None:
self.__dualswap = (self.isdual & CommonTables.Classes.GetValue \
(self.ClassNames()[0], "MC_WAS_ID", GTV_INT)) > 0
return self.__dualswap
def KitIndex (self):
"""Returns the kit index in relation to kitlist.2da."""
if self.__kitindex is not None:
return self.__kitindex
Kit = GemRB.GetPlayerStat (self.pc, IE_KIT)
self.__kitindex = 0
if Kit & 0xc000 == 0x4000:
self.__kitindex = Kit & 0xfff
# carefully looking for kit by the usability flag
# since the barbarian kit id clashes with the no-kit value
if self.__kitindex == 0 and Kit != 0x4000:
self.__kitindex = CommonTables.KitList.FindValue (6, Kit)
if self.__kitindex is None:
self.__kitindex = 0
return self.__kitindex
def LevelDiffs (self):
"""Returns the differences between the current and next classes."""
return [(next-current) for current,next in zip(self.Levels(),
self.NextLevels())]
def Levels (self):
"""Returns the current level of each class."""
if self.__levels is None:
self.__levels = [level for slot in levelslots for level \
in [GemRB.GetPlayerStat (self.pc, slot)] if level>0]
if self.IsDualSwap():
self.__levels.reverse()
return self.__levels
def NextLevelExp (self):
"""Returns the experience required to level each class."""
#filtering the old dual class out seems unnecessary
#just be sure to use NumClasses() or isdual to check
return [CommonTables.NextLevel.GetValue (name, str(level+1)) for name,level \
in zip(self.ClassNames(), self.Levels())]
def NextLevels (self):
"""Returns the next level for each class."""
if self.__nextlevels is not None:
return self.__nextlevels
xp = GemRB.GetPlayerStat (self.pc, IE_XP) // self.NumClasses()
self.__nextlevels = []
for name, level in zip(self.ClassNames(), self.Levels() ):
nextLevel = level
#we only want the current level for the old part of a dual-class
if len(self.__nextlevels) < self.__numclasses:
for current in range(level+1, CommonTables.NextLevel.GetColumnCount () ):
if CommonTables.NextLevel.GetValue (name, str(current)) <= xp:
nextLevel = current
else:
break
self.__nextlevels.append(nextLevel)
return self.__nextlevels
def NumClasses (self):
"""Returns the number of *active* classes."""
if self.__numclasses is None:
if self.isdual:
self.__numclasses = 1
else:
self.__numclasses = len(self.ClassNames() )
return self.__numclasses
def RaceName (self):
"""Returns the race string."""
pass
def METHOD_NAME (self, pc):
"""Resets all internal variables.
This should be called after any fundemental changes to the pc.
This includes: dualclassing, leveling."""
#accessible variables
self.pc = pc
self.classid = GemRB.GetPlayerStat (self.pc, IE_CLASS)
self.isdual = GemRB.GetPlayerStat (self.pc, IE_MC_FLAGS) & MC_WAS_ANY_CLASS
self.multiclass = CommonTables.Classes.GetValue (GUICommon.GetClassRowName (pc), "MULTI")
#internal variables - these are only initialized on the first
#call to their respective function, and stored thereafter
self.__classes = None
self.__classnames = None
self.__classtitle = None
self.__dualswap = None
self.__kitindex = None
self.__levels = None
self.__nextlevels = None
self.__numclasses = None
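# Illustrative usage sketch (not part of the original script): requires the
# GemRB runtime, where pc indices come from the party API.
def _example_usage ():
    pc = GemRB.GameGetSelectedPCSingle ()
    actor = Actor (pc)
    print (actor.ClassNames (), actor.Levels (), actor.ClassTitle ())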
| null |
1,783 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SaveBatchTaskForCreatingOrderActivateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain-intl', '2017-12-18', 'SaveBatchTaskForCreatingOrderActivate','domain')
def get_OrderActivateParams(self):
return self.get_query_params().get('OrderActivateParams')
def set_OrderActivateParams(self,OrderActivateParams):
for i in range(len(OrderActivateParams)):
if OrderActivateParams[i].get('Country') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.Country' , OrderActivateParams[i].get('Country'))
if OrderActivateParams[i].get('SubscriptionDuration') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.SubscriptionDuration' , OrderActivateParams[i].get('SubscriptionDuration'))
if OrderActivateParams[i].get('Address') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.Address' , OrderActivateParams[i].get('Address'))
if OrderActivateParams[i].get('PermitPremiumActivation') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.PermitPremiumActivation' , OrderActivateParams[i].get('PermitPremiumActivation'))
if OrderActivateParams[i].get('TelArea') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.TelArea' , OrderActivateParams[i].get('TelArea'))
if OrderActivateParams[i].get('City') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.City' , OrderActivateParams[i].get('City'))
if OrderActivateParams[i].get('Dns2') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.Dns2' , OrderActivateParams[i].get('Dns2'))
if OrderActivateParams[i].get('Dns1') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.Dns1' , OrderActivateParams[i].get('Dns1'))
if OrderActivateParams[i].get('DomainName') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.DomainName' , OrderActivateParams[i].get('DomainName'))
if OrderActivateParams[i].get('RegistrantProfileId') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.RegistrantProfileId' , OrderActivateParams[i].get('RegistrantProfileId'))
if OrderActivateParams[i].get('RegistrantType') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.RegistrantType' , OrderActivateParams[i].get('RegistrantType'))
if OrderActivateParams[i].get('Telephone') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.Telephone' , OrderActivateParams[i].get('Telephone'))
if OrderActivateParams[i].get('TrademarkDomainActivation') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.TrademarkDomainActivation' , OrderActivateParams[i].get('TrademarkDomainActivation'))
if OrderActivateParams[i].get('AliyunDns') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.AliyunDns' , OrderActivateParams[i].get('AliyunDns'))
if OrderActivateParams[i].get('RegistrantOrganization') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.RegistrantOrganization' , OrderActivateParams[i].get('RegistrantOrganization'))
if OrderActivateParams[i].get('TelExt') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.TelExt' , OrderActivateParams[i].get('TelExt'))
if OrderActivateParams[i].get('Province') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.Province' , OrderActivateParams[i].get('Province'))
if OrderActivateParams[i].get('PostalCode') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.PostalCode' , OrderActivateParams[i].get('PostalCode'))
if OrderActivateParams[i].get('EnableDomainProxy') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.EnableDomainProxy' , OrderActivateParams[i].get('EnableDomainProxy'))
if OrderActivateParams[i].get('Email') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.Email' , OrderActivateParams[i].get('Email'))
if OrderActivateParams[i].get('RegistrantName') is not None:
self.add_query_param('OrderActivateParam.' + str(i + 1) + '.RegistrantName' , OrderActivateParams[i].get('RegistrantName'))
def get_PromotionNo(self):
return self.get_query_params().get('PromotionNo')
def set_PromotionNo(self,PromotionNo):
self.add_query_param('PromotionNo',PromotionNo)
def get_UserClientIp(self):
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self,UserClientIp):
self.add_query_param('UserClientIp',UserClientIp)
def METHOD_NAME(self):
return self.get_query_params().get('CouponNo')
def set_CouponNo(self,CouponNo):
self.add_query_param('CouponNo',CouponNo)
def get_UseCoupon(self):
return self.get_query_params().get('UseCoupon')
def set_UseCoupon(self,UseCoupon):
self.add_query_param('UseCoupon',UseCoupon)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_UsePromotion(self):
return self.get_query_params().get('UsePromotion')
def set_UsePromotion(self,UsePromotion):
self.add_query_param('UsePromotion',UsePromotion)
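# Illustrative usage sketch (not part of the generated SDK file): credentials,
# region, and all field values below are placeholders.
def _example_usage():
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'ap-southeast-1')
    request = SaveBatchTaskForCreatingOrderActivateRequest()
    request.set_OrderActivateParams([{
        'DomainName': 'example.com',
        'SubscriptionDuration': 1,
        'RegistrantType': '1',
        'Email': '[email protected]',
    }])
    request.set_UseCoupon(False)
    print(client.do_action_with_exception(request))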
| null |
1,784 |
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from mock import MagicMock, Mock
from pipeline.hpc.autoscaler import GridEngineScaleUpHandler
from pipeline.hpc.host import MemoryHostStorage
from pipeline.hpc.instance.provider import Instance
from utils import assert_first_argument_contained
try:
from queue import Queue
except ImportError:
from Queue import Queue
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s [%(threadName)s] [%(levelname)s] %(message)s')
HOSTNAME = 'hostname'
POD_IP = '127.0.0.1'
RUN_ID = '12345'
cmd_executor = Mock()
grid_engine = Mock()
api = Mock()
host_storage = MemoryHostStorage()
instance_helper = Mock()
parent_run_id = 'parent_run_id'
instance_disk = 'instance_disk'
instance_type = 'instance_type'
instance_image = 'instance_image'
cmd_template = 'cmd_template'
price_type = 'price_type'
owner = 'owner'
owner_param_name = 'owner_param_name'
region_id = 1
instance_cores = 4
polling_timeout = 600
instance = Instance(name='instance', price_type=price_type, cpu=4, mem=16, gpu=0)
queue_name = 'main.q'
hostlist = '@allhosts'
run_id_queue = Queue()
scale_up_handler = GridEngineScaleUpHandler(cmd_executor=cmd_executor, api=api, grid_engine=grid_engine,
host_storage=host_storage, parent_run_id=parent_run_id,
instance_disk=instance_disk, instance_image=instance_image,
cmd_template=cmd_template, price_type=price_type,
region_id=region_id, queue=queue_name, hostlist=hostlist,
owner_param_name=owner_param_name,
polling_timeout=polling_timeout, polling_delay=0)
def setup_function():
not_initialized_run = {'initialized': False, 'podId': HOSTNAME}
initialized_pod_run = {'initialized': False, 'podId': HOSTNAME, 'podIP': POD_IP}
initialized_run = {'initialized': True, 'podId': HOSTNAME, 'podIP': POD_IP}
api.load_run = MagicMock(side_effect=[not_initialized_run] * 4 + [initialized_pod_run] * 4 + [initialized_run])
api.load_task = MagicMock(return_value=[{'status': 'SUCCESS'}])
cmd_executor.execute_to_lines = MagicMock(return_value=[RUN_ID])
instance_helper.select_instance = MagicMock(return_value=
Instance.from_cp_response({
"sku": "78J32SRETMXEPY86",
"name": "c5.xlarge",
"termType": "OnDemand",
"operatingSystem": "Linux",
"memory": 96,
"memoryUnit": "GiB",
"instanceFamily": "Compute optimized",
"gpu": 0,
"regionId": 1,
"vcpu": instance_cores
}))
cmd_executor.execute = MagicMock()
grid_engine.enable_host = MagicMock()
host_storage.clear()
def test_waiting_for_run_to_initialize():
scale_up_handler.scale_up(instance, owner, run_id_queue)
api.load_run.assert_called()
assert api.load_run.call_count == 9
def test_enabling_worker_in_grid_engine():
scale_up_handler.scale_up(instance, owner, run_id_queue)
grid_engine.enable_host.assert_called_with(HOSTNAME)
def test_updating_hosts():
scale_up_handler.scale_up(instance, owner, run_id_queue)
assert_first_argument_contained(cmd_executor.execute, 'add_to_hosts')
assert_first_argument_contained(cmd_executor.execute, HOSTNAME)
assert_first_argument_contained(cmd_executor.execute, POD_IP)
def METHOD_NAME():
scale_up_handler.scale_up(instance, owner, run_id_queue)
assert [HOSTNAME] == host_storage.load_hosts()
| null |
1,785 |
# coding: utf-8
"""
TKG Kubernetes API
This API provides to vCD tenants the means to provision (create and update) Tanzu Kubernetes Grid clusters. This is complementary to the defined-entity APIs: GET /cloudapi/1.0.0/entities/urn:vcloud:entity:vmware.tkgcluster:1.0.0:{id} which allows to retrieve the clusters created by the API presented here. This is why you will not find here a GET operation for the corresponding entity. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from container_service_extension.client.tkgclient.models.tkg_cluster_topology_control_plane import TkgClusterTopologyControlPlane # noqa: F401,E501
from container_service_extension.client.tkgclient.models.tkg_cluster_topology_workers import TkgClusterTopologyWorkers # noqa: F401,E501
class TkgClusterSpecTopology(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'control_plane': 'TkgClusterTopologyControlPlane',
'workers': 'TkgClusterTopologyWorkers'
}
attribute_map = {
'control_plane': 'controlPlane',
'workers': 'workers'
}
def __init__(self, control_plane=None, workers=None): # noqa: E501
"""TkgClusterSpecTopology - a model defined in Swagger""" # noqa: E501
self._control_plane = None
self._workers = None
self.discriminator = None
self.control_plane = control_plane
self.workers = workers
@property
def control_plane(self):
"""Gets the control_plane of this TkgClusterSpecTopology. # noqa: E501
:return: The control_plane of this TkgClusterSpecTopology. # noqa: E501
:rtype: TkgClusterTopologyControlPlane
"""
return self._control_plane
@control_plane.setter
def control_plane(self, control_plane):
"""Sets the control_plane of this TkgClusterSpecTopology.
:param control_plane: The control_plane of this TkgClusterSpecTopology. # noqa: E501
:type: TkgClusterTopologyControlPlane
"""
if control_plane is None:
raise ValueError("Invalid value for `control_plane`, must not be `None`") # noqa: E501
self._control_plane = control_plane
@property
def workers(self):
"""Gets the workers of this TkgClusterSpecTopology. # noqa: E501
:return: The workers of this TkgClusterSpecTopology. # noqa: E501
:rtype: TkgClusterTopologyWorkers
"""
return self._workers
@workers.setter
def workers(self, workers):
"""Sets the workers of this TkgClusterSpecTopology.
:param workers: The workers of this TkgClusterSpecTopology. # noqa: E501
:type: TkgClusterTopologyWorkers
"""
if workers is None:
raise ValueError("Invalid value for `workers`, must not be `None`") # noqa: E501
self._workers = workers
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def METHOD_NAME(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.METHOD_NAME()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TkgClusterSpecTopology):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
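# Illustrative usage sketch (not part of the generated model): assumes the
# generated sub-models accept no-argument construction, as swagger-codegen
# models usually do. METHOD_NAME above is the to-string helper backing __repr__.
if __name__ == '__main__':
    topology = TkgClusterSpecTopology(
        control_plane=TkgClusterTopologyControlPlane(),
        workers=TkgClusterTopologyWorkers())
    print(topology.METHOD_NAME())  # pretty-printed dict representation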
| null |
1,786 |
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.symbols.Symbol import Symbol
from slicc.symbols.Type import Type
class Func(Symbol):
def __init__(
self,
table,
ident,
name,
location,
return_type,
param_types,
param_strings,
body,
pairs,
):
super().__init__(table, ident, location, pairs)
self.return_type = return_type
self.param_types = param_types
self.param_strings = param_strings
self.body = body
self.isInternalMachineFunc = False
self.c_ident = ident
self.c_name = name
self.class_name = ""
def __repr__(self):
return ""
@property
def prototype(self):
if "external" in self:
return ""
return_type = self.return_type.c_ident
void_type = self.symtab.find("void", Type)
if "return_by_ref" in self and self.return_type != void_type:
return_type += "&"
elif "return_by_pointer" in self and self.return_type != void_type:
return_type += "*"
return f"{return_type} {self.c_name}({', '.join(self.param_strings)});"
def METHOD_NAME(self, path, includes):
return
def checkArguments(self, args):
if len(args) != len(self.param_types):
self.error(
"Wrong number of arguments passed to function : '%s'"
+ " Expected %d, got %d",
self.c_ident,
len(self.param_types),
len(args),
)
cvec = []
type_vec = []
for expr, expected_type in zip(args, self.param_types):
# Check the types of the parameter
actual_type, param_code = expr.inline(True)
if (
str(actual_type) != "OOD"
and str(actual_type) != str(expected_type)
and str(actual_type["interface"]) != str(expected_type)
):
expr.error(
f"Type mismatch: expected: {expected_type} actual: {actual_type}"
)
cvec.append(param_code)
type_vec.append(expected_type)
return cvec, type_vec
def generateCode(self):
"""This write a function of object Chip"""
if "external" in self:
return ""
code = self.symtab.codeFormatter()
# Generate function header
void_type = self.symtab.find("void", Type)
return_type = self.return_type.c_ident
if "return_by_ref" in self and self.return_type != void_type:
return_type += "&"
if "return_by_pointer" in self and self.return_type != void_type:
return_type += "*"
params = ", ".join(self.param_strings)
code(
"""
$return_type
${{self.class_name}}::${{self.c_name}}($params)
{
${{self.body}}
}
"""
)
return str(code)
__all__ = ["Func"]
| null |
1,787 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class AllocateDedicatedHostsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'AllocateDedicatedHosts','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_CpuOverCommitRatio(self): # Float
return self.get_query_params().get('CpuOverCommitRatio')
def set_CpuOverCommitRatio(self, CpuOverCommitRatio): # Float
self.add_query_param('CpuOverCommitRatio', CpuOverCommitRatio)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_MinQuantity(self): # Integer
return self.get_query_params().get('MinQuantity')
def set_MinQuantity(self, MinQuantity): # Integer
self.add_query_param('MinQuantity', MinQuantity)
def get_ActionOnMaintenance(self): # String
return self.get_query_params().get('ActionOnMaintenance')
def set_ActionOnMaintenance(self, ActionOnMaintenance): # String
self.add_query_param('ActionOnMaintenance', ActionOnMaintenance)
def get_DedicatedHostClusterId(self): # String
return self.get_query_params().get('DedicatedHostClusterId')
def METHOD_NAME(self, DedicatedHostClusterId): # String
self.add_query_param('DedicatedHostClusterId', DedicatedHostClusterId)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_DedicatedHostType(self): # String
return self.get_query_params().get('DedicatedHostType')
def set_DedicatedHostType(self, DedicatedHostType): # String
self.add_query_param('DedicatedHostType', DedicatedHostType)
def get_AutoRenewPeriod(self): # Integer
return self.get_query_params().get('AutoRenewPeriod')
def set_AutoRenewPeriod(self, AutoRenewPeriod): # Integer
self.add_query_param('AutoRenewPeriod', AutoRenewPeriod)
def get_Period(self): # Integer
return self.get_query_params().get('Period')
def set_Period(self, Period): # Integer
self.add_query_param('Period', Period)
def get_Quantity(self): # Integer
return self.get_query_params().get('Quantity')
def set_Quantity(self, Quantity): # Integer
self.add_query_param('Quantity', Quantity)
def get_DedicatedHostName(self): # String
return self.get_query_params().get('DedicatedHostName')
def set_DedicatedHostName(self, DedicatedHostName): # String
self.add_query_param('DedicatedHostName', DedicatedHostName)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_AutoReleaseTime(self): # String
return self.get_query_params().get('AutoReleaseTime')
def set_AutoReleaseTime(self, AutoReleaseTime): # String
self.add_query_param('AutoReleaseTime', AutoReleaseTime)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_PeriodUnit(self): # String
return self.get_query_params().get('PeriodUnit')
def set_PeriodUnit(self, PeriodUnit): # String
self.add_query_param('PeriodUnit', PeriodUnit)
def get_AutoRenew(self): # Boolean
return self.get_query_params().get('AutoRenew')
def set_AutoRenew(self, AutoRenew): # Boolean
self.add_query_param('AutoRenew', AutoRenew)
def get_NetworkAttributesSlbUdpTimeout(self): # Integer
return self.get_query_params().get('NetworkAttributes.SlbUdpTimeout')
def set_NetworkAttributesSlbUdpTimeout(self, NetworkAttributesSlbUdpTimeout): # Integer
self.add_query_param('NetworkAttributes.SlbUdpTimeout', NetworkAttributesSlbUdpTimeout)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_AutoPlacement(self): # String
return self.get_query_params().get('AutoPlacement')
def set_AutoPlacement(self, AutoPlacement): # String
self.add_query_param('AutoPlacement', AutoPlacement)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType)
def get_NetworkAttributesUdpTimeout(self): # Integer
return self.get_query_params().get('NetworkAttributes.UdpTimeout')
def set_NetworkAttributesUdpTimeout(self, NetworkAttributesUdpTimeout): # Integer
self.add_query_param('NetworkAttributes.UdpTimeout', NetworkAttributesUdpTimeout)
| null |
1,788 |
from __future__ import annotations
import logging
import os
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Citations import Citations
from xia2.lib.bits import transpose_loggraph
logger = logging.getLogger("xia2.Wrappers.CCP4.Ctruncate")
def Ctruncate(DriverType=None):
"""A factory for CtruncateWrapper classes."""
DriverInstance = DriverFactory.Driver(DriverType)
class CtruncateWrapper(DriverInstance.__class__):
"""A wrapper for Ctruncate, using the regular Driver."""
def __init__(self):
# generic things
DriverInstance.__class__.__init__(self)
Citations.cite("ccp4")
self.set_executable(os.path.join(os.environ.get("CBIN", ""), "ctruncate"))
self._anomalous = False
self._nres = 0
self._b_factor = 0.0
self._moments = None
# numbers of reflections in and out, and number of absences
# counted
self._nref_in = 0
self._nref_out = 0
self._nabsent = 0
self._xmlout = None
def set_hklin(self, hklin):
self._hklin = hklin
def set_hklout(self, hklout):
self._hklout = hklout
def set_nres(self, nres):
self._nres = nres
def set_anomalous(self, anomalous):
self._anomalous = anomalous
def get_xmlout(self):
return self._xmlout
def truncate(self):
"""Actually perform the truncation procedure."""
if not self._hklin:
raise RuntimeError("hklin not defined")
if not self._hklout:
raise RuntimeError("hklout not defined")
self.add_command_line("-hklin")
self.add_command_line(self._hklin)
self.add_command_line("-hklout")
self.add_command_line(self._hklout)
if self._nres:
self.add_command_line("-nres")
self.add_command_line("%d" % self._nres)
if self._anomalous:
self.add_command_line("-colano")
self.add_command_line("/*/*/[I(+),SIGI(+),I(-),SIGI(-)]")
self.add_command_line("-colin")
self.add_command_line("/*/*/[IMEAN,SIGIMEAN]")
self._xmlout = os.path.join(
self.get_working_directory(), "%d_truncate.xml" % self.get_xpid()
)
self.add_command_line("-xmlout")
self.add_command_line(self._xmlout)
self.start()
self.close_wait()
try:
self.check_for_errors()
except RuntimeError as e:
try:
os.remove(self._hklout)
except Exception:
pass
logger.debug(str(e))
raise RuntimeError("ctruncate failure")
nref = 0
for record in self.get_all_output():
if "Number of reflections:" in record:
nref = int(record.split()[-1])
if "Estimate of Wilson B factor:" in record:
self._b_factor = float(record.split(":")[1].split()[0])
self._nref_in, self._nref_out = nref, nref
self._nabsent = 0
moments = None
results = self.METHOD_NAME()
if "Acentric moments of E using Truncate method" in results:
moments = transpose_loggraph(
results["Acentric moments of E using Truncate method"]
)
elif "Acentric moments of I" in results:
moments = transpose_loggraph(results["Acentric moments of I"])
elif "Acentric moments of E" in results:
moments = transpose_loggraph(results["Acentric moments of E"])
else:
logger.debug("Acentric moments of E/I not found")
self._moments = moments
def get_b_factor(self):
return self._b_factor
def get_moments(self):
return self._moments
def get_nref_in(self):
return self._nref_in
def get_nref_out(self):
return self._nref_out
def get_nabsent(self):
return self._nabsent
def METHOD_NAME(self):
"""Look through the standard output of the program for
CCP4 loggraph text. When this is found store it in a
local dictionary to allow exploration."""
# reset the loggraph store
self._loggraph = {}
output = self.get_all_output()
for i in range(len(output)):
line = output[i]
if "$TABLE" in line:
n_dollar = line.count("$$")
current = line.split(":")[1].replace(">", "").strip()
self._loggraph[current] = {}
self._loggraph[current]["columns"] = []
self._loggraph[current]["data"] = []
loggraph_info = ""
while n_dollar < 4:
n_dollar += line.count("$$")
loggraph_info += line
if n_dollar == 4:
break
i += 1
line = output[i]
tokens = loggraph_info.split("$$")
self._loggraph[current]["columns"] = tokens[1].split()
if len(tokens) < 4:
raise RuntimeError('loggraph "%s" broken' % current)
data = tokens[3].split("\n")
columns = len(self._loggraph[current]["columns"])
for record in data:
record = record.split()
if len(record) == columns:
self._loggraph[current]["data"].append(record)
return self._loggraph
return CtruncateWrapper()
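# Illustrative usage sketch (not part of the original wrapper): requires a
# CCP4 environment (with $CBIN set) and an input MTZ; the file names are
# placeholders.
def _example_usage():
    ctruncate = Ctruncate()
    ctruncate.set_hklin("scaled.mtz")
    ctruncate.set_hklout("truncated.mtz")
    ctruncate.set_anomalous(True)
    ctruncate.truncate()
    print(ctruncate.get_b_factor(), ctruncate.get_nref_out())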
| null |
1,789 |
"""
API operations allowing clients to determine Galaxy instance's capabilities
and configuration settings.
"""
import logging
from typing import (
Any,
Dict,
List,
Optional,
)
from fastapi import Path
from galaxy.managers.configuration import ConfigurationManager
from galaxy.managers.context import ProvidesUserContext
from galaxy.schema.fields import DecodedDatabaseIdField
from galaxy.schema.schema import UserModel
from galaxy.webapps.galaxy.api import (
depends,
DependsOnTrans,
Router,
)
from galaxy.webapps.galaxy.api.common import (
parse_serialization_params,
SerializationKeysQueryParam,
SerializationViewQueryParam,
)
log = logging.getLogger(__name__)
router = Router(tags=["configuration"])
EncodedIdPathParam = Path(
...,
title="Encoded id",
description="Encoded id to be decoded",
)
@router.cbv
class FastAPIConfiguration:
configuration_manager: ConfigurationManager = depends(ConfigurationManager)
@router.get(
"/api/whoami",
summary="Return information about the current authenticated user",
response_description="Information about the current authenticated user",
)
def whoami(self, trans: ProvidesUserContext = DependsOnTrans) -> Optional[UserModel]:
"""Return information about the current authenticated user."""
return _user_to_model(trans.user)
@router.get(
"/api/configuration",
summary="Return an object containing exposable configuration settings",
response_description="Object containing exposable configuration settings",
)
def index(
self,
trans: ProvidesUserContext = DependsOnTrans,
view: Optional[str] = SerializationViewQueryParam,
keys: Optional[str] = SerializationKeysQueryParam,
) -> Dict[str, Any]:
"""
Return an object containing exposable configuration settings.
A more complete list is returned if the user is an admin.
Pass in `view` and a comma-separated list of keys to control which
configuration settings are returned.
"""
return _index(self.configuration_manager, trans, view, keys)
@router.get(
"/api/version",
summary="Return Galaxy version information: major/minor version, optional extra info",
response_description="Galaxy version information: major/minor version, optional extra info",
)
def METHOD_NAME(self) -> Dict[str, Any]:
"""Return Galaxy version information: major/minor version, optional extra info."""
return self.configuration_manager.METHOD_NAME()
@router.get(
"/api/configuration/dynamic_tool_confs",
require_admin=True,
summary="Return dynamic tool configuration files",
response_description="Dynamic tool configuration files",
)
def dynamic_tool_confs(self) -> List[Dict[str, str]]:
"""Return dynamic tool configuration files."""
return self.configuration_manager.dynamic_tool_confs()
@router.get(
"/api/configuration/decode/{encoded_id}",
require_admin=True,
summary="Decode a given id",
response_description="Decoded id",
)
def decode_id(self, encoded_id: str = EncodedIdPathParam) -> Dict[str, int]:
"""Decode a given id."""
return self.configuration_manager.decode_id(encoded_id)
@router.get(
"/api/configuration/tool_lineages",
require_admin=True,
summary="Return tool lineages for tools that have them",
response_description="Tool lineages for tools that have them",
)
def tool_lineages(self) -> List[Dict[str, Dict]]:
"""Return tool lineages for tools that have them."""
return self.configuration_manager.tool_lineages()
@router.put(
"/api/configuration/toolbox", require_admin=True, summary="Reload the Galaxy toolbox (but not individual tools)"
)
def reload_toolbox(self):
"""Reload the Galaxy toolbox (but not individual tools)."""
self.configuration_manager.reload_toolbox()
def _user_to_model(user):
if user:
return UserModel.construct(**user.to_dict(view="element", value_mapper={"id": DecodedDatabaseIdField.encode}))
return None
def _index(manager: ConfigurationManager, trans, view, keys):
serialization_params = parse_serialization_params(view, keys, "all")
return manager.get_configuration(trans, serialization_params)
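# Illustrative usage sketch (not part of the original module): exercising the
# endpoints above over HTTP with the requests library; the server URL and API
# key are placeholders.
def _example_usage():
    import requests

    base = "https://galaxy.example.org"
    headers = {"x-api-key": "<api-key>"}
    print(requests.get(f"{base}/api/version").json())
    print(requests.get(f"{base}/api/whoami", headers=headers).json())
    print(
        requests.get(
            f"{base}/api/configuration",
            headers=headers,
            params={"keys": "brand,version_major"},
        ).json()
    )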
| null |
1,790 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalb.endpoint import endpoint_data
class CreateServerGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Alb', '2020-06-16', 'CreateServerGroup','alb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ServerGroupName(self): # String
return self.get_query_params().get('ServerGroupName')
def set_ServerGroupName(self, ServerGroupName): # String
self.add_query_param('ServerGroupName', ServerGroupName)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_HealthCheckConfig(self): # Struct
return self.get_query_params().get('HealthCheckConfig')
def set_HealthCheckConfig(self, HealthCheckConfig): # Struct
if HealthCheckConfig.get('HealthCheckCodes') is not None:
for index1, value1 in enumerate(HealthCheckConfig.get('HealthCheckCodes')):
self.add_query_param('HealthCheckConfig.HealthCheckCodes.' + str(index1 + 1), value1)
if HealthCheckConfig.get('HealthCheckEnabled') is not None:
self.add_query_param('HealthCheckConfig.HealthCheckEnabled', HealthCheckConfig.get('HealthCheckEnabled'))
if HealthCheckConfig.get('HealthCheckTimeout') is not None:
self.add_query_param('HealthCheckConfig.HealthCheckTimeout', HealthCheckConfig.get('HealthCheckTimeout'))
if HealthCheckConfig.get('HealthCheckMethod') is not None:
self.add_query_param('HealthCheckConfig.HealthCheckMethod', HealthCheckConfig.get('HealthCheckMethod'))
if HealthCheckConfig.get('HealthCheckHost') is not None:
self.add_query_param('HealthCheckConfig.HealthCheckHost', HealthCheckConfig.get('HealthCheckHost'))
if HealthCheckConfig.get('HealthCheckProtocol') is not None:
self.add_query_param('HealthCheckConfig.HealthCheckProtocol', HealthCheckConfig.get('HealthCheckProtocol'))
if HealthCheckConfig.get('UnhealthyThreshold') is not None:
self.add_query_param('HealthCheckConfig.UnhealthyThreshold', HealthCheckConfig.get('UnhealthyThreshold'))
if HealthCheckConfig.get('HealthyThreshold') is not None:
self.add_query_param('HealthCheckConfig.HealthyThreshold', HealthCheckConfig.get('HealthyThreshold'))
if HealthCheckConfig.get('HealthCheckTcpFastCloseEnabled') is not None:
self.add_query_param('HealthCheckConfig.HealthCheckTcpFastCloseEnabled', HealthCheckConfig.get('HealthCheckTcpFastCloseEnabled'))
if HealthCheckConfig.get('HealthCheckPath') is not None:
self.add_query_param('HealthCheckConfig.HealthCheckPath', HealthCheckConfig.get('HealthCheckPath'))
if HealthCheckConfig.get('HealthCheckInterval') is not None:
self.add_query_param('HealthCheckConfig.HealthCheckInterval', HealthCheckConfig.get('HealthCheckInterval'))
if HealthCheckConfig.get('HealthCheckHttpCodes') is not None:
for index1, value1 in enumerate(HealthCheckConfig.get('HealthCheckHttpCodes')):
self.add_query_param('HealthCheckConfig.HealthCheckHttpCodes.' + str(index1 + 1), value1)
if HealthCheckConfig.get('HealthCheckHttpVersion') is not None:
self.add_query_param('HealthCheckConfig.HealthCheckHttpVersion', HealthCheckConfig.get('HealthCheckHttpVersion'))
if HealthCheckConfig.get('HealthCheckConnectPort') is not None:
self.add_query_param('HealthCheckConfig.HealthCheckConnectPort', HealthCheckConfig.get('HealthCheckConnectPort'))
def get_Scheduler(self): # String
return self.get_query_params().get('Scheduler')
def set_Scheduler(self, Scheduler): # String
self.add_query_param('Scheduler', Scheduler)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Protocol(self): # String
return self.get_query_params().get('Protocol')
def set_Protocol(self, Protocol): # String
self.add_query_param('Protocol', Protocol)
def get_ServiceName(self): # String
return self.get_query_params().get('ServiceName')
def set_ServiceName(self, ServiceName): # String
self.add_query_param('ServiceName', ServiceName)
def get_StickySessionConfig(self): # Struct
return self.get_query_params().get('StickySessionConfig')
def set_StickySessionConfig(self, StickySessionConfig): # Struct
if StickySessionConfig.get('StickySessionEnabled') is not None:
self.add_query_param('StickySessionConfig.StickySessionEnabled', StickySessionConfig.get('StickySessionEnabled'))
if StickySessionConfig.get('Cookie') is not None:
self.add_query_param('StickySessionConfig.Cookie', StickySessionConfig.get('Cookie'))
if StickySessionConfig.get('CookieTimeout') is not None:
self.add_query_param('StickySessionConfig.CookieTimeout', StickySessionConfig.get('CookieTimeout'))
if StickySessionConfig.get('StickySessionType') is not None:
self.add_query_param('StickySessionConfig.StickySessionType', StickySessionConfig.get('StickySessionType'))
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ServerGroupType(self): # String
return self.get_query_params().get('ServerGroupType')
def set_ServerGroupType(self, ServerGroupType): # String
self.add_query_param('ServerGroupType', ServerGroupType)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_UchConfig(self): # Struct
return self.get_query_params().get('UchConfig')
def set_UchConfig(self, UchConfig): # Struct
if UchConfig.get('Type') is not None:
self.add_query_param('UchConfig.Type', UchConfig.get('Type'))
if UchConfig.get('Value') is not None:
self.add_query_param('UchConfig.Value', UchConfig.get('Value'))
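# A minimal usage sketch; the credentials, region, and parameter values
# below are placeholders, not values taken from this file:
#
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = CreateServerGroupRequest()
#     request.set_ServerGroupName('demo-server-group')
#     request.set_VpcId('vpc-xxxxxxxx')
#     response = client.do_action_with_exception(request)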
| null |
1,791 |
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import stat
import uuid
from toil.common import Toil
from toil.fileStores import FileID
from toil.job import Job
from toil.exceptions import FailedJobsException
from toil.test import ToilTest, slow
class ImportExportFileTest(ToilTest):
def setUp(self):
super().setUp()
self.tmp_dir = self._createTempDir()
self.output_file_path = f'{self.tmp_dir}/out'
self.message_portion_1 = 'What do you get when you cross a seal and a polar bear?'
self.message_portion_2 = ' A polar bear.'
def create_file(self, content, executable=False):
file_path = f'{self.tmp_dir}/{uuid.uuid4()}'
with open(file_path, 'w') as f:
f.write(content)
if executable:
# Add file owner execute permissions
os.chmod(file_path, os.stat(file_path).st_mode | stat.S_IXUSR)
return file_path
def METHOD_NAME(self, options, fail):
with Toil(options) as toil:
if not options.restart:
msg_portion_file_path = self.create_file(content=self.message_portion_1)
msg_portion_file_id = toil.importFile(f'file://{msg_portion_file_path}')
self.assertIsInstance(msg_portion_file_id, FileID)
self.assertEqual(os.stat(msg_portion_file_path).st_size, msg_portion_file_id.size)
file_that_can_trigger_failure_when_job_starts = self.create_file(
content='Time to freak out!' if fail else 'Keep calm and carry on.')
self.trigger_file_id = toil.importFile(f'file://{file_that_can_trigger_failure_when_job_starts}')
workflow_final_output_file_id = toil.start(
RestartingJob(msg_portion_file_id, self.trigger_file_id, self.message_portion_2))
else:
# TODO: We're hackily updating this file without using the
# correct FileStore interface. User code should not do this!
with toil._jobStore.update_file_stream(self.trigger_file_id) as f:
f.write(('Time to freak out!' if fail else 'Keep calm and carry on.').encode('utf-8'))
workflow_final_output_file_id = toil.restart()
toil.exportFile(workflow_final_output_file_id, f'file://{self.output_file_path}')
with open(self.output_file_path) as f:
self.assertEqual(f.read(), f'{self.message_portion_1}{self.message_portion_2}')
def _run_import_export_workflow(self, restart):
options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
options.logLevel = "INFO"
if restart:
try:
self.METHOD_NAME(options, fail=True)
except FailedJobsException:
options.restart = True
self.METHOD_NAME(options, fail=False)
@slow
def test_import_export_restart_true(self):
self._run_import_export_workflow(restart=True)
def test_import_export_restart_false(self):
self._run_import_export_workflow(restart=False)
def test_basic_import_export(self):
"""
Ensures that uploaded files preserve their file permissions when they
are downloaded again. This function checks that an imported executable file
maintains its executability after being exported.
"""
options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
options.logLevel = "INFO"
with Toil(options) as toil:
# TODO: test this with non-local (AWS, Google)
# Note: this is somewhat done in src/toil/test/src/fileStoreTest.py
with self.subTest('Testing permissions are preserved for local importFile/exportFile'):
for executable in True, False:
file_path = self.create_file(content='Hello', executable=executable)
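                    # Mask with S_IXUSR so only the owner-execute bit is compared.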
initial_permissions = os.stat(file_path).st_mode & stat.S_IXUSR
file_id = toil.importFile(f'file://{file_path}')
toil.exportFile(file_id, f'file://{self.output_file_path}')
current_permissions = os.stat(self.output_file_path).st_mode & stat.S_IXUSR
assert initial_permissions == current_permissions
with self.subTest('Testing relative paths without the file:// schema.'):
relative_path_data = 'Everything is relative.'
file_path = self.create_file(content=relative_path_data)
file_id = toil.importFile(os.path.relpath(file_path))
toil.exportFile(file_id, os.path.relpath(self.output_file_path))
with open(self.output_file_path) as f:
self.assertEqual(f.read(), relative_path_data)
with self.subTest('Test local importFile accepts a shared_file_name.'):
# TODO: whyyyy do we allow this? shared file names are not unique and can overwrite each other
# ...not only that... we can't use exportFile on them afterwards!?
file_path = self.create_file(content='why')
shared_file_name = 'users_should_probably_not_be_allowed_to_make_shared_files.bad'
toil.importFile(f'file://{file_path}', sharedFileName=shared_file_name)
with toil._jobStore.read_shared_file_stream(shared_file_name, encoding='utf-8') as f:
self.assertEqual(f.read(), 'why')
class RestartingJob(Job):
def __init__(self, msg_portion_file_id, trigger_file_id, message_portion_2):
Job.__init__(self, memory=100000, cores=1, disk="1M")
self.msg_portion_file_id = msg_portion_file_id
self.trigger_file_id = trigger_file_id
self.message_portion_2 = message_portion_2
def run(self, file_store):
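        # The trigger file's content decides whether this run fails; on
        # restart the test rewrites that file so the retried job succeeds.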
with file_store.readGlobalFileStream(self.trigger_file_id) as readable:
if readable.read() == b'Time to freak out!':
raise RuntimeError('D:')
with file_store.writeGlobalFileStream() as (writable, output_file_id):
with file_store.readGlobalFileStream(self.msg_portion_file_id, encoding='utf-8') as readable:
# combine readable.read() (the original message 1) with message 2
# this will be the final output of the workflow
writable.write(f'{readable.read()}{self.message_portion_2}'.encode())
return output_file_id
| null |
1,792 |
import os
import threading
import time
from typing import Optional
import psutil
from galaxy import (
job_metrics,
model,
)
from galaxy.app_unittest_utils.tools_support import UsesTools
from galaxy.jobs.runners import local
from galaxy.util import bunch
from galaxy.util.unittest import TestCase
class TestLocalJobRunner(TestCase, UsesTools):
def setUp(self):
self.setup_app()
self._init_tool()
self.app.job_metrics = job_metrics.JobMetrics()
self.job_wrapper = MockJobWrapper(self.app, self.test_directory, self.tool)
def METHOD_NAME(self):
self.tear_down_app()
def test_run(self):
self.job_wrapper.command_line = "echo HelloWorld"
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "HelloWorld"
def test_galaxy_lib_on_path(self):
self.job_wrapper.command_line = '''python -c "import galaxy.util"'''
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.exit_code == 0
def test_default_slots(self):
self.job_wrapper.command_line = """echo $GALAXY_SLOTS"""
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "1"
def test_slots_override(self):
# Set local_slots in job destination to specify slots for
# local job runner.
self.job_wrapper.job_destination.params["local_slots"] = 3
self.job_wrapper.command_line = """echo $GALAXY_SLOTS"""
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "3"
def test_exit_code(self):
self.job_wrapper.command_line = '''sh -c "exit 4"'''
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.exit_code == 4
def test_metadata_gets_set(self):
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert os.path.exists(self.job_wrapper.mock_metadata_path)
def test_metadata_gets_set_if_embedded(self):
self.job_wrapper.job_destination.params["embed_metadata_in_job"] = "True"
# Kill off cruft for _handle_metadata_externally and make sure job still works...
self.job_wrapper.external_output_metadata = None
self.app.datatypes_registry.set_external_metadata_tool = None
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert os.path.exists(self.job_wrapper.mock_metadata_path)
def test_stopping_job(self):
self.job_wrapper.command_line = '''python -c "import time; time.sleep(15)"'''
runner = local.LocalJobRunner(self.app, 1)
def queue():
runner.queue_job(self.job_wrapper)
t = threading.Thread(target=queue)
t.start()
external_id = self.job_wrapper.wait_for_external_id()
assert psutil.pid_exists(external_id)
runner.stop_job(self.job_wrapper)
t.join(1)
assert not psutil.pid_exists(external_id)
def test_shutdown_no_jobs(self):
self.app.config.monitor_thread_join_timeout = 5
runner = local.LocalJobRunner(self.app, 1)
runner.start()
runner.shutdown()
def test_stopping_job_at_shutdown(self):
self.job_wrapper.command_line = '''python -c "import time; time.sleep(15)"'''
self.app.model.session = bunch.Bunch(add=lambda x: None, flush=lambda: None)
runner = local.LocalJobRunner(self.app, 1)
runner.start()
self.app.config.monitor_thread_join_timeout = 15
def queue():
runner.queue_job(self.job_wrapper)
t = threading.Thread(target=queue)
t.start()
external_id = self.job_wrapper.wait_for_external_id()
assert psutil.pid_exists(external_id)
runner.shutdown()
t.join(1)
assert not psutil.pid_exists(external_id)
assert "job terminated by Galaxy shutdown" in self.job_wrapper.fail_message
class MockJobWrapper:
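    """Mock implementing just the JobWrapper surface the local runner touches."""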
def __init__(self, app, test_directory, tool):
working_directory = os.path.join(test_directory, "workdir")
tool_working_directory = os.path.join(working_directory, "working")
os.makedirs(tool_working_directory)
self.app = app
self.tool = tool
self.requires_containerization = False
self.state = model.Job.states.QUEUED
self.command_line = "echo HelloWorld"
self.environment_variables = []
self.commands_in_new_shell = False
self.prepare_called = False
self.dependency_shell_commands = None
self.working_directory = working_directory
self.tool_working_directory = tool_working_directory
self.requires_setting_metadata = True
self.job_destination = bunch.Bunch(id="default", params={})
self.galaxy_lib_dir = os.path.abspath("lib")
self.job = model.Job()
self.job_id = 1
self.job.id = 1
self.output_paths = ["/tmp/output1.dat"]
self.mock_metadata_path = os.path.abspath(os.path.join(test_directory, "METADATA_SET"))
self.metadata_command = f"touch {self.mock_metadata_path}"
self.galaxy_virtual_env = None
self.shell = "/bin/bash"
self.cleanup_job = "never"
self.tmp_dir_creation_statement = ""
self.use_metadata_binary = False
self.guest_ports = []
self.metadata_strategy = "directory"
self.remote_command_line = False
# Cruft for setting metadata externally, axe at some point.
self.external_output_metadata: Optional[bunch.Bunch] = bunch.Bunch()
self.app.datatypes_registry.set_external_metadata_tool = bunch.Bunch(build_dependency_shell_commands=lambda: [])
def check_tool_output(*args, **kwds):
return "ok"
def wait_for_external_id(self):
"""Test method for waiting until an external id has been registered."""
external_id = None
for _ in range(50):
external_id = self.job.job_runner_external_id
if external_id:
break
time.sleep(0.1)
return external_id
def prepare(self):
self.prepare_called = True
def set_external_id(self, external_id, **kwd):
self.job.job_runner_external_id = external_id
def get_command_line(self):
return self.command_line
def container_monitor_command(self, *args, **kwds):
return None
def get_id_tag(self):
return "1"
def get_state(self):
return self.state
def change_state(self, state, job=None):
self.state = state
@property
def job_io(self):
return bunch.Bunch(
get_output_fnames=lambda: [], check_job_script_integrity=False, version_path="/tmp/version_path"
)
def get_job(self):
return self.job
def setup_external_metadata(self, **kwds):
return self.metadata_command
def get_env_setup_clause(self):
return ""
def has_limits(self):
return False
def fail(
self, message, exception=False, tool_stdout="", tool_stderr="", exit_code=None, job_stdout=None, job_stderr=None
):
self.fail_message = message
self.fail_exception = exception
def finish(self, stdout, stderr, exit_code, **kwds):
self.stdout = stdout
self.stderr = stderr
self.exit_code = exit_code
def tmp_directory(self):
return None
def home_directory(self):
return None
def reclaim_ownership(self):
pass
@property
def is_cwl_job(self):
return False
| null |
1,793 |
##########################################################################
#
# Copyright (c) 2010-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import imath
import IECore
import IECoreScene
import IECoreHoudini
import unittest
import os
import shutil
class TestCobIOTranslator( IECoreHoudini.TestCase ) :
__testDir = "test/cobIO"
__testFile = "%s/testCobIO.cob" % __testDir
__testPDCFile = "%s/testPdcIO.pdc" % __testDir
__testPTCFile = "%s/testPtcIO.ptc" % __testDir
def torus( self ) :
obj = hou.node( "/obj" )
geo = obj.createNode( "geo", run_init_scripts=False )
torus = geo.createNode( "torus" )
facet = torus.createOutputNode( "facet" )
facet.parm( "postnml" ).set( True )
mountain = facet.createOutputNode( "mountain" )
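		# Houdini 16 renamed the mountain SOP's offset parm, so choose the
		# parameter name based on the application version.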
if hou.applicationVersion()[0] >= 16:
mountain.parm("offsetx").setExpression("$FF")
else:
mountain.parm("offset1").setExpression( "$FF" )
return mountain
def points( self, inNode=None ) :
if not inNode :
inNode = self.torus()
return inNode.createOutputNode( "scatter" )
def curves( self, inNode=None ) :
points = self.points( inNode )
add = points.createOutputNode( "add" )
add.parm( "stdswitcher1" ).set( 1 )
add.parm( "switcher1" ).set( 1 )
convert = add.createOutputNode( "convert" )
convert.parm( "totype" ).set( 4 )
return convert
def reader( self ) :
geo = hou.node( "/obj/geo1" )
if not geo :
obj = hou.node( "/obj" )
geo = obj.createNode( "geo", run_init_scripts=False )
reader = geo.createNode( "file" )
reader.parm( "file" ).set( TestCobIOTranslator.__testFile )
reader.parm( "filemode" ).set( 1 )
return reader
def writer( self, inNode ) :
writer = inNode.createOutputNode( "file" )
writer.parm( "file" ).set( TestCobIOTranslator.__testFile )
writer.parm( "filemode" ).set( 2 )
return writer
def testReadWritePoints( self ) :
points = self.points()
writer = self.writer( points )
reader = self.reader()
self.assertTrue( not reader.geometry() )
self.assertTrue( reader.errors() )
writer.cook()
self.assertTrue( reader.geometry() )
self.assertTrue( not reader.errors() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( reader )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.assertTrue( result.isInstanceOf( IECore.TypeId( IECoreScene.TypeId.PointsPrimitive ) ) )
def testReadWriteMesh( self ) :
mesh = self.torus()
writer = self.writer( mesh )
reader = self.reader()
self.assertTrue( not reader.geometry() )
self.assertTrue( reader.errors() )
writer.cook()
self.assertTrue( reader.geometry() )
self.assertTrue( not reader.errors() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( reader )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
result = converter.convert()
self.assertTrue( result.isInstanceOf( IECore.TypeId( IECoreScene.TypeId.MeshPrimitive ) ) )
def testReadWriteCurves( self ) :
curves = self.curves()
writer = self.writer( curves )
reader = self.reader()
self.assertTrue( not reader.geometry() )
self.assertTrue( reader.errors() )
writer.cook()
self.assertTrue( reader.geometry() )
self.assertTrue( not reader.errors() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( reader )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniCurvesConverter ) ) )
result = converter.convert()
self.assertTrue( result.isInstanceOf( IECore.TypeId( IECoreScene.TypeId.CurvesPrimitive ) ) )
def testCantReadBadCob( self ) :
writer = self.writer( self.torus() )
reader = self.reader()
self.assertTrue( not reader.geometry() )
self.assertTrue( reader.errors() )
writer.cook()
self.assertTrue( reader.geometry() )
self.assertTrue( not reader.errors() )
with open( "testCobIO.cob", "w" ) as f :
f.write( "this is not a real cob" )
reader.parm( "file" ).set( "testCobIO.cob" )
self.assertTrue( not reader.geometry() )
self.assertTrue( reader.errors() )
os.remove( "testCobIO.cob" )
def METHOD_NAME( self ) :
IECore.ObjectWriter( imath.V3f( 1 ), TestCobIOTranslator.__testFile ).write()
reader = self.reader()
geo = reader.geometry()
prims = geo.prims()
self.assertFalse( reader.errors() )
self.assertEqual( len(prims), 1 )
self.assertEqual( prims[0].type(), hou.primType.Custom )
self.assertEqual( prims[0].vertices()[0].point().number(), 0 )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( reader )
self.assertTrue( isinstance( converter, IECoreHoudini.FromHoudiniCortexObjectConverter ) )
result = converter.convert()
self.assertEqual( result, IECore.V3fData( imath.V3f( 1 ) ) )
def testReadWritePDC( self ) :
points = self.points()
writer = self.writer( points )
writer.parm( "file" ).set( TestCobIOTranslator.__testPDCFile )
reader = self.reader()
reader.parm( "file" ).set( TestCobIOTranslator.__testPDCFile )
self.assertTrue( not reader.geometry() )
self.assertTrue( reader.errors() )
writer.cook()
self.assertTrue( reader.geometry() )
self.assertTrue( not reader.errors() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( reader )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.assertTrue( result.isInstanceOf( IECore.TypeId( IECoreScene.TypeId.PointsPrimitive ) ) )
def setUp( self ) :
IECoreHoudini.TestCase.setUp( self )
if not os.path.exists( TestCobIOTranslator.__testDir ) :
os.mkdir( TestCobIOTranslator.__testDir )
def tearDown( self ) :
if os.path.exists( TestCobIOTranslator.__testDir ) :
shutil.rmtree( TestCobIOTranslator.__testDir )
if __name__ == "__main__":
unittest.main()
| null |
1,794 |
""" testing book data connectors """
from unittest.mock import patch
from django.test import TestCase
import responses
from bookwyrm import models
from bookwyrm.connectors import abstract_connector, ConnectorException
from bookwyrm.connectors.abstract_connector import Mapping, get_data
from bookwyrm.settings import DOMAIN
class AbstractConnector(TestCase):
"""generic code for connecting to outside data sources"""
def setUp(self):
"""we need an example connector"""
self.connector_info = models.Connector.objects.create(
identifier="example.com",
connector_file="openlibrary",
base_url="https://example.com",
books_url="https://example.com/books",
covers_url="https://example.com/covers",
search_url="https://example.com/search?q=",
)
work_data = {
"id": "abc1",
"title": "Test work",
"type": "work",
"openlibraryKey": "OL1234W",
}
self.work_data = work_data
edition_data = {
"id": "abc2",
"title": "Test edition",
"type": "edition",
"openlibraryKey": "OL1234M",
}
self.edition_data = edition_data
class TestConnector(abstract_connector.AbstractConnector):
"""nothing added here"""
generated_remote_link_field = "openlibrary_link"
def parse_search_data(self, data, min_confidence):
return data
def parse_isbn_search_data(self, data):
return data
def METHOD_NAME(self, data):
return data["type"] == "work"
def get_edition_from_work_data(self, data):
return edition_data
def get_work_from_edition_data(self, data):
return work_data
def get_authors_from_data(self, data):
return []
def expand_book_data(self, book):
pass
self.connector = TestConnector("example.com")
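        # Each Mapping ties a model field to a remote key; with a single
        # argument the remote key is assumed to match the field name.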
self.connector.book_mappings = [
Mapping("id"),
Mapping("title"),
Mapping("openlibraryKey"),
]
self.book = models.Edition.objects.create(
title="Test Book",
remote_id="https://example.com/book/1234",
openlibrary_key="OL1234M",
)
def test_abstract_connector_init(self):
"""barebones connector for search with defaults"""
self.assertIsInstance(self.connector.book_mappings, list)
def test_get_or_create_book_existing(self):
"""find an existing book by remote/origin id"""
self.assertEqual(models.Book.objects.count(), 1)
self.assertEqual(self.book.remote_id, f"https://{DOMAIN}/book/{self.book.id}")
self.assertEqual(self.book.origin_id, "https://example.com/book/1234")
# dedupe by origin id
result = self.connector.get_or_create_book("https://example.com/book/1234")
self.assertEqual(models.Book.objects.count(), 1)
self.assertEqual(result, self.book)
# dedupe by remote id
result = self.connector.get_or_create_book(
f"https://{DOMAIN}/book/{self.book.id}"
)
self.assertEqual(models.Book.objects.count(), 1)
self.assertEqual(result, self.book)
@responses.activate
def test_get_or_create_book_deduped(self):
"""load remote data and deduplicate"""
responses.add(
responses.GET, "https://example.com/book/abcd", json=self.edition_data
)
with patch("bookwyrm.connectors.abstract_connector.load_more_data.delay"):
result = self.connector.get_or_create_book("https://example.com/book/abcd")
self.assertEqual(result, self.book)
self.assertEqual(models.Edition.objects.count(), 1)
@responses.activate
def test_get_or_create_author(self):
"""load an author"""
# pylint: disable=attribute-defined-outside-init
self.connector.author_mappings = [
Mapping("id"),
Mapping("name"),
]
responses.add(
responses.GET,
"https://www.example.com/author",
json={"id": "https://www.example.com/author", "name": "Test Author"},
)
result = self.connector.get_or_create_author("https://www.example.com/author")
self.assertIsInstance(result, models.Author)
self.assertEqual(result.name, "Test Author")
self.assertEqual(result.origin_id, "https://www.example.com/author")
def test_get_or_create_author_existing(self):
"""get an existing author"""
author = models.Author.objects.create(name="Test Author")
result = self.connector.get_or_create_author(author.remote_id)
self.assertEqual(author, result)
@responses.activate
def test_update_author_from_remote(self):
"""trigger the function that looks up the remote data"""
author = models.Author.objects.create(name="Test", openlibrary_key="OL123A")
# pylint: disable=attribute-defined-outside-init
self.connector.author_mappings = [
Mapping("id"),
Mapping("name"),
Mapping("isni"),
]
responses.add(
responses.GET,
"https://openlibrary.org/authors/OL123A",
json={"id": "https://www.example.com/author", "name": "Beep", "isni": "hi"},
)
self.connector.update_author_from_remote(author)
author.refresh_from_db()
self.assertEqual(author.name, "Test")
self.assertEqual(author.isni, "hi")
def test_get_data_invalid_url(self):
"""load json data from an arbitrary url"""
with self.assertRaises(ConnectorException):
get_data("file://hello.com/image/jpg")
with self.assertRaises(ConnectorException):
get_data("http://127.0.0.1/image/jpg")
| null |
1,795 |
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import json
from pathlib import Path
from typing import Callable, ContextManager
import pytest
from conda.common.compat import on_win
from conda.testing import CondaCLIFixture, PathFactoryFixture, TmpEnvFixture
@pytest.fixture
def environment_yml(path_factory: PathFactoryFixture) -> Path:
path = path_factory(name="environment.yml")
path.write_text(
"name: scratch\n"
"channels:\n"
" - defaults\n"
"dependencies:\n"
" - ca-certificates=2023\n"
)
return path
def test_clean(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("clean", "--all", "--yes")
assert out
assert not err
assert not code
def test_create(conda_cli: CondaCLIFixture, path_factory: PathFactoryFixture):
out, err, code = conda_cli("create", "--prefix", path_factory(), "--yes")
assert out
assert not err
assert not code
def test_compare(
conda_cli: CondaCLIFixture,
tmp_env: TmpEnvFixture,
environment_yml: Path,
):
with tmp_env() as prefix:
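        # A fresh env lacks the pinned ca-certificates=2023, so compare
        # reports a difference and exits non-zero.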
out, err, code = conda_cli("compare", "--prefix", prefix, environment_yml)
assert out
assert not err
assert code
def test_config(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("config", "--show-sources")
assert out
assert not err
assert not code
def test_doctor(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("doctor")
assert out
assert not err
assert not code
def test_info(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("info")
assert out
assert not err
assert not code
def test_info_json(conda_cli: CondaCLIFixture):
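    # --json should behave identically whether it precedes or follows the
    # subcommand; both orderings are checked below.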
out1, err, code = conda_cli("info", "--json")
assert json.loads(out1)
assert not err
assert not code
out2, err, code = conda_cli("--json", "info")
assert json.loads(out2)
assert not err
assert not code
assert out1 == out2
def test_init(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("init", "--dry-run")
assert out
assert not err
assert not code
def test_install(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env() as prefix:
out, err, code = conda_cli(
"install",
*("--prefix", prefix),
"ca-certificates",
"--yes",
)
assert out
assert not err
assert not code
def test_list(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env("ca-certificates") as prefix:
out, err, code = conda_cli("list", "--prefix", prefix)
assert out
assert not err
assert not code
def test_notices(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("notices")
assert out
assert not err
assert not code
def test_package(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env() as prefix:
out, err, code = conda_cli("package", "--prefix", prefix)
assert out
assert not err
assert not code
@pytest.mark.parametrize("subcommand", ["remove", "uninstall"])
def test_remove(subcommand: str, conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env() as prefix:
out, err, code = conda_cli(subcommand, "--prefix", prefix, "--all", "--yes")
assert out
assert not err
assert not code
def test_rename(
conda_cli: CondaCLIFixture,
tmp_env: TmpEnvFixture,
path_factory: PathFactoryFixture,
):
with tmp_env() as prefix:
out, err, code = conda_cli("rename", "--prefix", prefix, path_factory())
assert out
assert not err
assert not code
def test_run(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env("m2-patch" if on_win else "patch") as prefix:
out, err, code = conda_cli("run", "--prefix", prefix, "patch", "--help")
assert out
assert not err
assert not code
def test_search(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("search", "python")
assert out
assert not err
assert not code
@pytest.mark.parametrize("subcommand", ["update", "upgrade"])
def test_update(subcommand: str, conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env("ca-certificates<2023") as prefix:
out, err, code = conda_cli(subcommand, "--prefix", prefix, "--all", "--yes")
assert out
assert not err
assert not code
def test_env_list(conda_cli: CondaCLIFixture):
assert conda_cli("env", "list") == conda_cli("info", "--envs")
def test_env_export(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("env", "export")
assert out
assert not err
assert not code
def test_env_remove(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env() as prefix:
out, err, code = conda_cli("env", "remove", "--prefix", prefix, "--yes")
assert out
assert not err
assert not code
def METHOD_NAME(
conda_cli: CondaCLIFixture,
path_factory: PathFactoryFixture,
environment_yml: Path,
):
out, err, code = conda_cli(
"env",
"create",
*("--prefix", path_factory()),
*("--file", environment_yml),
)
assert out
assert not err
assert not code
def test_env_update(
conda_cli: CondaCLIFixture,
tmp_env: TmpEnvFixture,
environment_yml: Path,
):
with tmp_env("ca-certificates<2023") as prefix:
out, err, code = conda_cli(
"env",
"update",
*("--prefix", prefix),
*("--file", environment_yml),
)
assert out
assert not err
assert not code
def test_env_config_vars(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env() as prefix:
out, err, code = conda_cli(
"env",
"config",
"vars",
"set",
*("--prefix", prefix),
"FOO=foo",
)
assert not out
assert not err
assert not code
out, err, code = conda_cli("env", "config", "vars", "list", "--prefix", prefix)
assert out
assert not err
assert not code
out, err, code = conda_cli(
"env",
"config",
"vars",
"unset",
*("--prefix", prefix),
"FOO",
)
assert not out
assert not err
assert not code
| null |
1,796 |
from typing import List, Union
import numpy as np
import pytest
import meerkat as mk
from meerkat.interactive.endpoint import _is_annotation_store
from meerkat.interactive.graph.store import _IteratorStore
@pytest.mark.parametrize("fn_decorator", [mk.gui.reactive])
def test_endpoint_wrapping_reactive_fn(fn_decorator):
"""When an endpoint wraps a reactive function, reactivity should be
disabled to prevent adding anything to the graph.
Note, we can only do this with methods decorated with @reactive. If
a method decorated with `@mk.gui.react()` is called from an
endpoint, the graph will be built because `@mk.gui.react()`
activates reactivity prior to the method being called.
"""
fn = fn_decorator(lambda store: store + 3)
@mk.endpoint()
def fn_endpoint(store: mk.Store):
store.set(fn(store))
# Test with @reactive decorator.
x = mk.Store(1)
assert not mk.gui.is_unmarked_context() # Verify we are in a reactive context
fn_endpoint(x)
with mk.unmarked():
assert x == 4 # Verify the endpoint works
assert x.inode is None # Graph should be empty
@pytest.mark.parametrize(
"x",
[
# TODO: Uncomment when we can issue column modifications.
# mk.ScalarColumn([1, 2, 3, 4, 5]),
mk.DataFrame({"a": [1, 2, 3, 4, 5]}),
mk.Store(np.array([1, 2, 3, 4, 5])),
],
)
def test_endpoint_with_reactive_output(
x,
):
"""Test that we can add endpoints to reactive outputs.
The graph for this test looks like
        df -> view -> df_view -> view -> df_view2
        ^  |          ^  |
        |  v          |  v
        add_one       add_one (endpoint)
"""
def _get_value(_x):
if isinstance(_x, mk.DataFrame):
return _x["a"]
else:
return _x
x.mark()
@mk.reactive()
def view(_x):
if isinstance(_x, (mk.DataFrame, mk.Column)):
return _x.view()
else:
return _x
# TODO: Change the type hint to Union when unions are supported.
@mk.endpoint()
def add_one(_x: mk.Store):
if isinstance(_x, mk.DataFrame):
_x["a"] = _x["a"] + 1
_x.set(_x)
else:
out = _x + 1
_x.set(out)
endpoint_df = add_one.partial(_x=x)
df_view = view(x)
assert df_view.inode is not None
assert x.inode is not None
df_view_inode = df_view.inode
endpoint_df_view = add_one.partial(_x=df_view)
assert df_view.inode is df_view_inode
df_view2 = view(df_view)
df_view2_inode = df_view2.inode
# Run the endpoint on the original input.
# This should trigger both views to update.
endpoint_df.run()
assert all(_get_value(x) == [2, 3, 4, 5, 6])
assert all(_get_value(df_view_inode.obj) == [2, 3, 4, 5, 6])
assert all(_get_value(df_view2_inode.obj) == [2, 3, 4, 5, 6])
# Run the endpoint on the first view.
# This should trigger the second view to update.
endpoint_df_view.run()
assert all(_get_value(x) == [2, 3, 4, 5, 6])
assert all(_get_value(df_view_inode.obj) == [3, 4, 5, 6, 7])
assert all(_get_value(df_view2_inode.obj) == [3, 4, 5, 6, 7])
@pytest.mark.parametrize("endpoint_id", [1, 2, 3, 4, 5])
@pytest.mark.parametrize("partial", [True, False])
def test_endpoint_type_hints(endpoint_id: int, partial: bool):
"""Test that endpoints with different type hints will work."""
@mk.endpoint()
def endpoint1(x: mk.Store):
assert isinstance(x, mk.Store)
@mk.endpoint()
def endpoint2(x: mk.Store[int]):
assert isinstance(x, mk.Store)
@mk.endpoint()
def endpoint3(x: mk.Store[List[int]]):
assert isinstance(x, mk.Store)
@mk.endpoint()
def endpoint4(x: Union[mk.Store, int]):
assert isinstance(x, mk.Store)
@mk.endpoint()
def endpoint5(x: Union[mk.Store[int], int]):
assert isinstance(x, mk.Store)
endpoint = {
1: endpoint1,
2: endpoint2,
3: endpoint3,
4: endpoint4,
5: endpoint5,
}[endpoint_id]
store = mk.Store(1)
if partial:
_endpoint = endpoint.partial(x=store)
_endpoint.run()
else:
endpoint.run(store)
@pytest.mark.parametrize("x_input", ["a", mk.Store("a"), mk.Store(1)])
@pytest.mark.parametrize("endpoint_id", [1, 2, 3])
@pytest.mark.parametrize("partial", [True, False])
def METHOD_NAME(x_input, endpoint_id: int, partial: bool):
"""Endpoints resolve variables based on their ids, which are strings.
This may cause problems when the input is actually as string. These
tests are to check that endpoints can work properly with non-id
strings.
"""
@mk.endpoint()
def endpoint1(x: str):
# The type hint is `str`, so the input should never be a store.
assert not isinstance(x, mk.Store)
@mk.endpoint()
def endpoint2(x: mk.Store[str]):
# The type hint is `Store[str]`, so the input should be a store.
# Type hints should never be strict constraints in Python.
# So even if the user passes in some other type, we should still
# be able to handle it.
if isinstance(x_input, mk.Store):
assert isinstance(x, mk.Store)
else:
assert not isinstance(x, mk.Store)
@mk.endpoint()
def endpoint3(x: Union[mk.Store, str]):
# The type hint is `Union[Store, str]`, so the input should be a store
        # if a store was passed in. If a store wasn't passed in, then we
        # should get the plain (non-Store) value back.
if isinstance(x_input, mk.Store):
assert isinstance(x, mk.Store)
else:
assert not isinstance(x, mk.Store)
endpoint = {
1: endpoint1,
2: endpoint2,
3: endpoint3,
}[endpoint_id]
if partial:
_endpoint = endpoint.partial(x=x_input)
_endpoint.run()
else:
endpoint.run(x_input)
@pytest.mark.parametrize(
"type_hint",
[
mk.Store,
mk.Store[int],
mk.Store[List[int]],
# subclass of Store
_IteratorStore,
# Union with non-generic store
Union[mk.Store, int],
# Union with generic store
Union[mk.Store[int], int],
# Nested stores
Union[Union[mk.Store[int], int], int],
],
)
def test_is_annotation_store_true(type_hint):
assert _is_annotation_store(type_hint)
@pytest.mark.parametrize("type_hint", [mk.DataFrame, mk.Column])
def test_is_annotation_store_false(type_hint):
assert not _is_annotation_store(type_hint)
| null |
1,797 |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, msg_getdata
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.block_receive_map = defaultdict(int)
def on_inv(self, message):
pass
def METHOD_NAME(self, message):
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
class MaxUploadTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxuploadtarget=800"]]
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# p2p_conns[0] will only request old blocks
# p2p_conns[1] will only request new blocks
# p2p_conns[2] will test resetting the counters
p2p_conns = []
for _ in range(3):
p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
# Now mine a big block
mine_large_block(self.nodes[0], self.utxo_cache)
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
mine_large_block(self.nodes[0], self.utxo_cache)
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
big_new_block = int(big_new_block, 16)
        # p2p_conns[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 800*1024*1024
daily_buffer = 144 * 4000000
max_bytes_available = max_bytes_per_day - daily_buffer
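        # 800 MiB/day minus the 144-block * 4 MB new-block buffer leaves
        # roughly 250 MiB of budget for serving historical blocks.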
success_count = max_bytes_available // old_block_size
# 576MB will be reserved for relaying new blocks, so expect this to
# succeed for ~235 tries.
for i in range(success_count):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].sync_with_ping()
assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in range(3):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
self.log.info("Peer 0 disconnected after downloading old block too many times")
# Requesting the current block on p2p_conns[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(800):
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].sync_with_ping()
assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
self.log.info("Peer 1 able to repeatedly download new block")
# But if p2p_conns[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
self.log.info("Peer 1 disconnected after trying to download old block")
self.log.info("Advancing system time on node to clear counters...")
# If we advance the time by 24 hours, then the counters should reset,
# and p2p_conns[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
p2p_conns[2].sync_with_ping()
p2p_conns[2].send_message(getdata_request)
p2p_conns[2].sync_with_ping()
assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
self.log.info("Peer 2 able to download old block")
self.nodes[0].disconnect_p2ps()
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
self.log.info("Restarting nodes with -whitelist=127.0.0.1")
self.stop_node(0)
self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"])
# Reconnect to self.nodes[0]
self.nodes[0].add_p2p_connection(TestP2PConn())
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(20):
self.nodes[0].p2p.send_message(getdata_request)
self.nodes[0].p2p.sync_with_ping()
assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
self.nodes[0].p2p.send_and_ping(getdata_request)
assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist
self.log.info("Peer still connected after trying to download old block (whitelisted)")
if __name__ == '__main__':
MaxUploadTest().main()
| null |
1,798 |
#!/usr/bin/env python3
"""
strip_asm.py - Cleanup ASM output for the specified file
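
Usage: strip_asm.py <input assembly file> <output file>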
"""
from argparse import ArgumentParser
import sys
import os
import re
def find_used_labels(asm):
found = set()
    label_re = re.compile(r"\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
for l in asm.splitlines():
m = label_re.match(l)
if m:
found.add('.L%s' % m.group(1))
return found
def normalize_labels(asm):
decls = set()
label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if m:
decls.add(m.group(0))
if len(decls) == 0:
return asm
needs_dot = next(iter(decls))[0] != '.'
if not needs_dot:
return asm
for ld in decls:
        asm = re.sub(r"(^|\s+)" + ld + r"(?=:|\s)", '\\1.' + ld, asm)
return asm
def transform_labels(asm):
asm = normalize_labels(asm)
used_decls = find_used_labels(asm)
new_asm = ''
    label_decl = re.compile(r"^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if not m or m.group(0) in used_decls:
new_asm += l
new_asm += '\n'
return new_asm
def is_identifier(tk):
if len(tk) == 0:
return False
first = tk[0]
if not first.isalpha() and first != '_':
return False
for i in range(1, len(tk)):
c = tk[i]
if not c.isalnum() and c != '_':
return False
return True
def process_identifiers(l):
"""
process_identifiers - process all identifiers and modify them to have
consistent names across all platforms; specifically across ELF and MachO.
    For example, MachO inserts an additional underscore at the beginning of
names. This function removes that.
"""
parts = re.split(r'([a-zA-Z0-9_]+)', l)
new_line = ''
for tk in parts:
if is_identifier(tk):
if tk.startswith('__Z'):
tk = tk[1:]
elif tk.startswith('_') and len(tk) > 1 and \
tk[1].isalpha() and tk[1] != 'Z':
tk = tk[1:]
new_line += tk
return new_line
def METHOD_NAME(asm):
"""
Strip the ASM of unwanted directives and lines
"""
new_contents = ''
asm = transform_labels(asm)
# TODO: Add more things we want to remove
    discard_regexes = [
        re.compile(r"\s+\..*$"),  # directive
        re.compile(r"\s*#(NO_APP|APP)$"),  # inline ASM
        re.compile(r"\s*#.*$"),  # comment line
        re.compile(r"\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"),  # global directive
        re.compile(r"\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"),
    ]
keep_regexes = [
]
fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:")
for l in asm.splitlines():
# Remove Mach-O attribute
l = l.replace('@GOTPCREL', '')
add_line = True
for reg in discard_regexes:
if reg.match(l) is not None:
add_line = False
break
for reg in keep_regexes:
if reg.match(l) is not None:
add_line = True
break
if add_line:
if fn_label_def.match(l) and len(new_contents) != 0:
new_contents += '\n'
l = process_identifiers(l)
new_contents += l
new_contents += '\n'
return new_contents
def main():
parser = ArgumentParser(
description='generate a stripped assembly file')
parser.add_argument(
'input', metavar='input', type=str, nargs=1,
help='An input assembly file')
parser.add_argument(
'out', metavar='output', type=str, nargs=1,
help='The output file')
args, unknown_args = parser.parse_known_args()
input = args.input[0]
output = args.out[0]
if not os.path.isfile(input):
print(("ERROR: input file '%s' does not exist") % input)
sys.exit(1)
contents = None
with open(input, 'r') as f:
contents = f.read()
new_contents = METHOD_NAME(contents)
with open(output, 'w') as f:
f.write(new_contents)
if __name__ == '__main__':
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| null |
1,799 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class ListMediaRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'ListMedia')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_UserId(self): # String
return self.get_query_params().get('UserId')
def set_UserId(self, UserId): # String
self.add_query_param('UserId', UserId)
def get_OriginSiteUserId(self): # String
return self.get_query_params().get('OriginSiteUserId')
def set_OriginSiteUserId(self, OriginSiteUserId): # String
self.add_query_param('OriginSiteUserId', OriginSiteUserId)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_MediaName(self): # String
return self.get_query_params().get('MediaName')
def set_MediaName(self, MediaName): # String
self.add_query_param('MediaName', MediaName)
def get_AppName(self): # String
return self.get_query_params().get('AppName')
def set_AppName(self, AppName): # String
self.add_query_param('AppName', AppName)
def get_TenantId(self): # String
return self.get_query_params().get('TenantId')
def METHOD_NAME(self, TenantId): # String
self.add_query_param('TenantId', TenantId)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_AccessStatus(self): # String
return self.get_query_params().get('AccessStatus')
def set_AccessStatus(self, AccessStatus): # String
self.add_query_param('AccessStatus', AccessStatus)
def get_FirstScene(self): # String
return self.get_query_params().get('FirstScene')
def set_FirstScene(self, FirstScene): # String
self.add_query_param('FirstScene', FirstScene)
def get_EndCreateTime(self): # Long
return self.get_query_params().get('EndCreateTime')
def set_EndCreateTime(self, EndCreateTime): # Long
self.add_query_param('EndCreateTime', EndCreateTime)
def get_Business(self): # String
return self.get_query_params().get('Business')
def set_Business(self, Business): # String
self.add_query_param('Business', Business)
def get_Os(self): # String
return self.get_query_params().get('Os')
def set_Os(self, Os): # String
self.add_query_param('Os', Os)
def get_MediaStatus(self): # String
return self.get_query_params().get('MediaStatus')
def set_MediaStatus(self, MediaStatus): # String
self.add_query_param('MediaStatus', MediaStatus)
def get_Environment(self): # String
return self.get_query_params().get('Environment')
def set_Environment(self, Environment): # String
self.add_query_param('Environment', Environment)
def get_StartCreateTime(self): # Long
return self.get_query_params().get('StartCreateTime')
def set_StartCreateTime(self, StartCreateTime): # Long
self.add_query_param('StartCreateTime', StartCreateTime)
def get_UserSite(self): # String
return self.get_query_params().get('UserSite')
def set_UserSite(self, UserSite): # String
self.add_query_param('UserSite', UserSite)
def get_SecondScene(self): # String
return self.get_query_params().get('SecondScene')
def set_SecondScene(self, SecondScene): # String
self.add_query_param('SecondScene', SecondScene)
def get_MediaType(self): # String
return self.get_query_params().get('MediaType')
def set_MediaType(self, MediaType): # String
self.add_query_param('MediaType', MediaType)
| null |