observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n'''\n\n\nclass PermalinkGenerator(Generator):\n '''\n Generate a redirect page for every item of content with a\n permalink_id metadata\n '''\n def generate_context(self):\n '''\n Setup context\n '''\n self.permalink_output_path = os.path.join(\n self.output_path, self.settings['PERMALINK_PATH'])\n self.permalink_id_metadata_key = (\n self.settings['PERMALINK_ID_METADATA_KEY'])\n\n def generate_output(self, writer=None):\n '''\n Generate redirect files\n '''\n logger.info(\n 'Generating permalink files in %r', self.permalink_output_path)\n\n clean_output_dir(self.permalink_output_path, [])\n mkdir_p(self.permalink_output_path)\n for content in itertools.chain(\n self.context['articles'], self.context['pages']):\n\n for permalink_id in content.get_permalink_ids_iter():\n permalink_path = os.path.join(\n self.permalink_output_path, permalink_id) + '.html'\n\n redirect_string = REDIRECT_STRING.format(\n url=article_url(content),\n title=content.title)\n open(permalink_path, 'w').write(redirect_string)\n\n\ndef get_permalink_ids_iter(self):\n '''\n Method to get permalink ids from content. To be bound to the class last\n thing.\n '''\n permalink_id_key = self.settings['PERMALINK_ID_METADATA_KEY']\n permalink_ids = self.metadata.get(permalink_id_key, '')\n\n for permalink_id in permalink_ids.split(','):\n if permalink_id:\n yield permalink_id.strip()\n\n\ndef get_permalink_ids(self):\n '''\n Method to get permalink ids from content. To be bound to the class last\n thing.\n '''\n return list(self.get_permalink_ids_iter())\n\n\ndef get_permalink_path(self):\n \"\"\"Get just path component of permalink.\"\"\"\n try:\n first_permalink_id = next(self.get_permalink_ids_iter())\n except StopIteration:\n return None\n\n return '/{settings[PERMALINK_PATH]}/{first_permalink}.html'.format(\n settings=self.settings, first_permalink=first_permalink_id)\n\n\ndef get_permalink_url(self):\n '''\n Get a permalink URL\n '''\n return \"/\".join((self.settings['SITEURL'], self.get_permalink_path()))\n\n\nPERMALINK_METHODS = (\n get_permalink_ids_iter,\n get_permalink_ids,\n get_permalink_url,\n get_permalink_path,\n)\n\n\ndef add_permalink_methods(content_inst):\n '''\n Add permalink methods to object\n '''\n for permalink_method in PERMALINK_METHODS:\n setattr(\n content_inst,\n permalink_method.__name__,\n permalink_method.__get__(content_inst, content_inst.__class__))\n\n\ndef add_permalink_option_defaults(pelicon_inst):\n '''\n Add perlican defaults\n '''\n pelicon_inst.settings.setdefault('PERMALINK_PATH', 'permalinks')\n pelicon_inst.settings.setdefault(\n 'PERMALINK_ID_METADATA_KEY', 'permalink_id')\n\n\ndef METHOD_NAME(_pelican_object):\n return PermalinkGenerator\n\n\ndef register():\n signals.METHOD_NAME.connect(METHOD_NAME)\n signals.content_object_init.connect(add_permalink_methods)\n signals.initialized.connect(add_permalink_option_defaults)"},"code_compressed":{"kind":"null"}}},{"rowIdx":337,"cells":{"id":{"kind":"number","value":337,"string":"337"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdkrds.endpoint import endpoint_data\n\nclass CreateDdrInstanceRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'Rds', '2014-08-15', 'CreateDdrInstance')\n\t\tself.set_method('POST')\n\n\t\tif hasattr(self, \"endpoint_map\"):\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\n\t\tif hasattr(self, \"endpoint_regional\"):\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\n\n\tdef get_ResourceOwnerId(self): # Long\n\t\treturn self.get_query_params().get('ResourceOwnerId')\n\n\tdef set_ResourceOwnerId(self, ResourceOwnerId): # Long\n\t\tself.add_query_param('ResourceOwnerId', ResourceOwnerId)\n\tdef get_DBInstanceStorage(self): # Integer\n\t\treturn self.get_query_params().get('DBInstanceStorage')\n\n\tdef set_DBInstanceStorage(self, DBInstanceStorage): # Integer\n\t\tself.add_query_param('DBInstanceStorage', DBInstanceStorage)\n\tdef get_SystemDBCharset(self): # String\n\t\treturn self.get_query_params().get('SystemDBCharset')\n\n\tdef set_SystemDBCharset(self, SystemDBCharset): # String\n\t\tself.add_query_param('SystemDBCharset', SystemDBCharset)\n\tdef get_EngineVersion(self): # String\n\t\treturn self.get_query_params().get('EngineVersion')\n\n\tdef set_EngineVersion(self, EngineVersion): # String\n\t\tself.add_query_param('EngineVersion', EngineVersion)\n\tdef get_ResourceGroupId(self): # String\n\t\treturn self.get_query_params().get('ResourceGroupId')\n\n\tdef set_ResourceGroupId(self, ResourceGroupId): # String\n\t\tself.add_query_param('ResourceGroupId', ResourceGroupId)\n\tdef get_DBInstanceDescription(self): # String\n\t\treturn self.get_query_params().get('DBInstanceDescription')\n\n\tdef set_DBInstanceDescription(self, DBInstanceDescription): # String\n\t\tself.add_query_param('DBInstanceDescription', DBInstanceDescription)\n\tdef get_Period(self): # String\n\t\treturn self.get_query_params().get('Period')\n\n\tdef set_Period(self, Period): # String\n\t\tself.add_query_param('Period', Period)\n\tdef get_BackupSetId(self): # String\n\t\treturn self.get_query_params().get('BackupSetId')\n\n\tdef set_BackupSetId(self, BackupSetId): # String\n\t\tself.add_query_param('BackupSetId', BackupSetId)\n\tdef get_OwnerId(self): # Long\n\t\treturn self.get_query_params().get('OwnerId')\n\n\tdef set_OwnerId(self, OwnerId): # Long\n\t\tself.add_query_param('OwnerId', OwnerId)\n\tdef get_DBInstanceClass(self): # String\n\t\treturn self.get_query_params().get('DBInstanceClass')\n\n\tdef set_DBInstanceClass(self, DBInstanceClass): # String\n\t\tself.add_query_param('DBInstanceClass', DBInstanceClass)\n\tdef get_SecurityIPList(self): # String\n\t\treturn self.get_query_params().get('SecurityIPList')\n\n\tdef set_SecurityIPList(self, SecurityIPList): # String\n\t\tself.add_query_param('SecurityIPList', SecurityIPList)\n\tdef get_VSwitchId(self): # String\n\t\treturn 
self.get_query_params().get('VSwitchId')\n\n\tdef set_VSwitchId(self, VSwitchId): # String\n\t\tself.add_query_param('VSwitchId', VSwitchId)\n\tdef get_PrivateIpAddress(self): # String\n\t\treturn self.get_query_params().get('PrivateIpAddress')\n\n\tdef set_PrivateIpAddress(self, PrivateIpAddress): # String\n\t\tself.add_query_param('PrivateIpAddress', PrivateIpAddress)\n\tdef get_ZoneId(self): # String\n\t\treturn self.get_query_params().get('ZoneId')\n\n\tdef set_ZoneId(self, ZoneId): # String\n\t\tself.add_query_param('ZoneId', ZoneId)\n\tdef get_InstanceNetworkType(self): # String\n\t\treturn self.get_query_params().get('InstanceNetworkType')\n\n\tdef METHOD_NAME(self, InstanceNetworkType): # String\n\t\tself.add_query_param('InstanceNetworkType', InstanceNetworkType)\n\tdef get_ConnectionMode(self): # String\n\t\treturn self.get_query_params().get('ConnectionMode')\n\n\tdef set_ConnectionMode(self, ConnectionMode): # String\n\t\tself.add_query_param('ConnectionMode', ConnectionMode)\n\tdef get_SourceDBInstanceName(self): # String\n\t\treturn self.get_query_params().get('SourceDBInstanceName')\n\n\tdef set_SourceDBInstanceName(self, SourceDBInstanceName): # String\n\t\tself.add_query_param('SourceDBInstanceName', SourceDBInstanceName)\n\tdef get_ClientToken(self): # String\n\t\treturn self.get_query_params().get('ClientToken')\n\n\tdef set_ClientToken(self, ClientToken): # String\n\t\tself.add_query_param('ClientToken', ClientToken)\n\tdef get_Engine(self): # String\n\t\treturn self.get_query_params().get('Engine')\n\n\tdef set_Engine(self, Engine): # String\n\t\tself.add_query_param('Engine', Engine)\n\tdef get_DBInstanceStorageType(self): # String\n\t\treturn self.get_query_params().get('DBInstanceStorageType')\n\n\tdef set_DBInstanceStorageType(self, DBInstanceStorageType): # String\n\t\tself.add_query_param('DBInstanceStorageType', DBInstanceStorageType)\n\tdef get_DBInstanceNetType(self): # String\n\t\treturn self.get_query_params().get('DBInstanceNetType')\n\n\tdef set_DBInstanceNetType(self, DBInstanceNetType): # String\n\t\tself.add_query_param('DBInstanceNetType', DBInstanceNetType)\n\tdef get_RestoreTime(self): # String\n\t\treturn self.get_query_params().get('RestoreTime')\n\n\tdef set_RestoreTime(self, RestoreTime): # String\n\t\tself.add_query_param('RestoreTime', RestoreTime)\n\tdef get_ResourceOwnerAccount(self): # String\n\t\treturn self.get_query_params().get('ResourceOwnerAccount')\n\n\tdef set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String\n\t\tself.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)\n\tdef get_OwnerAccount(self): # String\n\t\treturn self.get_query_params().get('OwnerAccount')\n\n\tdef set_OwnerAccount(self, OwnerAccount): # String\n\t\tself.add_query_param('OwnerAccount', OwnerAccount)\n\tdef get_UsedTime(self): # String\n\t\treturn self.get_query_params().get('UsedTime')\n\n\tdef set_UsedTime(self, UsedTime): # String\n\t\tself.add_query_param('UsedTime', UsedTime)\n\tdef get_RestoreType(self): # String\n\t\treturn self.get_query_params().get('RestoreType')\n\n\tdef set_RestoreType(self, RestoreType): # String\n\t\tself.add_query_param('RestoreType', RestoreType)\n\tdef get_VPCId(self): # String\n\t\treturn self.get_query_params().get('VPCId')\n\n\tdef set_VPCId(self, VPCId): # String\n\t\tself.add_query_param('VPCId', VPCId)\n\tdef get_PayType(self): # String\n\t\treturn self.get_query_params().get('PayType')\n\n\tdef set_PayType(self, PayType): # String\n\t\tself.add_query_param('PayType', PayType)\n\tdef 
get_SourceRegion(self): # String\n\t\treturn self.get_query_params().get('SourceRegion')\n\n\tdef set_SourceRegion(self, SourceRegion): # String\n\t\tself.add_query_param('SourceRegion', SourceRegion)"},"code_compressed":{"kind":"null"}}},{"rowIdx":338,"cells":{"id":{"kind":"number","value":338,"string":"338"},"code":{"kind":"string","value":"# MIT License\n\n# Copyright (c) 2020 Development Seed\n# Copyright (c) 2021 Plan4Better\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport abc\nfrom dataclasses import dataclass\nfrom typing import Any, ClassVar, Dict, List, Optional\n\nfrom pydantic import BaseModel, Field\nfrom pydantic.class_validators import root_validator\nfrom pydantic.networks import AnyHttpUrl\n\nfrom src.core.config import settings\nfrom src.resources.enums import MimeTypes\n\n\n# =========================VECTOR TILE SCHEMAS=========================\nclass VectorTileLayer(BaseModel, metaclass=abc.ABCMeta):\n \"\"\"Layer's Abstract BaseClass.\n Attributes:\n id (str): Layer's name.\n bounds (list): Layer's bounds (left, bottom, right, top).\n minzoom (int): Layer's min zoom level.\n maxzoom (int): Layer's max zoom level.\n tileurl (str, optional): Layer's tiles url.\n\n \"\"\"\n\n id: str\n bounds: List[float] = [-180, -90, 180, 90]\n minzoom: int = settings.DEFAULT_MINZOOM\n maxzoom: int = settings.DEFAULT_MAXZOOM\n tileurl: Optional[str]\n\n\nclass VectorTileTable(VectorTileLayer):\n \"\"\"Table Reader.\n Attributes:\n id (str): Layer's name.\n bounds (list): Layer's bounds (left, bottom, right, top).\n minzoom (int): Layer's min zoom level.\n maxzoom (int): Layer's max zoom level.\n tileurl (str, optional): Layer's tiles url.\n type (str): Layer's type.\n schema (str): Table's database schema (e.g public).\n geometry_type (str): Table's geometry type (e.g polygon).\n geometry_column (str): Name of the geomtry column in the table.\n properties (Dict): Properties available in the table.\n \"\"\"\n\n type: str = \"Table\"\n dbschema: str = Field(..., alias=\"schema\")\n table: str\n geometry_type: str\n geometry_column: str\n properties: Dict[str, str]\n\n\nclass VectorTileFunction(VectorTileTable):\n \"\"\"Function Reader.\n Attributes:\n id (str): Layer's name.\n bounds (list): Layer's bounds (left, bottom, right, top).\n minzoom (int): Layer's min zoom level.\n maxzoom (int): Layer's max zoom level.\n tileurl (str, optional): Layer's tiles url.\n type (str): Layer's type.\n function_name (str): Nane of the SQL function to call. 
Defaults to `id`.\n sql (str): Valid SQL function which returns Tile data.\n options (list, optional): options available for the SQL function.\n \"\"\"\n\n type: str = \"Function\"\n sql: str\n function_name: Optional[str]\n options: Optional[List[Dict[str, Any]]]\n\n @root_validator\n def function_name_default(cls, values):\n \"\"\"Define default function's name to be same as id.\"\"\"\n function_name = values.get(\"function_name\")\n if function_name is None:\n values[\"function_name\"] = values.get(\"id\")\n return values\n\n @classmethod\n def from_file(cls, id: str, infile: str, **kwargs: Any):\n \"\"\"load sql from file\"\"\"\n with open(infile) as f:\n sql = f.read()\n\n return cls(id=id, sql=sql, **kwargs)\n\n\nclass TileMatrixSetLink(BaseModel):\n \"\"\"\n TileMatrixSetLink model.\n\n Based on http://docs.opengeospatial.org/per/19-069.html#_tilematrixsets\n\n \"\"\"\n\n href: AnyHttpUrl\n rel: str = \"item\"\n type: MimeTypes = MimeTypes.json\n\n class Config:\n \"\"\"Config for model.\"\"\"\n\n use_enum_values = True\n\n\nclass TileMatrixSetRef(BaseModel):\n \"\"\"\n TileMatrixSetRef model.\n\n Based on http://docs.opengeospatial.org/per/19-069.html#_tilematrixsets\n\n \"\"\"\n\n id: str\n title: str\n links: List[TileMatrixSetLink]\n\n\nclass TileMatrixSetList(BaseModel):\n \"\"\"\n TileMatrixSetList model.\n\n Based on http://docs.opengeospatial.org/per/19-069.html#_tilematrixsets\n\n \"\"\"\n\n tileMatrixSets: List[TileMatrixSetRef]\n\n\n@dataclass\nclass Registry:\n \"\"\"function registry\"\"\"\n\n funcs: ClassVar[Dict[str, VectorTileFunction]] = {}\n\n @classmethod\n def get(cls, key: str):\n \"\"\"lookup function by name\"\"\"\n return cls.funcs.get(key)\n\n @classmethod\n def METHOD_NAME(cls, *args: VectorTileFunction):\n \"\"\"register function(s)\"\"\"\n for func in args:\n cls.funcs[func.id] = func\n\n\nregistry = Registry()"},"code_compressed":{"kind":"null"}}},{"rowIdx":339,"cells":{"id":{"kind":"number","value":339,"string":"339"},"code":{"kind":"string","value":"import struct\nimport base64\nimport json\nfrom jmbitcoin import ecdsa_sign, ecdsa_verify\nfrom jmdaemon import fidelity_bond_sanity_check\nimport binascii\n\ndef assert_is_utxo(utxo):\n assert len(utxo) == 2\n assert isinstance(utxo[0], bytes)\n assert len(utxo[0]) == 32\n assert isinstance(utxo[1], int)\n assert utxo[1] >= 0\n\n\ndef METHOD_NAME(cert_pub, cert_expiry):\n return b'fidelity-bond-cert|' + cert_pub + b'|' + str(cert_expiry).encode('ascii')\n\ndef get_ascii_cert_msg(cert_pub, cert_expiry):\n return b'fidelity-bond-cert|' + binascii.hexlify(cert_pub) + b'|' + str(cert_expiry).encode('ascii')\n\nclass FidelityBond:\n def __init__(self, utxo, utxo_pubkey, locktime, cert_expiry,\n cert_privkey, cert_pubkey, cert_signature):\n assert_is_utxo(utxo)\n assert isinstance(utxo_pubkey, bytes)\n assert isinstance(locktime, int)\n assert isinstance(cert_expiry, int)\n assert isinstance(cert_privkey, bytes)\n assert isinstance(cert_pubkey, bytes)\n assert isinstance(cert_signature, bytes)\n self.utxo = utxo\n self.utxo_pubkey = utxo_pubkey\n self.locktime = locktime\n self.cert_expiry = cert_expiry\n self.cert_privkey = cert_privkey\n self.cert_pubkey = cert_pubkey\n self.cert_signature = cert_signature\n\n def create_proof(self, maker_nick, taker_nick):\n return FidelityBondProof(\n maker_nick, taker_nick, self.cert_pubkey, self.cert_expiry,\n self.cert_signature, self.utxo, self.utxo_pubkey, self.locktime)\n\n def serialize(self):\n return json.dumps([\n self.utxo,\n self.utxo_pubkey,\n self.locktime,\n 
self.cert_expiry,\n self.cert_privkey,\n self.cert_pubkey,\n self.cert_signature,\n ])\n\n @classmethod\n def deserialize(cls, data):\n return cls(*json.loads(data))\n\n\nclass FidelityBondProof:\n # nick_sig + cert_sig + cert_pubkey + cert_expiry + utxo_pubkey + txid + vout + timelock\n # 72 + 72 + 33 + 2 + 33 + 32 + 4 + 4 = 252 bytes\n SER_STUCT_FMT = '<72s72s33sH33s32sII'\n\n def __init__(self, maker_nick, taker_nick, cert_pub, cert_expiry,\n cert_sig, utxo, utxo_pub, locktime):\n assert isinstance(maker_nick, str)\n assert isinstance(taker_nick, str)\n assert isinstance(cert_pub, bytes)\n assert isinstance(cert_sig, bytes)\n assert isinstance(utxo_pub, bytes)\n assert isinstance(locktime, int)\n assert_is_utxo(utxo)\n self.maker_nick = maker_nick\n self.taker_nick = taker_nick\n self.cert_pub = cert_pub\n self.cert_expiry = cert_expiry\n self.cert_sig = cert_sig\n self.utxo = utxo\n self.utxo_pub = utxo_pub\n self.locktime = locktime\n\n @property\n def nick_msg(self):\n return (self.taker_nick + '|' + self.maker_nick).encode('ascii')\n\n def create_proof_msg(self, cert_priv):\n nick_sig = ecdsa_sign(self.nick_msg, cert_priv)\n # FIXME: remove stupid base64\n nick_sig = base64.b64decode(nick_sig)\n return self._serialize_proof_msg(nick_sig)\n\n def _serialize_proof_msg(self, msg_signature):\n msg_signature = msg_signature.rjust(72, b'\\xff')\n cert_sig = self.cert_sig.rjust(72, b'\\xff')\n fidelity_bond_data = struct.pack(\n self.SER_STUCT_FMT,\n msg_signature,\n cert_sig,\n self.cert_pub,\n self.cert_expiry,\n self.utxo_pub,\n self.utxo[0],\n self.utxo[1],\n self.locktime\n )\n return base64.b64encode(fidelity_bond_data).decode('ascii')\n\n @staticmethod\n def _verify_signature(message, signature, pubkey):\n # FIXME: remove stupid base64\n return ecdsa_verify(message, base64.b64encode(signature), pubkey)\n\n @classmethod\n def parse_and_verify_proof_msg(cls, maker_nick, taker_nick, data):\n if not fidelity_bond_sanity_check.fidelity_bond_sanity_check(data):\n raise ValueError(\"sanity check failed\")\n decoded_data = base64.b64decode(data)\n\n unpacked_data = struct.unpack(cls.SER_STUCT_FMT, decoded_data)\n try:\n signature = unpacked_data[0][unpacked_data[0].index(b'\\x30'):]\n cert_sig = unpacked_data[1][unpacked_data[1].index(b'\\x30'):]\n except ValueError:\n #raised if index() doesnt find the position\n raise ValueError(\"der signature header not found\")\n proof = cls(maker_nick, taker_nick, unpacked_data[2], unpacked_data[3],\n cert_sig, (unpacked_data[5], unpacked_data[6]),\n unpacked_data[4], unpacked_data[7])\n cert_msg = METHOD_NAME(proof.cert_pub, proof.cert_expiry)\n ascii_cert_msg = get_ascii_cert_msg(proof.cert_pub, proof.cert_expiry)\n\n if not cls._verify_signature(proof.nick_msg, signature, proof.cert_pub):\n raise ValueError(\"nick sig does not verify\")\n if not cls._verify_signature(cert_msg, proof.cert_sig, proof.utxo_pub) and\\\n not cls._verify_signature(ascii_cert_msg, proof.cert_sig, proof.utxo_pub):\n raise ValueError(\"cert sig does not verify\")\n\n return proof"},"code_compressed":{"kind":"null"}}},{"rowIdx":340,"cells":{"id":{"kind":"number","value":340,"string":"340"},"code":{"kind":"string","value":"# Copyright 2022 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# 
distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Tuple\nimport argparse\nimport os\nimport numpy as np\n\nimport nnabla as nn\nfrom nnabla.ext_utils import get_extension_context\nfrom nnabla.utils.data_iterator import DataIterator\nfrom nnabla.logger import logger\n\nfrom pointnet2 import pointnet2_classification_msg, pointnet2_classification_ssg\nfrom loss import classification_loss\nfrom running_utils import categorical_accuracy\n\n# Install neu (nnabla examples utils) to import these functions.\n# See [NEU](https://github.com/nnabla/nnabla-examples/tree/master/utils).\nfrom neu.datasets.modelnet40_normal_resampled import data_iterator_modelnet40_normal_resampled\nfrom neu.checkpoint_util import load_checkpoint\n\n\ndef eval_one_epoch(\n valid_data_iter: DataIterator,\n valid_vars: Dict[str, nn.Variable],\n valid_loss_vars: Dict[str, nn.Variable],\n) -> Tuple[np.ndarray, np.ndarray]:\n total_steps = 0\n total_accuracy = 0.0\n total_loss = 0.0\n num_iterations = valid_data_iter.size // valid_data_iter.batch_size\n\n for _ in range(num_iterations):\n point_cloud, label = valid_data_iter.next()\n\n valid_vars[\"point_cloud\"].d = point_cloud\n valid_vars[\"label\"].d = label\n valid_loss_vars[\"loss\"].forward(clear_buffer=True)\n\n pred_logits = valid_loss_vars[\"pred\"].d.copy()\n\n accuracy = categorical_accuracy(pred_logits, valid_vars[\"label\"].d)\n total_steps += 1\n total_accuracy += accuracy\n total_loss += float(valid_loss_vars[\"loss\"].d)\n\n average_accuracy = total_accuracy / float(total_steps)\n average_loss = total_loss / float(total_steps)\n\n return average_accuracy, average_loss\n\n\ndef evaluate(args):\n # Set context\n extension_module = args.context\n ctx = get_extension_context(extension_module, device_id=args.device_id)\n nn.set_default_context(ctx)\n\n # Feature dim, with normal vector or not\n feature_dim = 6 if args.with_normal else 3\n\n # Create validation graph\n valid_batch_size = 4 # Setting 4 is for using all data of valid dataset\n point_cloud_valid = nn.Variable(\n (valid_batch_size, args.num_points, feature_dim))\n label_valid = nn.Variable((valid_batch_size, 1))\n\n if args.model_type == \"ssg\":\n pred_valid = pointnet2_classification_ssg(\n point_cloud_valid, train=False, num_classes=args.num_classes)\n elif args.model_type == \"msg\":\n pred_valid = pointnet2_classification_msg(\n point_cloud_valid, train=False, num_classes=args.num_classes)\n else:\n raise ValueError\n\n pred_valid.persistent = True\n loss_valid = classification_loss(pred_valid, label_valid)\n valid_vars = {\"point_cloud\": point_cloud_valid, \"label\": label_valid}\n valid_loss_vars = {\"loss\": loss_valid, \"pred\": pred_valid}\n\n # Load snapshot\n load_checkpoint(args.checkpoint_json_path, {})\n\n # Data Iterator\n valid_data_iter = data_iterator_modelnet40_normal_resampled(\n args.data_dir,\n valid_batch_size,\n False,\n False,\n args.num_points,\n normalize=True,\n with_normal=args.with_normal,\n )\n logger.info(f\"Validation dataset size: {valid_data_iter.size}\")\n\n # Evaluation\n logger.info(f\"Evaluation starting ...\")\n accuracy, loss = eval_one_epoch(\n valid_data_iter, valid_vars, valid_loss_vars)\n logger.info(\"accuracy: {}\".format(accuracy))\n logger.info(\"loss: {}\".format(loss))\n\n\ndef METHOD_NAME():\n parser = 
argparse.ArgumentParser()\n parser.add_argument(\n \"--data_dir\", type=str, default=os.path.join(os.path.dirname(__file__), \"data\", \"modelnet40_normal_resampled\")\n )\n parser.add_argument(\"--model_type\", type=str,\n default=\"ssg\", choices=[\"msg\", \"ssg\"])\n parser.add_argument(\"--num_classes\", type=int, default=40)\n parser.add_argument(\"--num_points\", type=int, default=1024)\n parser.add_argument(\"--with_normal\", action=\"store_true\")\n\n parser.add_argument(\"--device_id\", type=int, default=0)\n parser.add_argument(\"--context\", type=str, default=\"cudnn\")\n parser.add_argument(\n \"--checkpoint_json_path\",\n type=str,\n default=\"./pointnet2_classification_result/seed_100/checkpoint_best/checkpoint_best.json\",\n )\n\n args = parser.parse_args()\n evaluate(args)\n\n\nif __name__ == \"__main__\":\n METHOD_NAME()"},"code_compressed":{"kind":"null"}}},{"rowIdx":341,"cells":{"id":{"kind":"number","value":341,"string":"341"},"code":{"kind":"string","value":"from typing import Optional\n\nfrom pydantic import Field, SecretStr\n\nfrom hummingbot.client.config.config_data_types import BaseConnectorConfigMap, ClientFieldData\nfrom hummingbot.connector.exchange.ndax import ndax_constants as CONSTANTS\nfrom hummingbot.core.utils.tracking_nonce import get_tracking_nonce\n\nCENTRALIZED = True\nEXAMPLE_PAIR = \"BTC-CAD\"\nHUMMINGBOT_ID_PREFIX = 777\n\n# NDAX fees: https://ndax.io/fees\n# Fees have to be expressed as percent value\nDEFAULT_FEES = [0.2, 0.2]\n\n\n# USE_ETHEREUM_WALLET not required because default value is false\n# FEE_TYPE not required because default value is Percentage\n# FEE_TOKEN not required because the fee is not flat\n\n\ndef convert_to_exchange_trading_pair(hb_trading_pair: str) -> str:\n return hb_trading_pair.replace(\"-\", \"\")\n\n\ndef get_new_client_order_id(is_buy: bool, trading_pair: str) -> str:\n ts_micro_sec: int = get_tracking_nonce()\n return f\"{HUMMINGBOT_ID_PREFIX}{ts_micro_sec}\"\n\n\ndef METHOD_NAME(connector_variant_label: Optional[str]) -> str:\n variant = connector_variant_label if connector_variant_label else \"ndax_main\"\n return CONSTANTS.REST_URLS.get(variant)\n\n\ndef wss_url(connector_variant_label: Optional[str]) -> str:\n variant = connector_variant_label if connector_variant_label else \"ndax_main\"\n return CONSTANTS.WSS_URLS.get(variant)\n\n\nclass NdaxConfigMap(BaseConnectorConfigMap):\n connector: str = Field(default=\"ndax\", client_data=None)\n ndax_uid: SecretStr = Field(\n default=...,\n client_data=ClientFieldData(\n prompt=lambda cm: \"Enter your NDAX user ID (uid)\",\n is_secure=True,\n is_connect_key=True,\n prompt_on_new=True,\n )\n )\n ndax_account_name: SecretStr = Field(\n default=...,\n client_data=ClientFieldData(\n prompt=lambda cm: \"Enter the name of the account you want to use\",\n is_secure=True,\n is_connect_key=True,\n prompt_on_new=True,\n )\n )\n ndax_api_key: SecretStr = Field(\n default=...,\n client_data=ClientFieldData(\n prompt=lambda cm: \"Enter your NDAX API key\",\n is_secure=True,\n is_connect_key=True,\n prompt_on_new=True,\n )\n )\n ndax_secret_key: SecretStr = Field(\n default=...,\n client_data=ClientFieldData(\n prompt=lambda cm: \"Enter your NDAX secret key\",\n is_secure=True,\n is_connect_key=True,\n prompt_on_new=True,\n )\n )\n\n class Config:\n title = \"ndax\"\n\n\nKEYS = NdaxConfigMap.construct()\n\nOTHER_DOMAINS = [\"ndax_testnet\"]\nOTHER_DOMAINS_PARAMETER = {\"ndax_testnet\": \"ndax_testnet\"}\nOTHER_DOMAINS_EXAMPLE_PAIR = {\"ndax_testnet\": 
\"BTC-CAD\"}\nOTHER_DOMAINS_DEFAULT_FEES = {\"ndax_testnet\": [0.2, 0.2]}\n\n\nclass NdaxTestnetConfigMap(BaseConnectorConfigMap):\n connector: str = Field(default=\"ndax_testnet\", client_data=None)\n ndax_testnet_uid: SecretStr = Field(\n default=...,\n client_data=ClientFieldData(\n prompt=lambda cm: \"Enter your NDAX Testnet user ID (uid)\",\n is_secure=True,\n is_connect_key=True,\n prompt_on_new=True,\n )\n )\n ndax_testnet_account_name: SecretStr = Field(\n default=...,\n client_data=ClientFieldData(\n prompt=lambda cm: \"Enter the name of the account you want to use\",\n is_secure=True,\n is_connect_key=True,\n prompt_on_new=True,\n )\n )\n ndax_testnet_api_key: SecretStr = Field(\n default=...,\n client_data=ClientFieldData(\n prompt=lambda cm: \"Enter your NDAX Testnet API key\",\n is_secure=True,\n is_connect_key=True,\n prompt_on_new=True,\n )\n )\n ndax_testnet_secret_key: SecretStr = Field(\n default=...,\n client_data=ClientFieldData(\n prompt=lambda cm: \"Enter your NDAX Testnet secret key\",\n is_secure=True,\n is_connect_key=True,\n prompt_on_new=True,\n )\n )\n\n class Config:\n title = \"ndax_testnet\"\n\n\nOTHER_DOMAINS_KEYS = {\"ndax_testnet\": NdaxTestnetConfigMap.construct()}"},"code_compressed":{"kind":"null"}}},{"rowIdx":342,"cells":{"id":{"kind":"number","value":342,"string":"342"},"code":{"kind":"string","value":"from abc import abstractmethod\nfrom typing import List, Iterator, Union\nfrom docutils import nodes\nfrom docutils.statemachine import ViewList, string2lines\nfrom docutils.parsers.rst import Directive, directives\n\nfrom conversion import transpile_py_to_r\n\ndef setup(app):\n app.add_directive('pharmpy-execute', PharmpyExecute)\n app.add_directive('pharmpy-code', PharmpyCode)\n\n return {\n 'version': '0.1',\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n\ndef csv_option(s):\n return [p.strip() for p in s.split(\",\")] if s else []\n\nclass RecursiveDirective(Directive):\n\n def _convert_lines_to_nodes(self, lines: List[str]) -> List[nodes.Node]:\n \"\"\"Turn an RST string into a node that can be used in the document.\n\n See https://github.com/sphinx-doc/sphinx/issues/8039\n \"\"\"\n\n node = nodes.Element()\n node.document = self.state.document\n self.state.nested_parse(\n ViewList(\n string2lines('\\n'.join(lines)),\n source='[SnippetDirective]',\n ),\n self.content_offset,\n node,\n )\n\n return node.children\n\nclass PharmpyAbstractCodeDirective(RecursiveDirective):\n\n option_spec = {\n 'linenos': directives.flag,\n 'lineno-start': directives.nonnegative_int,\n 'emphasize-lines': directives.unchanged_required,\n }\n\n def run(self):\n return self._nodes()\n\n def _nodes(self):\n lines = self._lines()\n return self._convert_lines_to_nodes(lines)\n\n @abstractmethod\n def _lines(self) -> List[str]:\n \"\"\"Return lines for this directive\"\"\"\n\n def _input(self):\n return [\n '.. tabs::',\n *METHOD_NAME(3, [\n '',\n '.. code-tab:: py',\n *METHOD_NAME(3, self._code_option_lines()),\n '',\n *METHOD_NAME(3, self.content),\n '',\n '.. 
code-tab:: r R',\n *METHOD_NAME(3, self._code_option_lines()),\n '',\n *METHOD_NAME(3, transpile_py_to_r(self.content)),\n ]),\n ]\n\n def _code_option_lines(self):\n if 'emphasize-lines' in self.options:\n yield f':emphasize-lines:{self.options.get(\"emphasize-lines\")}'\n if 'linenos' in self.options:\n yield ':linenos:'\n if 'lineno-start' in self.options:\n yield f':lineno-start:{self.options.get(\"lineno-start\")}'\n\n\nclass PharmpyExecute(PharmpyAbstractCodeDirective):\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = True\n has_content = True\n\n option_spec = {\n **PharmpyAbstractCodeDirective.option_spec,\n 'hide-code': directives.flag,\n 'hide-output': directives.flag,\n 'code-below': directives.flag,\n 'raises': csv_option,\n 'stderr': directives.flag,\n }\n\n def _lines(self) -> List[str]:\n return [\n f'.. container:: pharmpy-snippet{\"\" if \"hide-output\" in self.options else \" with-output\"}',\n '',\n *METHOD_NAME(3, self._input_output_lines())\n ]\n\n def _input_output_lines(self):\n # NOTE self._output should always be returned here, even when\n # `hide-output` is set, otherwise the code will not be executed.\n if 'hide-code' in self.options:\n return self._output()\n\n if 'code-below' in self.options:\n return [\n *self._output(),\n '',\n *self._input(),\n ]\n\n return [\n *self._input(),\n '',\n *self._output(),\n ]\n\n\n def _output(self):\n return [\n '.. jupyter-execute::',\n *METHOD_NAME(3, [\n *self._jupyter_option_lines(),\n '',\n *self.content\n ]),\n ]\n\n def _jupyter_option_lines(self):\n yield ':hide-code:'\n if 'hide-output' in self.options:\n yield ':hide-output:'\n if 'raise' in self.options:\n yield f':raises:{\",\".join(self.options.get(\"raises\"))}'\n if 'stderr' in self.options:\n yield ':stderr:'\n\n\nclass PharmpyCode(PharmpyAbstractCodeDirective):\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = True\n has_content = True\n\n option_spec = PharmpyAbstractCodeDirective.option_spec\n\n def _lines(self) -> List[str]:\n return [\n '.. 
container:: pharmpy-snippet',\n '',\n *METHOD_NAME(3, self._input())\n ]\n\n\ndef METHOD_NAME(n: int, lines: Union[List[str],Iterator[str]]):\n return map(lambda line: (' '*n + line) if line else line, lines)"},"code_compressed":{"kind":"null"}}},{"rowIdx":343,"cells":{"id":{"kind":"number","value":343,"string":"343"},"code":{"kind":"string","value":"import logging\n\nimport httpx\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.views import View\n\nfrom pydis_site import settings\nfrom pydis_site.apps.home.models import RepositoryMetadata\n\nlog = logging.getLogger(__name__)\n\n\nclass HomeView(View):\n \"\"\"The main landing page for the website.\"\"\"\n\n github_api = \"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n\n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n \"python-discord/site\",\n \"python-discord/bot\",\n \"python-discord/snekbox\",\n \"python-discord/sir-lancebot\",\n \"python-discord/metricity\",\n \"python-discord/king-arthur\",\n ]\n\n def __init__(self):\n \"\"\"Clean up stale RepositoryMetadata.\"\"\"\n if not settings.STATIC_BUILD:\n RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()\n\n # If no token is defined (for example in local development), then\n # it does not make sense to pass the Authorization header. More\n # specifically, GitHub will reject any requests from us due to the\n # invalid header. We can make a limited number of anonymous requests\n # though, which is useful for testing.\n if settings.GITHUB_TOKEN:\n self.headers = {\"Authorization\": f\"token {settings.GITHUB_TOKEN}\"}\n else:\n self.headers = {}\n\n def _get_api_data(self) -> dict[str, dict[str, str]]:\n \"\"\"\n Call the GitHub API and get information about our repos.\n\n If we're unable to get that info for any reason, return an empty dict.\n \"\"\"\n repo_dict = {}\n try:\n # Fetch the data from the GitHub API\n api_data: list[dict] = httpx.get(\n self.github_api,\n headers=self.headers,\n timeout=settings.TIMEOUT_PERIOD\n ).json()\n except httpx.TimeoutException:\n log.error(\"Request to fetch GitHub repository metadata for timed out!\")\n return repo_dict\n\n # Process the API data into our dict\n for repo in api_data:\n try:\n full_name = repo[\"full_name\"]\n\n if full_name in self.repos:\n repo_dict[full_name] = {\n \"full_name\": repo[\"full_name\"],\n \"description\": repo[\"description\"],\n \"language\": repo[\"language\"],\n \"forks_count\": repo[\"forks_count\"],\n \"stargazers_count\": repo[\"stargazers_count\"],\n }\n # Something is not right about the API data we got back from GitHub.\n except (TypeError, ConnectionError, KeyError) as e:\n log.error(\n \"Unable to parse the GitHub repository metadata from response!\",\n extra={\n 'api_data': api_data,\n 'error': e\n }\n )\n continue\n\n return repo_dict\n\n def METHOD_NAME(self) -> list[RepositoryMetadata]:\n \"\"\"Build a list of RepositoryMetadata objects that we can use to populate the front page.\"\"\"\n # First off, load the timestamp of the least recently updated entry.\n if settings.STATIC_BUILD:\n last_update = None\n else:\n last_update = (\n RepositoryMetadata.objects.values_list(\"last_updated\", flat=True)\n .order_by(\"last_updated\").first()\n )\n\n # If we did not retrieve any results here, we should import them!\n if last_update is None:\n\n # Try to get new data from 
the API. If it fails, we'll return an empty list.\n # In this case, we simply don't display our projects on the site.\n api_repositories = self._get_api_data()\n\n # Create all the repodata records in the database.\n data = [\n RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n for api_data in api_repositories.values()\n ]\n\n if settings.STATIC_BUILD:\n return data\n return RepositoryMetadata.objects.bulk_create(data)\n\n # If the data is stale, we should refresh it.\n if (timezone.now() - last_update).seconds > self.repository_cache_ttl:\n # Try to get new data from the API. If it fails, return the cached data.\n api_repositories = self._get_api_data()\n\n if not api_repositories:\n return RepositoryMetadata.objects.all()\n\n # Update or create all RepoData objects in self.repos\n database_repositories = []\n for api_data in api_repositories.values():\n repo_data, _created = RepositoryMetadata.objects.update_or_create(\n repo_name=api_data[\"full_name\"],\n defaults={\n 'repo_name': api_data[\"full_name\"],\n 'description': api_data[\"description\"],\n 'forks': api_data[\"forks_count\"],\n 'stargazers': api_data[\"stargazers_count\"],\n 'language': api_data[\"language\"],\n }\n )\n database_repositories.append(repo_data)\n return database_repositories\n\n # Otherwise, if the data is fresher than 2 minutes old, we should just return it.\n return RepositoryMetadata.objects.all()\n\n def get(self, request: WSGIRequest) -> HttpResponse:\n \"\"\"Collect repo data and render the homepage view.\"\"\"\n repo_data = self.METHOD_NAME()\n return render(request, \"home/index.html\", {\"repo_data\": repo_data})\n\n\ndef timeline(request: WSGIRequest) -> HttpResponse:\n \"\"\"Render timeline view.\"\"\"\n return render(request, 'home/timeline.html')"},"code_compressed":{"kind":"null"}}},{"rowIdx":344,"cells":{"id":{"kind":"number","value":344,"string":"344"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdksas.endpoint import endpoint_data\n\nclass DescribeSuspEventsRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeSuspEvents')\n\t\tself.set_method('POST')\n\n\t\tif hasattr(self, \"endpoint_map\"):\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\n\t\tif hasattr(self, \"endpoint_regional\"):\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\n\n\tdef get_Source(self): # String\n\t\treturn self.get_query_params().get('Source')\n\n\tdef set_Source(self, Source): # String\n\t\tself.add_query_param('Source', Source)\n\tdef get_ContainerFieldName(self): # String\n\t\treturn self.get_query_params().get('ContainerFieldName')\n\n\tdef set_ContainerFieldName(self, ContainerFieldName): # String\n\t\tself.add_query_param('ContainerFieldName', ContainerFieldName)\n\tdef get_SourceIp(self): # String\n\t\treturn self.get_query_params().get('SourceIp')\n\n\tdef set_SourceIp(self, SourceIp): # String\n\t\tself.add_query_param('SourceIp', SourceIp)\n\tdef get_EventNames(self): # String\n\t\treturn self.get_query_params().get('EventNames')\n\n\tdef set_EventNames(self, EventNames): # String\n\t\tself.add_query_param('EventNames', EventNames)\n\tdef get_From(self): # String\n\t\treturn self.get_query_params().get('From')\n\n\tdef set_From(self, _From): # String\n\t\tself.add_query_param('From', _From)\n\tdef get_Id(self): # Long\n\t\treturn self.get_query_params().get('Id')\n\n\tdef set_Id(self, Id): # Long\n\t\tself.add_query_param('Id', Id)\n\tdef get_TacticId(self): # String\n\t\treturn self.get_body_params().get('TacticId')\n\n\tdef set_TacticId(self, TacticId): # String\n\t\tself.add_body_params('TacticId', TacticId)\n\tdef get_AlarmUniqueInfo(self): # String\n\t\treturn self.get_query_params().get('AlarmUniqueInfo')\n\n\tdef set_AlarmUniqueInfo(self, AlarmUniqueInfo): # String\n\t\tself.add_query_param('AlarmUniqueInfo', AlarmUniqueInfo)\n\tdef get_UniqueInfo(self): # String\n\t\treturn self.get_query_params().get('UniqueInfo')\n\n\tdef set_UniqueInfo(self, UniqueInfo): # String\n\t\tself.add_query_param('UniqueInfo', UniqueInfo)\n\tdef get_GroupId(self): # Long\n\t\treturn self.get_query_params().get('GroupId')\n\n\tdef set_GroupId(self, GroupId): # Long\n\t\tself.add_query_param('GroupId', GroupId)\n\tdef get_OperateTimeEnd(self): # String\n\t\treturn self.get_query_params().get('OperateTimeEnd')\n\n\tdef set_OperateTimeEnd(self, OperateTimeEnd): # String\n\t\tself.add_query_param('OperateTimeEnd', OperateTimeEnd)\n\tdef get_Name(self): # String\n\t\treturn self.get_query_params().get('Name')\n\n\tdef set_Name(self, Name): # String\n\t\tself.add_query_param('Name', Name)\n\tdef get_Status(self): # String\n\t\treturn self.get_query_params().get('Status')\n\n\tdef set_Status(self, Status): # String\n\t\tself.add_query_param('Status', Status)\n\tdef get_Uuids(self): # String\n\t\treturn self.get_query_params().get('Uuids')\n\n\tdef set_Uuids(self, Uuids): # String\n\t\tself.add_query_param('Uuids', Uuids)\n\tdef get_TimeEnd(self): # String\n\t\treturn self.get_query_params().get('TimeEnd')\n\n\tdef set_TimeEnd(self, TimeEnd): # String\n\t\tself.add_query_param('TimeEnd', TimeEnd)\n\tdef get_TargetType(self): # String\n\t\treturn self.get_query_params().get('TargetType')\n\n\tdef set_TargetType(self, TargetType): # 
String\n\t\tself.add_query_param('TargetType', TargetType)\n\tdef METHOD_NAME(self): # String\n\t\treturn self.get_query_params().get('SortType')\n\n\tdef set_SortType(self, SortType): # String\n\t\tself.add_query_param('SortType', SortType)\n\tdef get_Remark(self): # String\n\t\treturn self.get_query_params().get('Remark')\n\n\tdef set_Remark(self, Remark): # String\n\t\tself.add_query_param('Remark', Remark)\n\tdef get_ContainerFieldValue(self): # String\n\t\treturn self.get_query_params().get('ContainerFieldValue')\n\n\tdef set_ContainerFieldValue(self, ContainerFieldValue): # String\n\t\tself.add_query_param('ContainerFieldValue', ContainerFieldValue)\n\tdef get_PageSize(self): # String\n\t\treturn self.get_query_params().get('PageSize')\n\n\tdef set_PageSize(self, PageSize): # String\n\t\tself.add_query_param('PageSize', PageSize)\n\tdef get_Lang(self): # String\n\t\treturn self.get_query_params().get('Lang')\n\n\tdef set_Lang(self, Lang): # String\n\t\tself.add_query_param('Lang', Lang)\n\tdef get_Dealed(self): # String\n\t\treturn self.get_query_params().get('Dealed')\n\n\tdef set_Dealed(self, Dealed): # String\n\t\tself.add_query_param('Dealed', Dealed)\n\tdef get_CurrentPage(self): # String\n\t\treturn self.get_query_params().get('CurrentPage')\n\n\tdef set_CurrentPage(self, CurrentPage): # String\n\t\tself.add_query_param('CurrentPage', CurrentPage)\n\tdef get_ClusterId(self): # String\n\t\treturn self.get_query_params().get('ClusterId')\n\n\tdef set_ClusterId(self, ClusterId): # String\n\t\tself.add_query_param('ClusterId', ClusterId)\n\tdef get_OperateErrorCodeLists(self): # RepeatList\n\t\treturn self.get_query_params().get('OperateErrorCodeList')\n\n\tdef set_OperateErrorCodeLists(self, OperateErrorCodeList): # RepeatList\n\t\tfor depth1 in range(len(OperateErrorCodeList)):\n\t\t\tself.add_query_param('OperateErrorCodeList.' + str(depth1 + 1), OperateErrorCodeList[depth1])\n\tdef get_SortColumn(self): # String\n\t\treturn self.get_query_params().get('SortColumn')\n\n\tdef set_SortColumn(self, SortColumn): # String\n\t\tself.add_query_param('SortColumn', SortColumn)\n\tdef get_AssetsTypeLists(self): # RepeatList\n\t\treturn self.get_query_params().get('AssetsTypeList')\n\n\tdef set_AssetsTypeLists(self, AssetsTypeList): # RepeatList\n\t\tfor depth1 in range(len(AssetsTypeList)):\n\t\t\tself.add_query_param('AssetsTypeList.' 
+ str(depth1 + 1), AssetsTypeList[depth1])\n\tdef get_OperateTimeStart(self): # String\n\t\treturn self.get_query_params().get('OperateTimeStart')\n\n\tdef set_OperateTimeStart(self, OperateTimeStart): # String\n\t\tself.add_query_param('OperateTimeStart', OperateTimeStart)\n\tdef get_TimeStart(self): # String\n\t\treturn self.get_query_params().get('TimeStart')\n\n\tdef set_TimeStart(self, TimeStart): # String\n\t\tself.add_query_param('TimeStart', TimeStart)\n\tdef get_Levels(self): # String\n\t\treturn self.get_query_params().get('Levels')\n\n\tdef set_Levels(self, Levels): # String\n\t\tself.add_query_param('Levels', Levels)\n\tdef get_ParentEventTypes(self): # String\n\t\treturn self.get_query_params().get('ParentEventTypes')\n\n\tdef set_ParentEventTypes(self, ParentEventTypes): # String\n\t\tself.add_query_param('ParentEventTypes', ParentEventTypes)"},"code_compressed":{"kind":"null"}}},{"rowIdx":345,"cells":{"id":{"kind":"number","value":345,"string":"345"},"code":{"kind":"string","value":"\"\"\"\nDatatypes for Anvi'o\nhttps://github.com/merenlab/anvio\n\"\"\"\nimport glob\nimport logging\nimport os\nfrom typing import Optional\n\nfrom galaxy.datatypes.metadata import MetadataElement\nfrom galaxy.datatypes.protocols import (\n DatasetProtocol,\n HasExtraFilesAndMetadata,\n)\nfrom galaxy.datatypes.text import Html\n\nlog = logging.getLogger(__name__)\n\n\nclass AnvioComposite(Html):\n \"\"\"\n Base class to use for Anvi'o composite datatypes.\n Generally consist of a sqlite database, plus optional additional files\n \"\"\"\n\n file_ext = \"anvio_composite\"\n composite_type = \"auto_primary_file\"\n\n def METHOD_NAME(self, dataset: HasExtraFilesAndMetadata) -> str:\n \"\"\"\n This is called only at upload to write the html file\n cannot rename the datasets here - they come with the default unfortunately\n \"\"\"\n defined_files = self.get_composite_files(dataset=dataset).items()\n rval = [f\"Files for Anvi'o Composite Dataset ({self.file_ext})\"]\n if defined_files:\n rval.append(\"

This composite dataset is composed of the following defined files:<p/><ul>\")\n for composite_name, composite_file in defined_files:\n opt_text = \"\"\n if composite_file.optional:\n opt_text = \" (optional)\"\n missing_text = \"\"\n if not os.path.exists(os.path.join(dataset.extra_files_path, composite_name)):\n missing_text = \" (missing)\"\n rval.append(f'<li>{composite_name}{opt_text}{missing_text}</li>')\n rval.append(\"</ul>\")\n defined_files = map(lambda x: x[0], defined_files)\n extra_files = []\n for dirpath, _dirnames, filenames in os.walk(dataset.extra_files_path, followlinks=True):\n for filename in filenames:\n rel_path = os.path.relpath(os.path.join(dirpath, filename), dataset.extra_files_path)\n if rel_path not in defined_files:\n extra_files.append(rel_path)\n if extra_files:\n rval.append(\"<p/>This composite dataset contains these undefined files:<p/><ul>\")\n for rel_path in extra_files:\n rval.append(f'<li>{rel_path}</li>')\n rval.append(\"</ul>\")\n if not (defined_files or extra_files):\n rval.append(\"<p/>This composite dataset does not contain any files!<p/><ul></ul>
    \")\n rval.append(\"\")\n return \"\\n\".join(rval)\n\n def get_mime(self) -> str:\n \"\"\"Returns the mime type of the datatype\"\"\"\n return \"text/html\"\n\n def set_peek(self, dataset: DatasetProtocol, **kwd) -> None:\n \"\"\"Set the peek and blurb text\"\"\"\n if not dataset.dataset.purged:\n dataset.peek = \"Anvio database (multiple files)\"\n dataset.blurb = \"Anvio database (multiple files)\"\n else:\n dataset.peek = \"file does not exist\"\n dataset.blurb = \"file purged from disk\"\n\n def display_peek(self, dataset: DatasetProtocol) -> str:\n \"\"\"Create HTML content, used for displaying peek.\"\"\"\n try:\n return dataset.peek\n except Exception:\n return \"Anvio database (multiple files)\"\n\n\nclass AnvioDB(AnvioComposite):\n \"\"\"Class for AnvioDB database files.\"\"\"\n\n _anvio_basename: Optional[str] = None\n MetadataElement(name=\"anvio_basename\", default=_anvio_basename, desc=\"Basename\", readonly=True)\n file_ext = \"anvio_db\"\n\n def __init__(self, *args, **kwd):\n super().__init__(*args, **kwd)\n if self._anvio_basename is not None:\n self.add_composite_file(self._anvio_basename, is_binary=True, optional=False)\n\n def set_meta(self, dataset: DatasetProtocol, overwrite: bool = True, **kwd) -> None:\n \"\"\"\n Set the anvio_basename based upon actual extra_files_path contents.\n \"\"\"\n super().set_meta(dataset, overwrite=overwrite, **kwd)\n if dataset.metadata.anvio_basename is not None and os.path.exists(\n os.path.join(dataset.extra_files_path, dataset.metadata.anvio_basename)\n ):\n return\n found = False\n for basename in [dataset.metadata.anvio_basename, self._anvio_basename]:\n if found:\n break\n if basename is not None and not os.path.exists(os.path.join(dataset.extra_files_path, basename)):\n for name in glob.glob(os.path.join(dataset.extra_files_path, f\"*{basename}\")):\n dataset.metadata.anvio_basename = os.path.basename(name)\n found = True\n break\n\n\nclass AnvioStructureDB(AnvioDB):\n \"\"\"Class for Anvio Structure DB database files.\"\"\"\n\n _anvio_basename = \"STRUCTURE.db\"\n MetadataElement(name=\"anvio_basename\", default=_anvio_basename, desc=\"Basename\", readonly=True)\n file_ext = \"anvio_structure_db\"\n\n\nclass AnvioGenomesDB(AnvioDB):\n \"\"\"Class for Anvio Genomes DB database files.\"\"\"\n\n _anvio_basename = \"-GENOMES.db\"\n MetadataElement(name=\"anvio_basename\", default=_anvio_basename, desc=\"Basename\", readonly=True)\n file_ext = \"anvio_genomes_db\"\n\n\nclass AnvioContigsDB(AnvioDB):\n \"\"\"Class for Anvio Contigs DB database files.\"\"\"\n\n _anvio_basename = \"CONTIGS.db\"\n MetadataElement(name=\"anvio_basename\", default=_anvio_basename, desc=\"Basename\", readonly=True)\n file_ext = \"anvio_contigs_db\"\n\n def __init__(self, *args, **kwd):\n super().__init__(*args, **kwd)\n self.add_composite_file(\"CONTIGS.h5\", is_binary=True, optional=True)\n\n\nclass AnvioProfileDB(AnvioDB):\n \"\"\"Class for Anvio Profile DB database files.\"\"\"\n\n _anvio_basename = \"PROFILE.db\"\n MetadataElement(name=\"anvio_basename\", default=_anvio_basename, desc=\"Basename\", readonly=True)\n file_ext = \"anvio_profile_db\"\n\n def __init__(self, *args, **kwd):\n super().__init__(*args, **kwd)\n self.add_composite_file(\"RUNINFO.cp\", is_binary=True, optional=True)\n self.add_composite_file(\"RUNINFO.mcp\", is_binary=True, optional=True)\n self.add_composite_file(\"AUXILIARY_DATA.db\", is_binary=True, optional=True)\n self.add_composite_file(\"RUNLOG.txt\", is_binary=False, optional=True)\n\n\nclass AnvioPanDB(AnvioDB):\n 
\"\"\"Class for Anvio Pan DB database files.\"\"\"\n\n _anvio_basename = \"PAN.db\"\n MetadataElement(name=\"anvio_basename\", default=_anvio_basename, desc=\"Basename\", readonly=True)\n file_ext = \"anvio_pan_db\"\n\n\nclass AnvioSamplesDB(AnvioDB):\n \"\"\"Class for Anvio Samples DB database files.\"\"\"\n\n _anvio_basename = \"SAMPLES.db\"\n MetadataElement(name=\"anvio_basename\", default=_anvio_basename, desc=\"Basename\", readonly=True)\n file_ext = \"anvio_samples_db\""},"code_compressed":{"kind":"null"}}},{"rowIdx":346,"cells":{"id":{"kind":"number","value":346,"string":"346"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdkrds.endpoint import endpoint_data\n\nclass UpgradeDBInstanceMajorVersionRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'Rds', '2014-08-15', 'UpgradeDBInstanceMajorVersion')\n\t\tself.set_method('POST')\n\n\t\tif hasattr(self, \"endpoint_map\"):\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\n\t\tif hasattr(self, \"endpoint_regional\"):\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\n\n\tdef get_ResourceOwnerId(self): # Long\n\t\treturn self.get_query_params().get('ResourceOwnerId')\n\n\tdef set_ResourceOwnerId(self, ResourceOwnerId): # Long\n\t\tself.add_query_param('ResourceOwnerId', ResourceOwnerId)\n\tdef get_DBInstanceStorage(self): # Integer\n\t\treturn self.get_query_params().get('DBInstanceStorage')\n\n\tdef set_DBInstanceStorage(self, DBInstanceStorage): # Integer\n\t\tself.add_query_param('DBInstanceStorage', DBInstanceStorage)\n\tdef get_ZoneIdSlave1(self): # String\n\t\treturn self.get_query_params().get('ZoneIdSlave1')\n\n\tdef set_ZoneIdSlave1(self, ZoneIdSlave1): # String\n\t\tself.add_query_param('ZoneIdSlave1', ZoneIdSlave1)\n\tdef get_ZoneIdSlave2(self): # String\n\t\treturn self.get_query_params().get('ZoneIdSlave2')\n\n\tdef set_ZoneIdSlave2(self, ZoneIdSlave2): # String\n\t\tself.add_query_param('ZoneIdSlave2', ZoneIdSlave2)\n\tdef get_SwitchTimeMode(self): # String\n\t\treturn self.get_query_params().get('SwitchTimeMode')\n\n\tdef set_SwitchTimeMode(self, SwitchTimeMode): # String\n\t\tself.add_query_param('SwitchTimeMode', SwitchTimeMode)\n\tdef get_SwitchOver(self): # String\n\t\treturn self.get_query_params().get('SwitchOver')\n\n\tdef set_SwitchOver(self, SwitchOver): # String\n\t\tself.add_query_param('SwitchOver', SwitchOver)\n\tdef get_CollectStatMode(self): # String\n\t\treturn self.get_query_params().get('CollectStatMode')\n\n\tdef set_CollectStatMode(self, CollectStatMode): # String\n\t\tself.add_query_param('CollectStatMode', CollectStatMode)\n\tdef get_SwitchTime(self): # String\n\t\treturn 
self.get_query_params().get('SwitchTime')\n\n\tdef set_SwitchTime(self, SwitchTime): # String\n\t\tself.add_query_param('SwitchTime', SwitchTime)\n\tdef get_DBInstanceId(self): # String\n\t\treturn self.get_query_params().get('DBInstanceId')\n\n\tdef set_DBInstanceId(self, DBInstanceId): # String\n\t\tself.add_query_param('DBInstanceId', DBInstanceId)\n\tdef METHOD_NAME(self): # String\n\t\treturn self.get_query_params().get('DBInstanceStorageType')\n\n\tdef set_DBInstanceStorageType(self, DBInstanceStorageType): # String\n\t\tself.add_query_param('DBInstanceStorageType', DBInstanceStorageType)\n\tdef get_Period(self): # String\n\t\treturn self.get_query_params().get('Period')\n\n\tdef set_Period(self, Period): # String\n\t\tself.add_query_param('Period', Period)\n\tdef get_UsedTime(self): # String\n\t\treturn self.get_query_params().get('UsedTime')\n\n\tdef set_UsedTime(self, UsedTime): # String\n\t\tself.add_query_param('UsedTime', UsedTime)\n\tdef get_DBInstanceClass(self): # String\n\t\treturn self.get_query_params().get('DBInstanceClass')\n\n\tdef set_DBInstanceClass(self, DBInstanceClass): # String\n\t\tself.add_query_param('DBInstanceClass', DBInstanceClass)\n\tdef get_VSwitchId(self): # String\n\t\treturn self.get_query_params().get('VSwitchId')\n\n\tdef set_VSwitchId(self, VSwitchId): # String\n\t\tself.add_query_param('VSwitchId', VSwitchId)\n\tdef get_PrivateIpAddress(self): # String\n\t\treturn self.get_query_params().get('PrivateIpAddress')\n\n\tdef set_PrivateIpAddress(self, PrivateIpAddress): # String\n\t\tself.add_query_param('PrivateIpAddress', PrivateIpAddress)\n\tdef get_VPCId(self): # String\n\t\treturn self.get_query_params().get('VPCId')\n\n\tdef set_VPCId(self, VPCId): # String\n\t\tself.add_query_param('VPCId', VPCId)\n\tdef get_ZoneId(self): # String\n\t\treturn self.get_query_params().get('ZoneId')\n\n\tdef set_ZoneId(self, ZoneId): # String\n\t\tself.add_query_param('ZoneId', ZoneId)\n\tdef get_PayType(self): # String\n\t\treturn self.get_query_params().get('PayType')\n\n\tdef set_PayType(self, PayType): # String\n\t\tself.add_query_param('PayType', PayType)\n\tdef get_InstanceNetworkType(self): # String\n\t\treturn self.get_query_params().get('InstanceNetworkType')\n\n\tdef set_InstanceNetworkType(self, InstanceNetworkType): # String\n\t\tself.add_query_param('InstanceNetworkType', InstanceNetworkType)\n\tdef get_TargetMajorVersion(self): # String\n\t\treturn self.get_query_params().get('TargetMajorVersion')\n\n\tdef set_TargetMajorVersion(self, TargetMajorVersion): # String\n\t\tself.add_query_param('TargetMajorVersion', TargetMajorVersion)"},"code_compressed":{"kind":"null"}}},{"rowIdx":347,"cells":{"id":{"kind":"number","value":347,"string":"347"},"code":{"kind":"string","value":"from __future__ import annotations\n\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom boa3.internal import constants\nfrom boa3.internal.model.builtin.method.builtinmethod import IBuiltinMethod\nfrom boa3.internal.model.expression import IExpression\nfrom boa3.internal.model.method import Method\nfrom boa3.internal.model.property import Property\nfrom boa3.internal.model.type.classes.classarraytype import ClassArrayType\nfrom boa3.internal.model.variable import Variable\nfrom boa3.internal.neo.vm.opcode.Opcode import Opcode\n\n\nclass BlockType(ClassArrayType):\n \"\"\"\n A class used to represent Neo Block class\n \"\"\"\n\n def __init__(self):\n super().__init__('Block')\n from boa3.internal.model.type.type import Type\n from 
boa3.internal.model.type.collection.sequence.uint160type import UInt160Type\n from boa3.internal.model.type.collection.sequence.uint256type import UInt256Type\n\n uint256 = UInt256Type.build()\n\n self._variables: Dict[str, Variable] = {\n 'hash': Variable(uint256),\n 'version': Variable(Type.int),\n 'previous_hash': Variable(uint256),\n 'merkle_root': Variable(uint256),\n 'timestamp': Variable(Type.int),\n 'nonce': Variable(Type.int),\n 'index': Variable(Type.int),\n 'primary_index': Variable(Type.int),\n 'next_consensus': Variable(UInt160Type.build()),\n 'transaction_count': Variable(Type.int)\n }\n self._constructor: Method = None\n\n @property\n def class_variables(self) -> Dict[str, Variable]:\n return {}\n\n @property\n def instance_variables(self) -> Dict[str, Variable]:\n return self._variables.copy()\n\n @property\n def properties(self) -> Dict[str, Property]:\n return {}\n\n @property\n def static_methods(self) -> Dict[str, Method]:\n return {}\n\n @property\n def class_methods(self) -> Dict[str, Method]:\n return {}\n\n @property\n def instance_methods(self) -> Dict[str, Method]:\n return {}\n\n def constructor_method(self) -> Optional[Method]:\n # was having a problem with recursive import\n if self._constructor is None:\n self._constructor: Method = BlockMethod(self)\n return self._constructor\n\n @classmethod\n def build(cls, value: Any = None) -> BlockType:\n if value is None or cls._is_type_of(value):\n return _Block\n\n @classmethod\n def _is_type_of(cls, value: Any):\n return isinstance(value, BlockType)\n\n\n_Block = BlockType()\n\n\nclass BlockMethod(IBuiltinMethod):\n\n def __init__(self, return_type: BlockType):\n identifier = '-Block__init__'\n args: Dict[str, Variable] = {}\n super().__init__(identifier, args, return_type=return_type)\n\n def validate_parameters(self, *params: IExpression) -> bool:\n return len(params) == 0\n\n @property\n def METHOD_NAME(self) -> List[Tuple[Opcode, bytes]]:\n from boa3.internal.neo.vm.type.Integer import Integer\n\n uint160_default = Integer(constants.SIZE_OF_INT160).to_byte_array() + bytes(constants.SIZE_OF_INT160)\n uint256_default = Integer(constants.SIZE_OF_INT256).to_byte_array() + bytes(constants.SIZE_OF_INT256)\n\n return [\n (Opcode.PUSH0, b''), # transaction_count\n (Opcode.PUSHDATA1, uint160_default), # next_consensus\n (Opcode.PUSH0, b''), # primary_index\n (Opcode.PUSH0, b''), # index\n (Opcode.PUSH0, b''), # nonce\n (Opcode.PUSH0, b''), # timestamp\n (Opcode.PUSHDATA1, uint256_default), # merkle_root\n (Opcode.PUSHDATA1, uint256_default), # previous_hash\n (Opcode.PUSH0, b''), # version\n (Opcode.PUSHDATA1, uint256_default), # hash\n (Opcode.PUSH10, b''),\n (Opcode.PACK, b'')\n ]\n\n @property\n def _args_on_stack(self) -> int:\n return len(self.args)\n\n @property\n def _body(self) -> Optional[str]:\n return"},"code_compressed":{"kind":"null"}}},{"rowIdx":348,"cells":{"id":{"kind":"number","value":348,"string":"348"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\n\nclass CreateInstanceRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'PetaData', '2016-01-01', 'CreateInstance','petadata')\n\t\tself.set_method('POST')\n\n\tdef get_ResourceOwnerId(self):\n\t\treturn self.get_query_params().get('ResourceOwnerId')\n\n\tdef set_ResourceOwnerId(self,ResourceOwnerId):\n\t\tself.add_query_param('ResourceOwnerId',ResourceOwnerId)\n\n\tdef get_NodeSpec(self):\n\t\treturn self.get_query_params().get('NodeSpec')\n\n\tdef set_NodeSpec(self,NodeSpec):\n\t\tself.add_query_param('NodeSpec',NodeSpec)\n\n\tdef get_ClientToken(self):\n\t\treturn self.get_query_params().get('ClientToken')\n\n\tdef set_ClientToken(self,ClientToken):\n\t\tself.add_query_param('ClientToken',ClientToken)\n\n\tdef get_NetworkType(self):\n\t\treturn self.get_query_params().get('NetworkType')\n\n\tdef set_NetworkType(self,NetworkType):\n\t\tself.add_query_param('NetworkType',NetworkType)\n\n\tdef get_AccountName(self):\n\t\treturn self.get_query_params().get('AccountName')\n\n\tdef set_AccountName(self,AccountName):\n\t\tself.add_query_param('AccountName',AccountName)\n\n\tdef get_SecurityToken(self):\n\t\treturn self.get_query_params().get('SecurityToken')\n\n\tdef set_SecurityToken(self,SecurityToken):\n\t\tself.add_query_param('SecurityToken',SecurityToken)\n\n\tdef get_NodeNumber(self):\n\t\treturn self.get_query_params().get('NodeNumber')\n\n\tdef set_NodeNumber(self,NodeNumber):\n\t\tself.add_query_param('NodeNumber',NodeNumber)\n\n\tdef get_ResourceOwnerAccount(self):\n\t\treturn self.get_query_params().get('ResourceOwnerAccount')\n\n\tdef set_ResourceOwnerAccount(self,ResourceOwnerAccount):\n\t\tself.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)\n\n\tdef get_OwnerAccount(self):\n\t\treturn self.get_query_params().get('OwnerAccount')\n\n\tdef set_OwnerAccount(self,OwnerAccount):\n\t\tself.add_query_param('OwnerAccount',OwnerAccount)\n\n\tdef get_OwnerId(self):\n\t\treturn self.get_query_params().get('OwnerId')\n\n\tdef set_OwnerId(self,OwnerId):\n\t\tself.add_query_param('OwnerId',OwnerId)\n\n\tdef get_SecurityIPList(self):\n\t\treturn self.get_query_params().get('SecurityIPList')\n\n\tdef set_SecurityIPList(self,SecurityIPList):\n\t\tself.add_query_param('SecurityIPList',SecurityIPList)\n\n\tdef get_VSwitchId(self):\n\t\treturn self.get_query_params().get('VSwitchId')\n\n\tdef set_VSwitchId(self,VSwitchId):\n\t\tself.add_query_param('VSwitchId',VSwitchId)\n\n\tdef get_AccountPassword(self):\n\t\treturn self.get_query_params().get('AccountPassword')\n\n\tdef set_AccountPassword(self,AccountPassword):\n\t\tself.add_query_param('AccountPassword',AccountPassword)\n\n\tdef get_InstanceName(self):\n\t\treturn self.get_query_params().get('InstanceName')\n\n\tdef set_InstanceName(self,InstanceName):\n\t\tself.add_query_param('InstanceName',InstanceName)\n\n\tdef get_DBName(self):\n\t\treturn self.get_query_params().get('DBName')\n\n\tdef set_DBName(self,DBName):\n\t\tself.add_query_param('DBName',DBName)\n\n\tdef get_VpcId(self):\n\t\treturn self.get_query_params().get('VpcId')\n\n\tdef 
set_VpcId(self,VpcId):\n\t\tself.add_query_param('VpcId',VpcId)\n\n\tdef get_ZoneId(self):\n\t\treturn self.get_query_params().get('ZoneId')\n\n\tdef set_ZoneId(self,ZoneId):\n\t\tself.add_query_param('ZoneId',ZoneId)\n\n\tdef get_ChargeType(self):\n\t\treturn self.get_query_params().get('ChargeType')\n\n\tdef METHOD_NAME(self,ChargeType):\n\t\tself.add_query_param('ChargeType',ChargeType"},"code_compressed":{"kind":"null"}}},{"rowIdx":349,"cells":{"id":{"kind":"number","value":349,"string":"349"},"code":{"kind":"string","value":"#!/usr/bin/env python3\n# Copyright (c) 2014-2021 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n'''\nScript to generate list of seed nodes for chainparams.cpp.\n\nThis script expects two text files in the directory that is passed as an\nargument:\n\n nodes_main.txt\n nodes_test.txt\n\nThese files must consist of lines in the format\n\n :\n []:\n .onion:\n .b32.i2p:\n\nThe output will be two data structures with the peers in binary format:\n\n static const uint8_t chainparams_seed_{main,test}[]={\n ...\n }\n\nThese should be pasted into `src/chainparamsseeds.h`.\n'''\n\nfrom base64 import b32decode\nfrom enum import Enum\nimport struct\nimport sys\nimport os\nimport re\n\nclass BIP155Network(Enum):\n IPV4 = 1\n IPV6 = 2\n TORV2 = 3 # no longer supported\n TORV3 = 4\n I2P = 5\n CJDNS = 6\n\ndef name_to_bip155(addr):\n '''Convert address string to BIP155 (networkID, addr) tuple.'''\n if addr.endswith('.onion'):\n vchAddr = b32decode(addr[0:-6], True)\n if len(vchAddr) == 35:\n assert vchAddr[34] == 3\n return (BIP155Network.TORV3, vchAddr[:32])\n elif len(vchAddr) == 10:\n return (BIP155Network.TORV2, vchAddr)\n else:\n raise ValueError('Invalid onion %s' % vchAddr)\n elif addr.endswith('.b32.i2p'):\n vchAddr = b32decode(addr[0:-8] + '====', True)\n if len(vchAddr) == 32:\n return (BIP155Network.I2P, vchAddr)\n else:\n raise ValueError(f'Invalid I2P {vchAddr}')\n elif '.' 
in addr: # IPv4\n return (BIP155Network.IPV4, bytes((int(x) for x in addr.split('.'))))\n elif ':' in addr: # IPv6\n sub = [[], []] # prefix, suffix\n x = 0\n addr = addr.split(':')\n for i,comp in enumerate(addr):\n if comp == '':\n if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end\n continue\n x += 1 # :: skips to suffix\n assert(x < 2)\n else: # two bytes per component\n val = int(comp, 16)\n sub[x].append(val >> 8)\n sub[x].append(val & 0xff)\n nullbytes = 16 - len(sub[0]) - len(sub[1])\n assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))\n return (BIP155Network.IPV6, bytes(sub[0] + ([0] * nullbytes) + sub[1]))\n else:\n raise ValueError('Could not parse address %s' % addr)\n\ndef parse_spec(s):\n '''Convert endpoint string to BIP155 (networkID, addr, port) tuple.'''\n match = re.match(r'\\[([0-9a-fA-F:]+)\\](?::([0-9]+))?$', s)\n if match: # ipv6\n host = match.group(1)\n port = match.group(2)\n elif s.count(':') > 1: # ipv6, no port\n host = s\n port = ''\n else:\n (host,_,port) = s.partition(':')\n\n if not port:\n port = 0\n else:\n port = int(port)\n\n host = name_to_bip155(host)\n\n if host[0] == BIP155Network.TORV2:\n return None # TORV2 is no longer supported, so we ignore it\n else:\n return host + (port, )\n\ndef ser_compact_size(l):\n r = b\"\"\n if l < 253:\n r = struct.pack(\"B\", l)\n elif l < 0x10000:\n r = struct.pack(\"H', spec[2])\n return r\n\ndef METHOD_NAME(g, f, structname):\n g.write('static const uint8_t %s[] = {\\n' % structname)\n for line in f:\n comment = line.find('#')\n if comment != -1:\n line = line[0:comment]\n line = line.strip()\n if not line:\n continue\n\n spec = parse_spec(line)\n if spec is None: # ignore this entry (e.g. no longer supported addresses like TORV2)\n continue\n blob = bip155_serialize(spec)\n hoststr = ','.join(('0x%02x' % b) for b in blob)\n g.write(f' {hoststr},\\n')\n g.write('};\\n')\n\ndef main():\n if len(sys.argv)<2:\n print(('Usage: %s ' % sys.argv[0]), file=sys.stderr)\n sys.exit(1)\n g = sys.stdout\n indir = sys.argv[1]\n g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\\n')\n g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\\n')\n g.write('/**\\n')\n g.write(' * List of fixed seed nodes for the bitcoin network\\n')\n g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\\n')\n g.write(' *\\n')\n g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\\n')\n g.write(' */\\n')\n with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding=\"utf8\") as f:\n METHOD_NAME(g, f, 'chainparams_seed_main')\n g.write('\\n')\n with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding=\"utf8\") as f:\n METHOD_NAME(g, f, 'chainparams_seed_test')\n g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\\n')\n\nif __name__ == '__main__':\n main()"},"code_compressed":{"kind":"null"}}},{"rowIdx":350,"cells":{"id":{"kind":"number","value":350,"string":"350"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\n\nclass CreateInstanceRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'PetaData', '2016-01-01', 'CreateInstance','petadata')\n\t\tself.set_method('POST')\n\n\tdef get_ResourceOwnerId(self):\n\t\treturn self.get_query_params().get('ResourceOwnerId')\n\n\tdef set_ResourceOwnerId(self,ResourceOwnerId):\n\t\tself.add_query_param('ResourceOwnerId',ResourceOwnerId)\n\n\tdef get_NodeSpec(self):\n\t\treturn self.get_query_params().get('NodeSpec')\n\n\tdef set_NodeSpec(self,NodeSpec):\n\t\tself.add_query_param('NodeSpec',NodeSpec)\n\n\tdef METHOD_NAME(self):\n\t\treturn self.get_query_params().get('ClientToken')\n\n\tdef set_ClientToken(self,ClientToken):\n\t\tself.add_query_param('ClientToken',ClientToken)\n\n\tdef get_NetworkType(self):\n\t\treturn self.get_query_params().get('NetworkType')\n\n\tdef set_NetworkType(self,NetworkType):\n\t\tself.add_query_param('NetworkType',NetworkType)\n\n\tdef get_AccountName(self):\n\t\treturn self.get_query_params().get('AccountName')\n\n\tdef set_AccountName(self,AccountName):\n\t\tself.add_query_param('AccountName',AccountName)\n\n\tdef get_SecurityToken(self):\n\t\treturn self.get_query_params().get('SecurityToken')\n\n\tdef set_SecurityToken(self,SecurityToken):\n\t\tself.add_query_param('SecurityToken',SecurityToken)\n\n\tdef get_NodeNumber(self):\n\t\treturn self.get_query_params().get('NodeNumber')\n\n\tdef set_NodeNumber(self,NodeNumber):\n\t\tself.add_query_param('NodeNumber',NodeNumber)\n\n\tdef get_ResourceOwnerAccount(self):\n\t\treturn self.get_query_params().get('ResourceOwnerAccount')\n\n\tdef set_ResourceOwnerAccount(self,ResourceOwnerAccount):\n\t\tself.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)\n\n\tdef get_OwnerAccount(self):\n\t\treturn self.get_query_params().get('OwnerAccount')\n\n\tdef set_OwnerAccount(self,OwnerAccount):\n\t\tself.add_query_param('OwnerAccount',OwnerAccount)\n\n\tdef get_OwnerId(self):\n\t\treturn self.get_query_params().get('OwnerId')\n\n\tdef set_OwnerId(self,OwnerId):\n\t\tself.add_query_param('OwnerId',OwnerId)\n\n\tdef get_SecurityIPList(self):\n\t\treturn self.get_query_params().get('SecurityIPList')\n\n\tdef set_SecurityIPList(self,SecurityIPList):\n\t\tself.add_query_param('SecurityIPList',SecurityIPList)\n\n\tdef get_VSwitchId(self):\n\t\treturn self.get_query_params().get('VSwitchId')\n\n\tdef set_VSwitchId(self,VSwitchId):\n\t\tself.add_query_param('VSwitchId',VSwitchId)\n\n\tdef get_AccountPassword(self):\n\t\treturn self.get_query_params().get('AccountPassword')\n\n\tdef set_AccountPassword(self,AccountPassword):\n\t\tself.add_query_param('AccountPassword',AccountPassword)\n\n\tdef get_InstanceName(self):\n\t\treturn self.get_query_params().get('InstanceName')\n\n\tdef set_InstanceName(self,InstanceName):\n\t\tself.add_query_param('InstanceName',InstanceName)\n\n\tdef get_DBName(self):\n\t\treturn self.get_query_params().get('DBName')\n\n\tdef set_DBName(self,DBName):\n\t\tself.add_query_param('DBName',DBName)\n\n\tdef get_VpcId(self):\n\t\treturn self.get_query_params().get('VpcId')\n\n\tdef 
set_VpcId(self,VpcId):\n\t\tself.add_query_param('VpcId',VpcId)\n\n\tdef get_ZoneId(self):\n\t\treturn self.get_query_params().get('ZoneId')\n\n\tdef set_ZoneId(self,ZoneId):\n\t\tself.add_query_param('ZoneId',ZoneId)\n\n\tdef get_ChargeType(self):\n\t\treturn self.get_query_params().get('ChargeType')\n\n\tdef set_ChargeType(self,ChargeType):\n\t\tself.add_query_param('ChargeType',ChargeType"},"code_compressed":{"kind":"null"}}},{"rowIdx":351,"cells":{"id":{"kind":"number","value":351,"string":"351"},"code":{"kind":"string","value":"import shutil\nimport os\nimport stat\nimport bpy\nimport arm.utils\nfrom arm import log\n\nif arm.is_reload(__name__):\n log = arm.reload_module(log)\n arm.utils = arm.reload_module(arm.utils)\nelse:\n arm.enable_reload(__name__)\n\nassets = []\nreserved_names = ['return.']\nkhafile_params = []\nkhafile_defs = []\nkhafile_defs_last = []\nembedded_data = []\nshaders = []\nshaders_last = []\nshaders_external = []\nshader_datas = []\nshader_passes = []\nshader_passes_assets = {}\nshader_cons = {}\n\ndef reset():\n global assets\n global khafile_params\n global khafile_defs\n global khafile_defs_last\n global embedded_data\n global shaders\n global shaders_last\n global shaders_external\n global shader_datas\n global shader_passes\n global shader_cons\n assets = []\n khafile_params = []\n khafile_defs_last = khafile_defs\n khafile_defs = []\n embedded_data = []\n shaders_last = shaders\n shaders = []\n shaders_external = []\n shader_datas = []\n shader_passes = []\n shader_cons = {}\n shader_cons['mesh_vert'] = []\n shader_cons['depth_vert'] = []\n shader_cons['depth_frag'] = []\n shader_cons['voxel_vert'] = []\n shader_cons['voxel_frag'] = []\n shader_cons['voxel_geom'] = []\n\ndef add(asset_file):\n global assets\n\n # Asset already exists, do nothing\n if asset_file in assets:\n return\n\n asset_file_base = os.path.basename(asset_file)\n for f in assets:\n f_file_base = os.path.basename(f)\n if f_file_base == asset_file_base:\n return\n\n assets.append(asset_file)\n\n # Reserved file name\n for f in reserved_names:\n if f in asset_file:\n log.warn(f'File \"{asset_file}\" contains reserved keyword, this will break C++ builds!')\n\ndef add_khafile_def(d):\n global khafile_defs\n if d not in khafile_defs:\n khafile_defs.append(d)\n\ndef add_khafile_param(p):\n global khafile_params\n if p not in khafile_params:\n khafile_params.append(p)\n\ndef add_embedded_data(file):\n global embedded_data\n if file not in embedded_data:\n embedded_data.append(file)\n\ndef add_shader(file):\n global shaders\n global shaders_last\n if file not in shaders:\n shaders.append(file)\n\ndef add_shader_data(file):\n global shader_datas\n if file not in shader_datas:\n shader_datas.append(file)\n\ndef add_shader_pass(data_name):\n global shader_passes\n # Shader data for passes are written into single shader_datas.arm file\n add_shader_data(arm.utils.get_fp_build() + '/compiled/Shaders/shader_datas.arm')\n if data_name not in shader_passes:\n shader_passes.append(data_name)\n\ndef METHOD_NAME(file):\n global shaders_external\n shaders_external.append(file)\n name = file.split('/')[-1].split('\\\\')[-1]\n add_shader(arm.utils.get_fp_build() + '/compiled/Shaders/' + name)\n\ninvalidate_enabled = True # Disable invalidating during build process\n\ndef remove_readonly(func, path, excinfo):\n os.chmod(path, stat.S_IWRITE)\n func(path)\n\ndef invalidate_shader_cache(self, context):\n # compiled.inc changed, recompile all shaders next time\n global invalidate_enabled\n if invalidate_enabled is 
False:\n return\n fp = arm.utils.get_fp_build()\n if os.path.isdir(fp + '/compiled/Shaders'):\n shutil.rmtree(fp + '/compiled/Shaders', onerror=remove_readonly)\n if os.path.isdir(fp + '/debug/html5-resources'):\n shutil.rmtree(fp + '/debug/html5-resources', onerror=remove_readonly)\n if os.path.isdir(fp + '/krom-resources'):\n shutil.rmtree(fp + '/krom-resources', onerror=remove_readonly)\n if os.path.isdir(fp + '/debug/krom-resources'):\n shutil.rmtree(fp + '/debug/krom-resources', onerror=remove_readonly)\n if os.path.isdir(fp + '/windows-resources'):\n shutil.rmtree(fp + '/windows-resources', onerror=remove_readonly)\n if os.path.isdir(fp + '/linux-resources'):\n shutil.rmtree(fp + '/linux-resources', onerror=remove_readonly)\n if os.path.isdir(fp + '/osx-resources'):\n shutil.rmtree(fp + '/osx-resources', onerror=remove_readonly)\n\ndef invalidate_compiled_data(self, context):\n global invalidate_enabled\n if invalidate_enabled is False:\n return\n fp = arm.utils.get_fp_build()\n if os.path.isdir(fp + '/compiled'):\n shutil.rmtree(fp + '/compiled', onerror=remove_readonly)\n\ndef invalidate_mesh_data(self, context):\n fp = arm.utils.get_fp_build()\n if os.path.isdir(fp + '/compiled/Assets/meshes'):\n shutil.rmtree(fp + '/compiled/Assets/meshes', onerror=remove_readonly)\n\ndef invalidate_envmap_data(self, context):\n fp = arm.utils.get_fp_build()\n if os.path.isdir(fp + '/compiled/Assets/envmaps'):\n shutil.rmtree(fp + '/compiled/Assets/envmaps', onerror=remove_readonly)\n\ndef invalidate_unpacked_data(self, context):\n fp = arm.utils.get_fp_build()\n if os.path.isdir(fp + '/compiled/Assets/unpacked'):\n shutil.rmtree(fp + '/compiled/Assets/unpacked', onerror=remove_readonly)\n\ndef invalidate_mesh_cache(self, context):\n if context.object is None or context.object.data is None:\n return\n context.object.data.arm_cached = False\n\ndef invalidate_instance_cache(self, context):\n if context.object is None or context.object.data is None:\n return\n invalidate_mesh_cache(self, context)\n for slot in context.object.material_slots:\n slot.material.arm_cached = False\n\ndef invalidate_compiler_cache(self, context):\n bpy.data.worlds['Arm'].arm_recompile = True\n\ndef shader_equal(sh, ar, shtype):\n # Merge equal shaders\n for e in ar:\n if sh.is_equal(e):\n sh.context.data[shtype] = e.context.data[shtype]\n sh.is_linked = True\n return\n ar.append(sh)\n\ndef vs_equal(c, ar):\n shader_equal(c.vert, ar, 'vertex_shader')\n\ndef fs_equal(c, ar):\n shader_equal(c.frag, ar, 'fragment_shader')\n\ndef gs_equal(c, ar):\n shader_equal(c.geom, ar, 'geometry_shader')\n\ndef tcs_equal(c, ar):\n shader_equal(c.tesc, ar, 'tesscontrol_shader')\n\ndef tes_equal(c, ar):\n shader_equal(c.tese, ar, 'tesseval_shader')"},"code_compressed":{"kind":"null"}}},{"rowIdx":352,"cells":{"id":{"kind":"number","value":352,"string":"352"},"code":{"kind":"string","value":"from unittest import TestCase\n\nfrom pcs.common.pacemaker.resource.operations import CibResourceOperationDto\nfrom pcs.lib.cib.resource import agent\nfrom pcs.lib.resource_agent import (\n ResourceAgentAction,\n ResourceAgentMetadata,\n ResourceAgentName,\n)\nfrom pcs.lib.resource_agent.const import OCF_1_0\n\n\nclass GetDefaultOperationInterval(TestCase):\n def test_return_0s_on_name_different_from_monitor(self):\n self.assertEqual(\"0s\", agent.get_default_operation_interval(\"start\"))\n\n def METHOD_NAME(self):\n self.assertEqual(\"60s\", agent.get_default_operation_interval(\"monitor\"))\n\n\nclass CompleteOperationsOptions(TestCase):\n def 
test_add_intervals_everywhere_is_missing(self):\n self.assertEqual(\n agent.complete_operations_options(\n [\n {\"name\": \"monitor\", \"interval\": \"20s\"},\n {\"name\": \"start\"},\n ]\n ),\n [\n {\"name\": \"monitor\", \"interval\": \"20s\"},\n {\"name\": \"start\", \"interval\": \"0s\"},\n ],\n )\n\n\nclass GetDefaultOperations(TestCase):\n fixture_actions = [\n ResourceAgentAction(\n \"custom1\", \"40s\", None, None, None, None, False, False\n ),\n ResourceAgentAction(\n \"custom2\", \"60s\", \"25s\", None, None, None, False, False\n ),\n ResourceAgentAction(\n \"meta-data\", None, None, None, None, None, False, False\n ),\n ResourceAgentAction(\n \"monitor\", \"30s\", \"10s\", None, None, None, False, False\n ),\n ResourceAgentAction(\n \"start\", None, \"40s\", None, None, None, False, False\n ),\n ResourceAgentAction(\n \"status\", \"20s\", \"15s\", None, None, None, False, False\n ),\n ResourceAgentAction(\n \"validate-all\", None, None, None, None, None, False, False\n ),\n ]\n fixture_actions_meta_only = [\n ResourceAgentAction(\n \"meta-data\", None, None, None, None, None, False, False\n )\n ]\n maxDiff = None\n\n @staticmethod\n def fixture_agent(actions):\n return ResourceAgentMetadata(\n ResourceAgentName(\"ocf\", \"pacemaker\", \"Dummy\"),\n agent_exists=True,\n ocf_version=OCF_1_0,\n shortdesc=\"\",\n longdesc=\"\",\n parameters=[],\n actions=actions,\n )\n\n @staticmethod\n def fixture_stonith_agent(actions):\n return ResourceAgentMetadata(\n ResourceAgentName(\"stonith\", None, \"fence_test\"),\n agent_exists=True,\n ocf_version=OCF_1_0,\n shortdesc=\"\",\n longdesc=\"\",\n parameters=[],\n actions=actions,\n )\n\n @staticmethod\n def op_fixture(name, interval, timeout):\n return CibResourceOperationDto(\n id=\"\",\n name=name,\n interval=interval,\n description=None,\n start_delay=None,\n interval_origin=None,\n timeout=timeout,\n enabled=None,\n record_pending=None,\n role=None,\n on_fail=None,\n meta_attributes=[],\n instance_attributes=[],\n )\n\n def test_select_only_actions_for_cib(self):\n self.assertEqual(\n agent.get_default_operations(\n self.fixture_agent(self.fixture_actions)\n ),\n [\n self.op_fixture(\"custom1\", \"0s\", \"40s\"),\n self.op_fixture(\"custom2\", \"25s\", \"60s\"),\n self.op_fixture(\"monitor\", \"10s\", \"30s\"),\n self.op_fixture(\"start\", \"40s\", None),\n ],\n )\n\n def test_select_only_actions_for_cib_stonith(self):\n self.assertEqual(\n agent.get_default_operations(\n self.fixture_stonith_agent(self.fixture_actions)\n ),\n [self.op_fixture(\"monitor\", \"10s\", \"30s\")],\n )\n\n def test_select_only_necessary_actions_for_cib(self):\n self.assertEqual(\n agent.get_default_operations(\n self.fixture_agent(self.fixture_actions), necessary_only=True\n ),\n [self.op_fixture(\"monitor\", \"10s\", \"30s\")],\n )\n\n def test_select_only_necessary_actions_for_cib_stonith(self):\n self.assertEqual(\n agent.get_default_operations(\n self.fixture_stonith_agent(self.fixture_actions),\n necessary_only=True,\n ),\n [self.op_fixture(\"monitor\", \"10s\", \"30s\")],\n )\n\n def test_complete_monitor(self):\n self.assertEqual(\n agent.get_default_operations(\n self.fixture_agent(self.fixture_actions_meta_only),\n necessary_only=True,\n ),\n [self.op_fixture(\"monitor\", \"60s\", None)],\n )\n\n def test_complete_monitor_stonith(self):\n self.assertEqual(\n agent.get_default_operations(\n self.fixture_stonith_agent(self.fixture_actions_meta_only),\n necessary_only=True,\n ),\n [self.op_fixture(\"monitor\", \"60s\", None)],\n 
)"},"code_compressed":{"kind":"null"}}},{"rowIdx":353,"cells":{"id":{"kind":"number","value":353,"string":"353"},"code":{"kind":"string","value":"from __future__ import print_function\nimport IMP.test\nimport IMP.algebra\n\ndisplayit = False\nif displayit:\n import IMP.display\n\nfrom IMP.algebra import *\nimport pickle\n\n\nclass Tests(IMP.test.TestCase):\n\n def test_magnitude(self):\n \"\"\"Check dense log grid of ints\"\"\"\n print(\"construct\")\n bb = BoundingBox3D(Vector3D(1, 1, 1), Vector3D(15, 15, 15))\n sz = [5, 5, 5]\n le = LogEmbedding3D(bb, Vector3D(2.0, 2.0, 2.0), sz)\n g = DenseIntLogGrid3D(sz, le)\n bbo = g.get_bounding_box()\n print(bb, bbo)\n if displayit:\n w = IMP.display.PymolWriter(self.get_tmp_file_name(\"log.pym\"))\n bbg = IMP.display.BoundingBoxGeometry(bb)\n bbg.set_color(IMP.display.get_display_color(0))\n bbg.set_name(\"in\")\n w.add_geometry(bbg)\n bbog = IMP.display.BoundingBoxGeometry(bbo)\n bbog.set_color(IMP.display.get_display_color(1))\n bbog.set_name(\"out\")\n w.add_geometry(bbog)\n for i in range(0, sz[0]):\n for j in range(0, sz[0]):\n for k in range(0, sz[0]):\n ei = ExtendedGridIndex3D(i, j, k)\n gi = g.get_index(ei)\n bbi = g.get_bounding_box(ei)\n bbog = IMP.display.BoundingBoxGeometry(bbi)\n bbog.set_name(str(ei))\n w.add_geometry(bbog)\n cg = IMP.display.PointGeometry(g.get_center(ei))\n cg.set_name(\"center\")\n w.add_geometry(cg)\n self.assertAlmostEqual(bbo.get_corner(1)[0], 15, delta=.1)\n\n def METHOD_NAME(self):\n \"\"\"Test mixed log embedding\"\"\"\n eb = IMP.algebra.LogEmbedding3D(IMP.algebra.Vector3D(0, 0, 0),\n IMP.algebra.Vector3D(1, 1, 1),\n IMP.algebra.Vector3D(1, 2, 1))\n for i in range(0, 10):\n gi = IMP.algebra.ExtendedGridIndex3D([i, i, i])\n center = eb.get_center(gi)\n print(center)\n\n def test_default_embedding_pickle(self):\n \"\"\"Test (un-)pickle of DefaultEmbedding3D\"\"\"\n e1 = IMP.algebra.DefaultEmbedding3D(IMP.algebra.Vector3D(1, 2, 3),\n IMP.algebra.Vector3D(2, 4, 5))\n e2 = IMP.algebra.DefaultEmbedding3D(IMP.algebra.Vector3D(4, 5, 6),\n IMP.algebra.Vector3D(7, 8, 9))\n e2.foo = 'bar'\n dump = pickle.dumps((e1, e2))\n newe1, newe2 = pickle.loads(dump)\n\n self.assertLess(IMP.algebra.get_distance(\n e1.get_origin(), newe1.get_origin()), 1e-4)\n self.assertLess(IMP.algebra.get_distance(\n e1.get_unit_cell(), newe1.get_unit_cell()), 1e-4)\n self.assertLess(IMP.algebra.get_distance(\n e2.get_origin(), newe2.get_origin()), 1e-4)\n self.assertLess(IMP.algebra.get_distance(\n e2.get_unit_cell(), newe2.get_unit_cell()), 1e-4)\n self.assertEqual(newe2.foo, 'bar')\n\n self.assertRaises(TypeError, e1._set_from_binary, 42)\n\n def test_log_embedding_pickle(self):\n \"\"\"Test (un-)pickle of LogEmbedding3D\"\"\"\n e1 = IMP.algebra.LogEmbedding3D(IMP.algebra.Vector3D(1, 2, 3),\n IMP.algebra.Vector3D(2, 4, 5),\n IMP.algebra.Vector3D(7, 8, 9))\n e2 = IMP.algebra.LogEmbedding3D(IMP.algebra.Vector3D(4, 5, 6),\n IMP.algebra.Vector3D(7, 8, 9),\n IMP.algebra.Vector3D(17, 18, 19))\n e2.foo = 'bar'\n dump = pickle.dumps((e1, e2))\n newe1, newe2 = pickle.loads(dump)\n\n self.assertLess(IMP.algebra.get_distance(\n e1.get_origin(), newe1.get_origin()), 1e-4)\n self.assertLess(IMP.algebra.get_distance(\n e1.get_unit_cell(), newe1.get_unit_cell()), 1e-4)\n self.assertLess(IMP.algebra.get_distance(\n e2.get_origin(), newe2.get_origin()), 1e-4)\n self.assertLess(IMP.algebra.get_distance(\n e2.get_unit_cell(), newe2.get_unit_cell()), 1e-4)\n self.assertEqual(newe2.foo, 'bar')\n\n self.assertRaises(TypeError, e1._set_from_binary, 42)\n\n def 
test_grid_index_pickle(self):\n \"\"\"Test (un-)pickle of GridIndex3D\"\"\"\n g1 = IMP.algebra.GridIndex3D(1, 2, 3)\n g2 = IMP.algebra.GridIndex3D(4, 5, 6)\n g2.foo = 'bar'\n dump = pickle.dumps((g1, g2))\n newg1, newg2 = pickle.loads(dump)\n\n self.assertEqual(g1[0], newg1[0])\n self.assertEqual(g1[1], newg1[1])\n self.assertEqual(g1[2], newg1[2])\n self.assertEqual(g2[0], newg2[0])\n self.assertEqual(g2[1], newg2[1])\n self.assertEqual(g2[2], newg2[2])\n self.assertEqual(newg2.foo, 'bar')\n\n self.assertRaises(TypeError, g1._set_from_binary, 42)\n\n def test_extended_grid_index_pickle(self):\n \"\"\"Test (un-)pickle of ExtendedGridIndex3D\"\"\"\n g1 = IMP.algebra.ExtendedGridIndex3D(1, 2, 3)\n g2 = IMP.algebra.ExtendedGridIndex3D(4, 5, 6)\n g2.foo = 'bar'\n dump = pickle.dumps((g1, g2))\n newg1, newg2 = pickle.loads(dump)\n\n self.assertEqual(g1[0], newg1[0])\n self.assertEqual(g1[1], newg1[1])\n self.assertEqual(g1[2], newg1[2])\n self.assertEqual(g2[0], newg2[0])\n self.assertEqual(g2[1], newg2[1])\n self.assertEqual(g2[2], newg2[2])\n self.assertEqual(newg2.foo, 'bar')\n\n self.assertRaises(TypeError, g1._set_from_binary, 42)\n\n def test_unbounded_grid_range_pickle(self):\n \"\"\"Test (un-)pickle of UnboundedGridRange3D\"\"\"\n g1 = IMP.algebra.UnboundedGridRange3D()\n g2 = IMP.algebra.UnboundedGridRange3D()\n g2.foo = 'bar'\n dump = pickle.dumps((g1, g2))\n newg1, newg2 = pickle.loads(dump)\n\n self.assertEqual(newg2.foo, 'bar')\n\n self.assertRaises(TypeError, g1._set_from_binary, 42)\n\n def test_bounded_grid_range_pickle(self):\n \"\"\"Test (un-)pickle of BoundedGridRange3D\"\"\"\n g1 = IMP.algebra.BoundedGridRange3D([1, 2, 3])\n g2 = IMP.algebra.BoundedGridRange3D([4, 5, 6])\n g2.foo = 'bar'\n dump = pickle.dumps((g1, g2))\n newg1, newg2 = pickle.loads(dump)\n\n self.assertEqual(g1.get_end_index(), newg1.get_end_index())\n self.assertEqual(g2.get_end_index(), newg2.get_end_index())\n self.assertEqual(newg2.foo, 'bar')\n\n self.assertRaises(TypeError, g1._set_from_binary, 42)\n\n\nif __name__ == '__main__':\n IMP.test.main()"},"code_compressed":{"kind":"null"}}},{"rowIdx":354,"cells":{"id":{"kind":"number","value":354,"string":"354"},"code":{"kind":"string","value":"# Copyright (c) ZenML GmbH 2023. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import Any, Optional\nfrom unittest.mock import MagicMock, patch\nfrom uuid import UUID, uuid4\n\nimport pytest\n\nfrom zenml.steps.external_artifact import ExternalArtifact\n\n\nclass MockClient:\n class MockArtifactResponse:\n def __init__(self, name):\n self.artifact_store_id = 42\n self.name = name\n self.id = 123\n\n class MockPipelineResponse:\n def __init__(self):\n self.last_successful_run = MagicMock()\n self.last_successful_run.artifacts = [\n MockClient.MockArtifactResponse(\"foo\"),\n MockClient.MockArtifactResponse(\"bar\"),\n ]\n\n def __init__(self, artifact_store_id=42):\n self.active_stack = MagicMock()\n self.active_stack.artifact_store.id = artifact_store_id\n self.active_stack.artifact_store.path = \"foo\"\n\n def get_artifact(self, *args, **kwargs):\n return MockClient.MockArtifactResponse(\"foo\")\n\n def get_pipeline(self, *args, **kwargs):\n return MockClient.MockPipelineResponse()\n\n\n@pytest.mark.parametrize(\n argnames=\"value,id,pipeline_name,artifact_name,exception_start\",\n argvalues=[\n [1, None, None, None, \"\"],\n [None, uuid4(), None, None, \"\"],\n [None, None, \"foo\", \"bar\", \"\"],\n [None, None, None, None, \"Either a value,\"],\n [1, uuid4(), None, None, \"Only a value,\"],\n [None, uuid4(), \"foo\", \"bar\", \"Only a value,\"],\n [1, None, \"foo\", \"bar\", \"Only a value,\"],\n [None, None, \"foo\", None, \"`pipeline_name` and `artifact_name`\"],\n [None, None, None, \"bar\", \"`pipeline_name` and `artifact_name`\"],\n ],\n ids=[\n \"good_by_value\",\n \"good_by_id\",\n \"good_by_pipeline_artifact\",\n \"bad_all_none\",\n \"bad_id_and_value\",\n \"bad_id_and_pipeline_artifact\",\n \"bad_value_and_pipeline_artifact\",\n \"bad_only_pipeline\",\n \"bad_only_artifact\",\n ],\n)\ndef test_external_artifact_init(\n value: Optional[Any],\n id: Optional[UUID],\n pipeline_name: Optional[str],\n artifact_name: Optional[str],\n exception_start: str,\n):\n \"\"\"Tests that initialization logic of `ExternalArtifact` works expectedly.\"\"\"\n if exception_start:\n with pytest.raises(ValueError, match=exception_start):\n ExternalArtifact(\n value=value,\n id=id,\n pipeline_name=pipeline_name,\n artifact_name=artifact_name,\n )\n else:\n ExternalArtifact(\n value=value,\n id=id,\n pipeline_name=pipeline_name,\n artifact_name=artifact_name,\n )\n\n\n@patch(\"zenml.steps.external_artifact.Client\")\n@patch(\"zenml.steps.external_artifact.fileio\")\n@patch(\"zenml.steps.external_artifact.artifact_utils\")\ndef test_upload_if_necessary_by_value(\n mocked_zenml_client,\n mocked_fileio,\n mocked_artifact_utils,\n):\n mocked_fileio.exists.return_value = False\n ea = ExternalArtifact(value=1)\n assert ea._id is None\n ea.upload_if_necessary()\n assert ea._id is not None\n assert ea._value is not None\n assert ea._pipeline_name is None\n assert ea._artifact_name is None\n\n\n@pytest.mark.skip\n@patch(\"zenml.steps.external_artifact.Client\")\ndef test_upload_if_necessary_by_id(mocked_zenml_client):\n mocked_zenml_client.return_value = MockClient()\n ea = ExternalArtifact(id=123)\n assert ea._value is None\n assert ea._pipeline_name is None\n assert ea._artifact_name is None\n assert ea._id is not None\n assert ea.upload_if_necessary() == 123\n\n\n@patch(\"zenml.steps.external_artifact.Client\")\ndef test_upload_if_necessary_by_pipeline_and_artifact(\n mocked_zenml_client,\n):\n mocked_zenml_client.return_value = MockClient()\n ea = 
ExternalArtifact(pipeline_name=\"foo\", artifact_name=\"bar\")\n assert ea._value is None\n assert ea._pipeline_name is not None\n assert ea._artifact_name is not None\n assert ea._id is None\n assert ea.upload_if_necessary() == 123\n assert ea._id == 123\n\n\n@patch(\"zenml.steps.external_artifact.Client\")\ndef test_upload_if_necessary_by_pipeline_and_artifact_other_artifact_store(\n mocked_zenml_client,\n):\n mocked_zenml_client.return_value = MockClient(artifact_store_id=45)\n with pytest.raises(RuntimeError, match=r\"The artifact bar \\(ID: 123\\)\"):\n ExternalArtifact(\n pipeline_name=\"foo\", artifact_name=\"bar\"\n ).upload_if_necessary()\n\n\n@patch(\"zenml.steps.external_artifact.Client\")\ndef METHOD_NAME(\n mocked_zenml_client,\n):\n mocked_zenml_client.return_value = MockClient()\n with pytest.raises(RuntimeError, match=\"Artifact with name `foobar`\"):\n ExternalArtifact(\n pipeline_name=\"foo\", artifact_name=\"foobar\"\n ).upload_if_necessary()"},"code_compressed":{"kind":"null"}}},{"rowIdx":355,"cells":{"id":{"kind":"number","value":355,"string":"355"},"code":{"kind":"string","value":"import numpy as np\nimport pytest\nimport torch\n\nfrom lhotse import AudioSource, CutSet, MultiCut, Recording, SupervisionSegment\nfrom lhotse.audio import RecordingSet\nfrom lhotse.cut import PaddingCut\nfrom lhotse.utils import fastcopy\n\n\n@pytest.fixture\ndef recording():\n return Recording.from_file(\"test/fixtures/libri/libri-1088-134315-0000_8ch.wav\")\n\n\n@pytest.fixture\ndef mono_rir():\n return Recording.from_file(\"test/fixtures/rir/sim_1ch.wav\")\n\n\n@pytest.fixture\ndef METHOD_NAME():\n return Recording.from_file(\"test/fixtures/rir/real_8ch.wav\")\n\n\n@pytest.fixture\ndef cut_with_supervision(recording, cut_channels=None, sup_channels=None):\n if cut_channels is None:\n cut_channels = [0, 1, 2, 3, 4, 5, 6, 7]\n if sup_channels is None:\n sup_channels = [0, 1, 2, 3, 4, 5, 6, 7]\n return MultiCut(\n id=\"cut\",\n start=0.0,\n duration=1.0,\n channel=cut_channels,\n supervisions=[\n SupervisionSegment(\n id=\"sup\",\n recording_id=\"rec\",\n start=0.0,\n duration=1.0,\n channel=sup_channels,\n )\n ],\n recording=recording,\n )\n\n\ndef test_cut_perturb_speed11(cut_with_supervision):\n cut_sp = cut_with_supervision.perturb_speed(1.1)\n assert cut_sp.start == 0.0\n assert cut_sp.duration == 0.9090625\n assert cut_sp.end == 0.9090625\n assert cut_sp.num_samples == 14545\n\n assert cut_sp.recording.duration == 14.5818125\n assert cut_sp.recording.num_samples == 233309\n\n assert cut_sp.supervisions[0].start == 0.0\n assert cut_sp.supervisions[0].duration == 0.9090625\n assert cut_sp.supervisions[0].end == 0.9090625\n\n cut_samples = cut_sp.load_audio()\n assert cut_samples.shape[0] == 8\n assert cut_samples.shape[1] == 14545\n\n recording_samples = cut_sp.recording.load_audio()\n assert recording_samples.shape[0] == 8\n assert recording_samples.shape[1] == 233309\n\n\ndef test_cut_perturb_speed09(cut_with_supervision):\n cut_sp = cut_with_supervision.perturb_speed(0.9)\n assert cut_sp.start == 0.0\n assert cut_sp.duration == 1.111125\n assert cut_sp.end == 1.111125\n assert cut_sp.num_samples == 17778\n\n assert cut_sp.recording.duration == 17.82225\n assert cut_sp.recording.num_samples == 285156\n\n assert cut_sp.supervisions[0].start == 0.0\n assert cut_sp.supervisions[0].duration == 1.111125\n assert cut_sp.supervisions[0].end == 1.111125\n\n cut_samples = cut_sp.load_audio()\n assert cut_samples.shape[0] == 8\n assert cut_samples.shape[1] == 17778\n\n recording_samples = 
cut_sp.recording.load_audio()\n assert recording_samples.shape[0] == 8\n assert recording_samples.shape[1] == 285156\n\n\ndef test_cut_perturb_tempo09(cut_with_supervision):\n cut_tp = cut_with_supervision.perturb_tempo(0.9)\n assert cut_tp.start == 0.0\n assert cut_tp.duration == 1.111125\n assert cut_tp.end == 1.111125\n assert cut_tp.num_samples == 17778\n\n assert cut_tp.recording.duration == 17.82225\n assert cut_tp.recording.num_samples == 285156\n\n assert cut_tp.supervisions[0].start == 0.0\n assert cut_tp.supervisions[0].duration == 1.111125\n assert cut_tp.supervisions[0].end == 1.111125\n\n cut_samples = cut_tp.load_audio()\n assert cut_samples.shape[0] == 8\n assert cut_samples.shape[1] == 17778\n\n recording_samples = cut_tp.recording.load_audio()\n assert recording_samples.shape[0] == 8\n assert recording_samples.shape[1] == 285156\n\n\ndef test_cut_perturb_tempo11(cut_with_supervision):\n cut_tp = cut_with_supervision.perturb_tempo(1.1)\n assert cut_tp.start == 0.0\n assert cut_tp.duration == 0.9090625\n assert cut_tp.end == 0.9090625\n assert cut_tp.num_samples == 14545\n\n assert cut_tp.recording.duration == 14.5818125\n assert cut_tp.recording.num_samples == 233309\n\n assert cut_tp.supervisions[0].start == 0.0\n assert cut_tp.supervisions[0].duration == 0.9090625\n assert cut_tp.supervisions[0].end == 0.9090625\n\n cut_samples = cut_tp.load_audio()\n assert cut_samples.shape[0] == 8\n assert cut_samples.shape[1] == 14545\n\n recording_samples = cut_tp.recording.load_audio()\n assert recording_samples.shape[0] == 8\n assert recording_samples.shape[1] == 233309\n\n\ndef test_resample_cut(cut_with_supervision):\n resampled = cut_with_supervision.resample(8000)\n assert cut_with_supervision.sampling_rate == 16000\n assert resampled.sampling_rate == 8000\n assert cut_with_supervision.num_samples == 2 * resampled.num_samples\n samples = resampled.load_audio()\n assert samples.shape[1] == resampled.num_samples\n\n\n@pytest.mark.parametrize(\"scale\", [0.125, 2.0])\ndef test_cut_perturb_volume(cut_with_supervision, scale):\n\n cut_vp = cut_with_supervision.perturb_volume(scale)\n assert cut_vp.start == cut_with_supervision.start\n assert cut_vp.duration == cut_with_supervision.duration\n assert cut_vp.end == cut_with_supervision.end\n assert cut_vp.num_samples == cut_with_supervision.num_samples\n\n assert cut_vp.recording.duration == cut_with_supervision.recording.duration\n assert cut_vp.recording.num_samples == cut_with_supervision.recording.num_samples\n\n assert cut_vp.supervisions[0].start == cut_with_supervision.supervisions[0].start\n assert (\n cut_vp.supervisions[0].duration == cut_with_supervision.supervisions[0].duration\n )\n assert cut_vp.supervisions[0].end == cut_with_supervision.supervisions[0].end\n\n assert cut_vp.load_audio().shape == cut_with_supervision.load_audio().shape\n assert (\n cut_vp.recording.load_audio().shape\n == cut_with_supervision.recording.load_audio().shape\n )\n\n np.testing.assert_array_almost_equal(\n cut_vp.load_audio(), cut_with_supervision.load_audio() * scale\n )\n np.testing.assert_array_almost_equal(\n cut_vp.recording.load_audio(),\n cut_with_supervision.recording.load_audio() * scale,\n )\n\n\n@pytest.mark.parametrize(\n \"rir, rir_channels, expected_channels\",\n [\n (\"mono_rir\", [0], [0, 1, 2, 3, 4, 5, 6, 7]),\n pytest.param(\"mono_rir\", [1], None, marks=pytest.mark.xfail),\n (\"multi_channel_rir\", [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]),\n (\"multi_channel_rir\", [0], [0, 1, 2, 3, 4, 5, 6, 7]),\n 
(\"multi_channel_rir\", [1], [0, 1, 2, 3, 4, 5, 6, 7]),\n pytest.param(\"multi_channel_rir\", [0, 1], None, marks=pytest.mark.xfail),\n ],\n)\ndef test_cut_reverb_rir(\n cut_with_supervision, rir, rir_channels, expected_channels, request\n):\n rir = request.getfixturevalue(rir)\n cut = cut_with_supervision\n cut_rvb = cut.reverb_rir(rir, rir_channels=rir_channels)\n print(cut_rvb.channel)\n assert cut_rvb.start == cut.start\n assert cut_rvb.duration == cut.duration\n assert cut_rvb.end == cut.end\n assert cut_rvb.num_samples == cut.num_samples\n\n assert cut_rvb.recording.duration == cut.recording.duration\n assert cut_rvb.recording.num_samples == cut.recording.num_samples\n\n assert cut_rvb.supervisions[0].start == cut.supervisions[0].start\n assert cut_rvb.supervisions[0].duration == cut.supervisions[0].duration\n assert cut_rvb.supervisions[0].end == cut.supervisions[0].end\n\n assert cut_rvb.load_audio().shape == cut.load_audio().shape\n assert cut_rvb.recording.load_audio().shape == cut.recording.load_audio().shape\n\n assert cut_rvb.channel == expected_channels\n\n\ndef test_cut_reverb_fast_rir(cut_with_supervision):\n cut = cut_with_supervision\n with pytest.raises(AssertionError):\n cut_rvb = cut.reverb_rir(rir_recording=None)"},"code_compressed":{"kind":"null"}}},{"rowIdx":356,"cells":{"id":{"kind":"number","value":356,"string":"356"},"code":{"kind":"string","value":"################################################################################\n# Creme is a free/open-source Customer Relationship Management software\n# Copyright (C) 2016-2020 Hybird\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n################################################################################\n\nimport logging\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.http import QueryDict\nfrom django.utils.translation import gettext\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import ngettext\n\nfrom creme.documents import get_document_model\n\nfrom ..forms.mass_import import form_factory, get_header\nfrom ..models import MassImportJobResult\nfrom ..utils.translation import get_model_verbose_name\nfrom .base import JobProgress, JobType\n\nlogger = logging.getLogger(__name__)\n\n\nclass _MassImportType(JobType):\n id = JobType.generate_id('creme_core', 'mass_import')\n verbose_name = _('Mass import')\n\n def _build_POST(self, job_data):\n return QueryDict(job_data['POST'].encode('utf8'))\n\n def _get_document(self, POST):\n return get_document_model().objects.get(id=POST['document'])\n\n def _get_ctype(self, job_data):\n return ContentType.objects.get_for_id(job_data['ctype'])\n\n def _execute(self, job):\n job_data = job.data\n POST = self._build_POST(job_data)\n doc = self._get_document(POST)\n header = get_header(doc.filedata, has_header='has_header' in POST)\n form_class = form_factory(self._get_ctype(job_data), header)\n form = form_class(user=job.user, data=POST)\n\n if not form.is_valid():\n # TODO: unit test\n raise self.Error(\n gettext('Invalid data [{}]').format(form.errors.as_text())\n )\n\n form.process(job)\n\n def progress(self, job):\n count = MassImportJobResult.objects.filter(job=job).count()\n return JobProgress(\n percentage=None,\n label=ngettext(\n '{count} line has been processed.',\n '{count} lines have been processed.',\n count\n ).format(count=count)\n )\n\n @property\n def results_bricks(self):\n from ..bricks import MassImportJobErrorsBrick\n return [MassImportJobErrorsBrick()]\n\n def METHOD_NAME(self, job):\n try:\n job_data = job.data\n desc = [\n gettext('Import «{model}» from {doc}').format(\n model=self._get_ctype(job_data).model_class()._meta.verbose_name,\n doc=self._get_document(self._build_POST(job_data)),\n ),\n ]\n except Exception: # TODO: unit test\n logger.exception('Error in _MassImportType.get_description')\n desc = ['?']\n\n return desc\n\n def get_stats(self, job):\n stats = []\n\n result_qs = MassImportJobResult.objects.filter(job=job)\n lines_count = result_qs.count()\n\n entity_result_qs = result_qs.filter(entity__isnull=False)\n created_count = entity_result_qs.filter(updated=False).count()\n updated_count = entity_result_qs.filter(updated=True).count()\n\n model = self._get_ctype(job.data).model_class()\n\n if created_count:\n stats.append(\n ngettext(\n '{count} «{model}» has been created.',\n '{count} «{model}» have been created.',\n created_count\n ).format(\n count=created_count,\n model=get_model_verbose_name(model, created_count),\n )\n )\n elif updated_count != lines_count:\n stats.append(\n gettext('No «{model}» has been created.').format(\n model=model._meta.verbose_name,\n )\n )\n\n if updated_count:\n stats.append(\n ngettext(\n '{count} «{model}» has been updated.',\n '{count} «{model}» have been updated.',\n updated_count\n ).format(\n count=updated_count,\n model=get_model_verbose_name(model, updated_count),\n )\n )\n elif created_count != lines_count:\n stats.append(\n gettext('No «{model}» has been updated.').format(\n model=model._meta.verbose_name,\n )\n )\n\n stats.append(\n ngettext(\n '{count} line in the file.',\n '{count} lines in the file.',\n 
lines_count,\n ).format(count=lines_count)\n )\n\n return stats\n\n\nmass_import_type = _MassImportType()"},"code_compressed":{"kind":"null"}}},{"rowIdx":357,"cells":{"id":{"kind":"number","value":357,"string":"357"},"code":{"kind":"string","value":"from __future__ import absolute_import\nfrom six.moves import xrange\nfrom argparse import ArgumentParser\nimport os\nimport logging\nimport random\n\nfrom toil.common import Toil\nfrom toil.job import Job\n\n\ndef setup(job, input_file_id, n, down_checkpoints):\n \"\"\"Sets up the sort.\n Returns the FileID of the sorted file\n \"\"\"\n # Write the input file to the file store\n job.fileStore.logToMaster(\"Starting the merge sort\")\n return job.addChildJobFn(down,\n input_file_id, n,\n down_checkpoints=down_checkpoints,\n memory='600M').rv()\n\n\ndef down(job, input_file_id, n, down_checkpoints):\n \"\"\"Input is a file and a range into that file to sort and an output location in which\n to write the sorted file.\n If the range is larger than a threshold N the range is divided recursively and\n a follow on job is then created which merges back the results. Otherwise,\n the file is sorted and placed in the output.\n \"\"\"\n # Read the file\n input_file = job.fileStore.readGlobalFile(input_file_id, cache=False)\n length = os.path.getsize(input_file)\n if length > n:\n # We will subdivide the file\n job.fileStore.logToMaster(\"Splitting file: %s of size: %s\"\n % (input_file_id, length), level=logging.CRITICAL)\n # Split the file into two copies\n mid_point = get_midpoint(input_file, 0, length)\n t1 = job.fileStore.getLocalTempFile()\n with open(t1, 'w') as fH:\n copy_subrange_of_file(input_file, 0, mid_point + 1, fH)\n t2 = job.fileStore.getLocalTempFile()\n with open(t2, 'w') as fH:\n copy_subrange_of_file(input_file, mid_point + 1, length, fH)\n\n # Call the down function recursively\n return job.addFollowOnJobFn(up, job.addChildJobFn(down, job.fileStore.writeGlobalFile(t1), n,\n down_checkpoints=down_checkpoints, memory='600M').rv(),\n job.addChildJobFn(down, job.fileStore.writeGlobalFile(t2), n,\n down_checkpoints=down_checkpoints,\n memory='600M').rv()).rv()\n else:\n # We can sort this bit of the file\n job.fileStore.logToMaster(\"Sorting file: %s of size: %s\"\n % (input_file_id, length), level=logging.CRITICAL)\n # Sort the copy and write back to the fileStore\n output_file = job.fileStore.getLocalTempFile()\n sort(input_file, output_file)\n return job.fileStore.writeGlobalFile(output_file)\n\n\ndef up(job, input_file_id_1, input_file_id_2):\n \"\"\"Merges the two files and places them in the output.\n \"\"\"\n with job.fileStore.writeGlobalFileStream() as (fileHandle, output_id):\n with job.fileStore.readGlobalFileStream(input_file_id_1) as inputFileHandle1:\n with job.fileStore.readGlobalFileStream(input_file_id_2) as inputFileHandle2:\n job.fileStore.logToMaster(\"Merging %s and %s to %s\"\n % (input_file_id_1, input_file_id_2, output_id))\n METHOD_NAME(inputFileHandle1, inputFileHandle2, fileHandle)\n\n # Cleanup up the input files - these deletes will occur after the completion is successful.\n job.fileStore.deleteGlobalFile(input_file_id_1)\n job.fileStore.deleteGlobalFile(input_file_id_2)\n return output_id\n\n\n# convenience functions\ndef sort(in_file, out_file):\n \"\"\"Sorts the given file.\n \"\"\"\n filehandle = open(in_file, 'r')\n lines = filehandle.readlines()\n filehandle.close()\n lines.sort()\n filehandle = open(out_file, 'w')\n for line in lines:\n filehandle.write(line)\n filehandle.close()\n\n\ndef 
METHOD_NAME(filehandle_1, filehandle_2, output_filehandle):\n \"\"\"Merges together two files maintaining sorted order.\n \"\"\"\n line2 = filehandle_2.readline()\n for line1 in filehandle_1.readlines():\n while line2 != '' and line2 <= line1:\n output_filehandle.write(line2)\n line2 = filehandle_2.readline()\n output_filehandle.write(line1)\n while line2 != '':\n output_filehandle.write(line2)\n line2 = filehandle_2.readline()\n\n\ndef copy_subrange_of_file(input_file, file_start, file_end, output_filehandle):\n \"\"\"Copies the range (in bytes) between fileStart and fileEnd to the given\n output file handle.\n \"\"\"\n with open(input_file, 'r') as fileHandle:\n fileHandle.seek(file_start)\n data = fileHandle.read(file_end - file_start)\n assert len(data) == file_end - file_start\n output_filehandle.write(data)\n\n\ndef get_midpoint(file, file_start, file_end):\n \"\"\"Finds the point in the file to split.\n Returns an int i such that fileStart <= i < fileEnd\n \"\"\"\n filehandle = open(file, 'r')\n mid_point = (file_start + file_end) / 2\n assert mid_point >= file_start\n filehandle.seek(mid_point)\n line = filehandle.readline()\n assert len(line) >= 1\n if len(line) + mid_point < file_end:\n return mid_point + len(line) - 1\n filehandle.seek(file_start)\n line = filehandle.readline()\n assert len(line) >= 1\n assert len(line) + file_start <= file_end\n return len(line) + file_start - 1\n\n\ndef make_file_to_sort(file_name, lines, line_length):\n with open(file_name, 'w') as fileHandle:\n for _ in xrange(lines):\n line = \"\".join(random.choice('actgACTGNXYZ') for _ in xrange(line_length - 1)) + '\\n'\n fileHandle.write(line)\n\n\ndef main():\n parser = ArgumentParser()\n Job.Runner.addToilOptions(parser)\n\n parser.add_argument('--num-lines', default=1000, help='Number of lines in file to sort.', type=int)\n parser.add_argument('--line-length', default=50, help='Length of lines in file to sort.', type=int)\n parser.add_argument(\"--N\",\n help=\"The threshold below which a serial sort function is used to sort file. 
\"\n \"All lines must of length less than or equal to N or program will fail\",\n default=10000)\n\n options = parser.parse_args()\n\n if int(options.N) <= 0:\n raise RuntimeError(\"Invalid value of N: %s\" % options.N)\n\n file_name = 'file_to_sort.txt'\n make_file_to_sort(file_name=file_name, lines=options.num_lines, line_length=options.line_length)\n\n with Toil(options) as toil:\n sort_file_url = 'file://' + os.path.abspath('file_to_sort.txt')\n if not toil.options.restart:\n sort_file_id = toil.importFile(sort_file_url)\n sorted_file_id = toil.start(Job.wrapJobFn(setup, sort_file_id, int(options.N), False, memory='600M'))\n else:\n sorted_file_id = toil.restart()\n toil.exportFile(sorted_file_id, sort_file_url)\n\nif __name__ == '__main__':\n main()"},"code_compressed":{"kind":"null"}}},{"rowIdx":358,"cells":{"id":{"kind":"number","value":358,"string":"358"},"code":{"kind":"string","value":"from __future__ import unicode_literals\n\nfrom rest_framework import generics\nfrom rest_framework import permissions\nfrom rest_framework.exceptions import NotFound\n\nfrom api.actions.serializers import PreprintRequestActionSerializer\nfrom api.base.views import JSONAPIBaseView\nfrom api.base import permissions as base_permissions\nfrom api.base.filters import ListFilterMixin\nfrom api.base.utils import get_object_or_error\nfrom api.requests.permissions import NodeRequestPermission, PreprintRequestPermission\nfrom api.requests.serializers import NodeRequestSerializer, PreprintRequestSerializer\nfrom framework.auth.oauth_scopes import CoreScopes\nfrom osf.models import Node, NodeRequest, PreprintRequest, Preprint\n\n\nclass RequestMixin(object):\n serializer_class = None\n request_class = None\n request_display_name = None\n target_class = None\n target_display_name = None\n target_lookup_url_kwarg = None\n request_lookup_url_kwarg = None\n\n def __get_object(self, object_class, lookup_arg, display_name, check_object_permissions=True):\n obj = get_object_or_error(\n object_class,\n self.kwargs[lookup_arg],\n self.request,\n display_name=display_name,\n )\n\n # May raise a permission denied\n if check_object_permissions:\n self.check_object_permissions(self.request, obj)\n\n return obj\n\n def get_request(self, check_object_permissions=True):\n return self.__get_object(self.request_class, self.request_lookup_url_kwarg, self.request_display_name, check_object_permissions=check_object_permissions)\n\n def get_target(self, check_object_permissions=True):\n return self.__get_object(self.target_class, self.target_lookup_url_kwarg, self.target_display_name, check_object_permissions=check_object_permissions)\n\n\nclass NodeRequestMixin(RequestMixin):\n serializer_class = NodeRequestSerializer\n request_class = NodeRequest\n request_display_name = 'node request'\n target_class = Node\n target_display_name = 'node'\n target_lookup_url_kwarg = 'node_id'\n request_lookup_url_kwarg = 'request_id'\n\n\nclass PreprintRequestMixin(RequestMixin):\n serializer_class = PreprintRequestSerializer\n request_class = PreprintRequest\n request_display_name = 'preprint request'\n target_class = Preprint\n target_display_name = 'preprint'\n target_lookup_url_kwarg = 'preprint_id'\n request_lookup_url_kwarg = 'request_id'\n\n\nclass RequestDetail(JSONAPIBaseView, generics.RetrieveAPIView):\n permission_classes = (\n permissions.IsAuthenticatedOrReadOnly,\n base_permissions.TokenHasScope,\n )\n\n required_read_scopes = [CoreScopes.ALWAYS_PUBLIC] # Actual scope checks are done on subview.as_view\n required_write_scopes = 
[CoreScopes.NULL]\n view_category = 'requests'\n view_name = 'request-detail'\n\n def get(self, request, *args, **kwargs):\n request_id = self.kwargs['request_id']\n if NodeRequest.objects.filter(_id=request_id).exists():\n return NodeRequestDetail.as_view()(request._request, *args, **kwargs)\n elif PreprintRequest.objects.filter(_id=request_id).exists():\n return PreprintRequestDetail.as_view()(request._request, *args, **kwargs)\n else:\n raise NotFound\n\nclass NodeRequestDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeRequestMixin):\n permission_classes = (\n permissions.IsAuthenticatedOrReadOnly,\n base_permissions.TokenHasScope,\n NodeRequestPermission,\n )\n\n required_read_scopes = [CoreScopes.NODE_REQUESTS_READ]\n required_write_scopes = [CoreScopes.NULL]\n\n serializer_class = NodeRequestSerializer\n\n view_category = 'requests'\n view_name = 'node-request-detail'\n\n def get_object(self):\n return self.get_request()\n\nclass PreprintRequestDetail(JSONAPIBaseView, generics.RetrieveAPIView, PreprintRequestMixin):\n permission_classes = (\n permissions.IsAuthenticatedOrReadOnly,\n base_permissions.TokenHasScope,\n PreprintRequestPermission,\n )\n\n required_read_scopes = [CoreScopes.PREPRINT_REQUESTS_READ]\n required_write_scopes = [CoreScopes.NULL]\n\n serializer_class = PreprintRequestSerializer\n\n view_category = 'requests'\n view_name = 'preprint-request-detail'\n\n def get_object(self):\n return self.get_request()\n\nclass RequestActionList(JSONAPIBaseView, generics.ListAPIView):\n permission_classes = (\n permissions.IsAuthenticatedOrReadOnly,\n base_permissions.TokenHasScope,\n )\n\n required_read_scopes = [CoreScopes.ACTIONS_READ]\n required_write_scopes = [CoreScopes.NULL]\n\n view_category = 'requests'\n view_name = 'request-action-list'\n\n def get(self, request, *args, **kwargs):\n request_id = self.kwargs['request_id']\n if PreprintRequest.objects.filter(_id=request_id).exists():\n return PreprintRequestActionList.as_view()(request._request, *args, **kwargs)\n else:\n raise NotFound\n\nclass PreprintRequestActionList(JSONAPIBaseView, generics.ListAPIView, PreprintRequestMixin, ListFilterMixin):\n permission_classes = (\n permissions.IsAuthenticatedOrReadOnly,\n base_permissions.TokenHasScope,\n PreprintRequestPermission,\n )\n\n required_read_scopes = [CoreScopes.ACTIONS_READ]\n required_write_scopes = [CoreScopes.NULL]\n\n serializer_class = PreprintRequestActionSerializer\n\n view_category = 'requests'\n view_name = 'preprint-request-action-list'\n\n # supports MustBeModerator\n def METHOD_NAME(self):\n request_id = self.kwargs['request_id']\n preprint_request = PreprintRequest.load(request_id)\n if preprint_request:\n return preprint_request.target.provider\n raise NotFound\n\n # overrides ListFilterMixin\n def get_default_queryset(self):\n return self.get_request().actions.order_by('-created').all()\n\n # overrides ListAPIView\n def get_queryset(self):\n return self.get_queryset_from_request()"},"code_compressed":{"kind":"null"}}},{"rowIdx":359,"cells":{"id":{"kind":"number","value":359,"string":"359"},"code":{"kind":"string","value":"import hashlib\nimport math\nimport operator\nimport re\nfrom collections import OrderedDict\nfrom typing import Dict, Iterable, List, Optional, Set\n\nfrom click import UsageError\nfrom pygitguardian.models import Match, PolicyBreak, ScanResult\n\nfrom ggshield.core.types import IgnoredMatch\n\n\nREGEX_MATCH_HIDE = re.compile(r\"[^+\\-\\s]\")\nREGEX_SPECIAL_CHARS = set(\".^$+*?{}()[]\\\\|\")\nINVALID_PATTERNS_REGEX = 
re.compile(\n r\"(\\*\\*\\*)\" # the \"***\" sequence is not valid\n r\"|(\\*\\*[^/])\" # a \"**\" sequence must be immediately followed by a \"/\"\n r\"|([^/]\\*\\*)\" # a \"**\" sequence must be either at the start of the string or\n # immediately preceded by a \"/\"\n)\n\nMAXIMUM_CENSOR_LENGTH = 60\n\n\ndef is_ignored(\n policy_break: PolicyBreak,\n matches_ignore: Iterable[IgnoredMatch],\n) -> bool:\n \"\"\"\n is_ignored checks if a occurrence is ignored.\n There are 2 ways of ignoring a occurrence:\n - matching the occurrence sha\n - matching one of the match.match values\n\n :param policy_break: Policy Break occurrence to judge\n :param matches_ignore: Iterable of match ignores\n :return: True if ignored\n \"\"\"\n\n matches = [match.match for match in matches_ignore]\n if policy_break.policy.lower() != \"secrets detection\":\n return True\n if get_ignore_sha(policy_break) in matches or any(\n match.match in matches for match in policy_break.matches\n ):\n return True\n return False\n\n\ndef remove_ignored_from_result(\n scan_result: ScanResult, matches_ignore: Iterable[IgnoredMatch]\n) -> None:\n \"\"\"\n remove_ignored removes occurrences from a Scan Result based on a sha\n made from its matches.\n\n :param scan_result: ScanResult to filter\n :param matches_ignore: match SHAs or plaintext matches to filter out\n \"\"\"\n\n scan_result.policy_breaks = [\n policy_break\n for policy_break in scan_result.policy_breaks\n if not is_ignored(policy_break, matches_ignore)\n ]\n\n scan_result.policy_break_count = len(scan_result.policy_breaks)\n\n\ndef remove_results_from_ignore_detectors(\n scan_result: ScanResult,\n ignored_detectors: Optional[Set[str]] = None,\n) -> None:\n if not ignored_detectors:\n return\n\n scan_result.policy_breaks = [\n policy_break\n for policy_break in scan_result.policy_breaks\n if policy_break.break_type not in ignored_detectors\n ]\n\n scan_result.policy_break_count = len(scan_result.policy_breaks)\n\n\ndef get_ignore_sha(policy_break: PolicyBreak) -> str:\n hashable = \"\".join(\n [\n f\"{match.match},{match.match_type}\"\n for match in sorted(\n policy_break.matches, key=operator.attrgetter(\"match_type\")\n )\n ]\n )\n\n return hashlib.sha256(hashable.encode(\"UTF-8\")).hexdigest()\n\n\ndef leak_dictionary_by_ignore_sha(\n policy_breaks: List[PolicyBreak],\n) -> Dict[str, List[PolicyBreak]]:\n \"\"\"\n leak_dictionary_by_ignore_sha sorts matches and incidents by\n first appearance in file.\n\n sort incidents by first appearance on file,\n file wide matches have no index\n so give it -1 so they get bumped to the top\n\n :return: Dictionary with line number as index and a list of\n matches that start on said line.\n \"\"\"\n policy_breaks.sort(\n key=lambda x: min( # type: ignore\n match.index_start if match.index_start else -1 for match in x.matches\n )\n )\n sha_dict: Dict[str, List[PolicyBreak]] = OrderedDict()\n for policy_break in policy_breaks:\n policy_break.matches.sort(key=lambda x: x.index_start if x.index_start else -1)\n ignore_sha = get_ignore_sha(policy_break)\n sha_dict.setdefault(ignore_sha, []).append(policy_break)\n\n return sha_dict\n\n\ndef translate_user_pattern(pattern: str) -> str:\n \"\"\"\n Translate the user pattern into a regex. 
This function assumes that the given\n pattern is valid and has been normalized beforehand.\n \"\"\"\n\n # Escape each special character\n pattern = \"\".join(\n f\"\\\\{char}\" if char in REGEX_SPECIAL_CHARS else char for char in pattern\n )\n\n # Handle start/end of pattern\n if pattern[-1] != \"/\":\n pattern += \"$\"\n if pattern[0] == \"/\":\n pattern = \"^\" + pattern[1:]\n else:\n pattern = \"(^|/)\" + pattern\n\n # Replace * and ** sequences\n pattern = re.sub(r\"\\\\\\*\\\\\\*/\", \"([^/]+/)*\", pattern)\n pattern = re.sub(r\"\\\\\\*\", \"([^/]+)\", pattern)\n\n return pattern\n\n\ndef METHOD_NAME(pattern: str) -> bool:\n return bool(pattern) and not INVALID_PATTERNS_REGEX.search(pattern)\n\n\ndef init_exclusion_regexes(paths_ignore: Iterable[str]) -> Set[re.Pattern]:\n \"\"\"\n filter_set creates a set of paths of the ignored\n entries from 3 sources:\n .gitguardian.yaml\n files in .git\n files ignore in .gitignore\n \"\"\"\n res = set()\n for path in paths_ignore:\n if not METHOD_NAME(path):\n raise UsageError(f\"{path} is not a valid exclude pattern.\")\n res.add(re.compile(translate_user_pattern(path)))\n return res\n\n\ndef censor_string(text: str) -> str:\n \"\"\"\n Censor a string (usually a secret), revealing only the first and last\n 1/6th of the match up to a maximum of MAXIMUM_CENSOR_LENGTH.\n\n :return: the text censored\n \"\"\"\n len_match = len(text)\n start_privy_len = min(math.ceil(len_match / 6), MAXIMUM_CENSOR_LENGTH)\n end_privy_len = len_match - min(math.ceil(len_match / 6), MAXIMUM_CENSOR_LENGTH)\n\n censored = REGEX_MATCH_HIDE.sub(\"*\", text)\n\n return str(\n text[:start_privy_len]\n + censored[start_privy_len:end_privy_len]\n + text[end_privy_len:]\n )\n\n\ndef censor_match(match: Match) -> str:\n return censor_string(match.match)\n\n\ndef censor_content(content: str, policy_breaks: List[PolicyBreak]) -> str:\n for policy_break in policy_breaks:\n for match in policy_break.matches:\n if match.index_start is None:\n continue\n\n match.match = censor_match(match)\n\n content = \"\".join(\n (\n content[: match.index_start],\n match.match,\n content[len(match.match) + match.index_start :],\n )\n )\n return content"},"code_compressed":{"kind":"null"}}},{"rowIdx":360,"cells":{"id":{"kind":"number","value":360,"string":"360"},"code":{"kind":"string","value":"from datetime import datetime\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom elasticsearch.exceptions import NotFoundError\nimport pytz\n\n\nclass MetricMixin(object):\n\n @classmethod\n def _get_all_indices(cls):\n all_aliases = cls._index.get_alias()\n indices = set()\n for index, aliases in all_aliases.items():\n indices.add(index)\n if aliases['aliases']:\n for alias in aliases['aliases'].keys():\n indices.add(alias)\n return indices\n\n @classmethod\n def _get_relevant_indices(cls, after, before):\n # NOTE: This will only work for yearly indices. 
This logic\n # will need to be updated if we change to monthly or daily indices\n if before and after:\n year_range = range(after.year, before.year + 1)\n elif after:\n year_range = range(after.year, timezone.now().year + 1)\n else:\n # No metric data from before 2013\n year_range = range(2013, before.year + 1)\n all_indices = cls._get_all_indices()\n relevant_indices = [\n # get_index_name takes a datetime, so get Jan 1 for each relevant year\n cls.get_index_name(datetime(year, 1, 1, tzinfo=pytz.utc))\n for year in year_range\n ]\n return [index for index in relevant_indices if index in all_indices]\n\n @classmethod\n def _get_id_to_count(cls, size, metric_field, count_field, after=None, before=None):\n \"\"\"Performs the elasticsearch aggregation for get_top_by_count. Return a\n dict mapping ids to summed counts. If there's no data in the ES index, return None.\n \"\"\"\n search = cls.search(after=after, before=before)\n timestamp = {}\n if after:\n timestamp['gte'] = after\n if before:\n timestamp['lt'] = before\n if timestamp:\n search = search.filter('range', timestamp=timestamp)\n search.aggs.\\\n bucket('by_id', 'terms', field=metric_field, size=size, order={'sum_count': 'desc'}).\\\n metric('sum_count', 'sum', field=count_field)\n # Optimization: set size to 0 so that hits aren't returned (we only care about the aggregation)\n search = search.extra(size=0)\n try:\n response = search.execute()\n except NotFoundError:\n # _get_relevant_indices returned 1 or more indices\n # that doesn't exist. Fall back to unoptimized query\n search = search.index().index(cls._default_index())\n response = search.execute()\n # No indexed data\n if not hasattr(response.aggregations, 'by_id'):\n return None\n buckets = response.aggregations.by_id.buckets\n # Map _id => count\n return {\n bucket.key: int(bucket.sum_count.value)\n for bucket in buckets\n }\n\n # Overrides Document.search to only search relevant\n # indices, determined from `after`\n @classmethod\n def search(cls, using=None, index=None, after=None, before=None, *args, **kwargs):\n if not index and (before or after):\n indices = cls._get_relevant_indices(after, before)\n index = ','.join(indices)\n return super(MetricMixin, cls).search(using=using, index=index, *args, **kwargs)\n\n @classmethod\n def METHOD_NAME(cls, qs, model_field, metric_field,\n size, order_by=None,\n count_field='count',\n annotation='metric_count',\n after=None, before=None):\n \"\"\"Return a queryset annotated with the metric counts for each item.\n\n Example: ::\n\n # Get the top 10 PreprintProviders by download count\n top_providers = PreprintDownload.get_top_by_count(\n qs=PreprintProvider.objects.all(),\n model_field='_id',\n metric_field='provider_id',\n annotation='download_count',\n size=10\n )\n\n for each in top_providers:\n print('{}: {}'.format(each._id, each.download_count))\n\n ``size`` determines the number of buckets returned by the aggregation.\n If ``size=None``, the size of the queryset is used.\n WARNING: Be careful when using size=None when using a large queryset.\n\n :param QuerySet qs: The initial queryset to annotate\n :param str model_field: Model field that corresponds to ``metric_field``.\n :param str metric_field: Metric field that corresponds to ``model_field``.\n :param int size: Size of the aggregation. Also determines the size of the final\n queryset.\n :param str order_by: Field to order queryset by. 
If `None`, orders by\n the metric, descending.\n :param datetime after: Minimum datetime to narrow the search (inclusive).\n :param datetime before: Maximum datetime to narrow the search (exclusive).\n :param str count_field: Name of the field where count values are stored.\n :param str annotation: Name of the annotation.\n \"\"\"\n id_to_count = cls._get_id_to_count(\n size=size or qs.count(),\n metric_field=metric_field,\n count_field=count_field,\n after=after,\n before=before\n )\n if id_to_count is None:\n return qs.annotate(**{annotation: models.Value(0, models.IntegerField())})\n # Annotate the queryset with the counts for each id\n # https://stackoverflow.com/a/48187723/1157536\n whens = [\n models.When(**{\n model_field: k,\n 'then': v,\n }) for k, v in id_to_count.items()\n ]\n # By default order by annotation, desc\n order_by = order_by or '-{}'.format(annotation)\n return qs.annotate(**{\n annotation: models.Case(*whens, default=0, output_field=models.IntegerField())\n }).order_by(order_by)"},"code_compressed":{"kind":"null"}}},{"rowIdx":361,"cells":{"id":{"kind":"number","value":361,"string":"361"},"code":{"kind":"string","value":"from methods.regular.regular_api import *\nfrom methods.task.task_template.task_template_launch_handler import TaskTemplateLauncherThread\nfrom methods.sync_events.sync_actions_handler import SyncActionsHandlerThread\nfrom methods.action.action_flow_trigger_queue import ActionFlowTriggerQueueProcess\nfrom shared.ingest.packet import enqueue_packet\n\n\n@routes.route('/api/walrus/v1/interservice/receive',\n methods = ['POST'])\ndef METHOD_NAME():\n \"\"\"\n Inter-Service route to notify of new job launch\n\n For now relies on inter_service_security_token for permissions...\n\n This is just a starting point for more generic inter service notification\n Pros/Cons to having DB as intermediary point there, fo now\n this is fairly light weight.\n \n Once we have a good pattern here, eg retry/overflow handling,\n can probably remove polling / thread\n\n \"\"\"\n spec_list = [{\"inter_service_security_token\": {\n 'kind': str,\n 'required': True,\n 'security_token': settings.INTER_SERVICE_SECRET\n }\n },\n {\"message\": {\n 'kind': str,\n 'required': True\n }\n },\n {\"id\": { # or \"base_class_id\"?\n 'kind': int,\n 'required': False,\n 'default': None\n }\n },\n {\"extra_params\": {\n 'kind': dict,\n 'required': False,\n 'default': None\n }\n },\n {\"base_class_string\": {\n 'kind': str,\n 'required': False,\n 'default': None\n }\n },\n {\"project_string_id\": {\n 'kind': str,\n 'required': False,\n 'default': None\n }\n }\n # Serialized object maybe?\n ]\n\n log, input_from_request, untrusted_input = regular_input.master(request = request, spec_list = spec_list)\n\n if len(log[\"error\"].keys()) >= 1:\n return jsonify(log = log), 400\n\n logger.info(\"Received valid inter service request\")\n\n with sessionMaker.session_scope() as session:\n\n # CAUTIONS\n # Generally assumes any calls here are non blocking\n # So as to reasonably return \n\n # eg 1) Condition on message then some_launcher(event_id = input['id'])\n\n # Or 2) if we want object here for some reason, something like:\n # if input['base_class_string']:\n # base_object = getattr(sys.modules[__name__], input['base_class_string']).get_by_id(\n # id = input['id'],\n # session = session)\n\n if input_from_request['message'] == 'new_job_launch_queue_item':\n job_launcher_thread = TaskTemplateLauncherThread(run_once = True)\n log['info']['job_launcher_thread'] = True\n if 
input_from_request['message'] == 'new_sync_action_item':\n sync_action_thread = SyncActionsHandlerThread(run_once = True)\n log['info']['job_launcher_thread'] = True\n\n if input_from_request['message'] == 'new_action_flow_queue_item':\n\n num_flows = ActionFlowTriggerQueueProcess.try_to_enqueue_new_action_flows(\n session = session,\n event_id = input_from_request['id'],\n commit_per_element = True)\n for i in range(0, num_flows):\n action_flow_thread = ActionFlowTriggerQueueProcess(run_once = True)\n\n if input_from_request['message'] == 'file_copy':\n enqueue_packet(project_string_id = input_from_request.get('project_string_id'),\n session = session,\n media_url = None,\n media_type = input_from_request['extra_params'].get('type'),\n directory_id = input_from_request['extra_params'].get('destination_working_dir_id'),\n source_directory_id = input_from_request['extra_params'].get('source_working_dir_id'),\n remove_link = input_from_request['extra_params'].get('remove_link'),\n add_link = input_from_request['extra_params'].get('add_link'),\n copy_instance_list = input_from_request['extra_params'].get('copy_instance_list'),\n job_id = None,\n batch_id = input_from_request['extra_params'].get('batch_id'),\n file_id = input_from_request['id'],\n instance_list = [],\n video_parent_length = input_from_request['extra_params'].get('frame_count'),\n task_id = None,\n mode = 'copy_file',\n commit_input = True)\n\n log['success'] = True\n return jsonify(log = log), 200"},"code_compressed":{"kind":"null"}}},{"rowIdx":362,"cells":{"id":{"kind":"number","value":362,"string":"362"},"code":{"kind":"string","value":"# coding=utf-8\n# Copyright 2018-2023 EvaDB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport re\nimport shutil\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom evadb.catalog.models.table_catalog import TableCatalogEntry\nfrom evadb.database import EvaDBDatabase\nfrom evadb.models.storage.batch import Batch\nfrom evadb.parser.table_ref import TableInfo\nfrom evadb.storage.abstract_storage_engine import AbstractStorageEngine\nfrom evadb.storage.sqlite_storage_engine import SQLStorageEngine\nfrom evadb.utils.logging_manager import logger\n\n\nclass AbstractMediaStorageEngine(AbstractStorageEngine):\n def __init__(self, db: EvaDBDatabase):\n super().__init__(db)\n self._rdb_handler: SQLStorageEngine = SQLStorageEngine(db)\n\n def METHOD_NAME(self, table: TableCatalogEntry):\n return self.db.catalog().get_multimedia_metadata_table_catalog_entry(table)\n\n def _create_metadata_table(self, table: TableCatalogEntry):\n return (\n self.db.catalog().create_and_insert_multimedia_metadata_table_catalog_entry(\n table\n )\n )\n\n def _xform_file_url_to_file_name(self, file_url: Path) -> str:\n # Convert media_path to file name. This is done to support duplicate media_names with\n # different complete paths. Without conversion, we cannot copy files with same name but\n # different paths. 
Eg., a/b/my.mp4 and a/b/c/my.mp4.\n # xformed_file_name = zlib.crc32(str(file_url).encode(\"utf-8\")) & 0xFFFFFFFF\n # return str(xformed_file_name)\n\n # Previous approach with hashing is commented out above. Since we now use symbolic link, the only\n # thing we need to worry about is the same file name under different directory. This motivates us\n # to just breakdown directory also as part of file name. Additionally, it does not use hashing,\n # which avoids computation overhead.\n file_path_str = str(file_url)\n file_path = re.sub(r\"[^a-zA-Z0-9 \\.\\n]\", \"_\", file_path_str)\n return file_path\n\n def create(self, table: TableCatalogEntry, if_not_exists=True):\n \"\"\"\n Create the directory to store the images.\n Create a sqlite table to persist the file urls\n \"\"\"\n dir_path = Path(table.file_url)\n try:\n dir_path.mkdir(parents=True)\n except FileExistsError:\n if if_not_exists:\n return True\n error = \"Failed to load the image as directory \\\n already exists: {}\".format(\n dir_path\n )\n logger.error(error)\n raise FileExistsError(error)\n\n self._rdb_handler.create(self._create_metadata_table(table))\n return True\n\n def drop(self, table: TableCatalogEntry):\n try:\n dir_path = Path(table.file_url)\n shutil.rmtree(str(dir_path))\n metadata_table = self.METHOD_NAME(table)\n self._rdb_handler.drop(metadata_table)\n # remove the metadata table from the catalog\n self.db.catalog().delete_table_catalog_entry(metadata_table)\n except Exception as e:\n err_msg = f\"Failed to drop the image table {e}\"\n logger.exception(err_msg)\n raise Exception(err_msg)\n\n def delete(self, table: TableCatalogEntry, rows: Batch):\n try:\n media_metadata_table = self.METHOD_NAME(table)\n for media_file_path in rows.file_paths():\n dst_file_name = self._xform_file_url_to_file_name(Path(media_file_path))\n image_file = Path(table.file_url) / dst_file_name\n self._rdb_handler.delete(\n media_metadata_table,\n where_clause={\n media_metadata_table.identifier_column: str(media_file_path)\n },\n )\n image_file.unlink()\n except Exception as e:\n error = f\"Deleting file path {media_file_path} failed with exception {e}\"\n logger.exception(error)\n raise RuntimeError(error)\n return True\n\n def write(self, table: TableCatalogEntry, rows: Batch):\n try:\n dir_path = Path(table.file_url)\n copied_files = []\n for media_file_path in rows.file_paths():\n media_file = Path(media_file_path)\n dst_file_name = self._xform_file_url_to_file_name(media_file)\n dst_path = dir_path / dst_file_name\n if dst_path.exists():\n raise FileExistsError(\n f\"Duplicate File: {media_file} already exists in the table {table.name}\"\n )\n src_path = Path.cwd() / media_file\n os.symlink(src_path, dst_path)\n copied_files.append(dst_path)\n # assuming sql write is an atomic operation\n self._rdb_handler.write(\n self.METHOD_NAME(table),\n Batch(pd.DataFrame({\"file_url\": list(rows.file_paths())})),\n )\n\n except Exception as e:\n # delete the copied_files\n for file in copied_files:\n logger.info(f\"Rollback file {file}\")\n file.unlink()\n logger.exception(str(e))\n raise RuntimeError(str(e))\n else:\n return True\n\n def rename(self, old_table: TableCatalogEntry, new_name: TableInfo):\n try:\n self.db.catalog().rename_table_catalog_entry(old_table, new_name)\n except Exception as e:\n raise Exception(f\"Failed to rename table {new_name} with exception {e}\")"},"code_compressed":{"kind":"null"}}},{"rowIdx":363,"cells":{"id":{"kind":"number","value":363,"string":"363"},"code":{"kind":"string","value":"from typing import 
Iterable\nfrom typing import Optional\nfrom typing import Union\n\nfrom .request import Request\nfrom .response import Response\n\n\nclass CORSMiddleware(object):\n \"\"\"CORS Middleware.\n\n This middleware provides a simple out-of-the box CORS policy, including handling\n of preflighted requests from the browser.\n\n See also:\n\n * https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS\n * https://www.w3.org/TR/cors/#resource-processing-model\n\n Keyword Arguments:\n allow_origins (Union[str, Iterable[str]]): List of origins to allow (case\n sensitive). The string ``'*'`` acts as a wildcard, matching every origin.\n (default ``'*'``).\n expose_headers (Optional[Union[str, Iterable[str]]]): List of additional\n response headers to expose via the ``Access-Control-Expose-Headers``\n header. These headers are in addition to the CORS-safelisted ones:\n ``Cache-Control``, ``Content-Language``, ``Content-Length``,\n ``Content-Type``, ``Expires``, ``Last-Modified``, ``Pragma``.\n (default ``None``).\n\n See also:\n https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers\n allow_credentials (Optional[Union[str, Iterable[str]]]): List of origins\n (case sensitive) for which to allow credentials via the\n ``Access-Control-Allow-Credentials`` header.\n The string ``'*'`` acts as a wildcard, matching every allowed origin,\n while ``None`` disallows all origins. This parameter takes effect only\n if the origin is allowed by the ``allow_origins`` argument.\n (Default ``None``).\n\n \"\"\"\n\n def __init__(\n self,\n allow_origins: Union[str, Iterable[str]] = '*',\n expose_headers: Optional[Union[str, Iterable[str]]] = None,\n allow_credentials: Optional[Union[str, Iterable[str]]] = None,\n ):\n if allow_origins == '*':\n self.allow_origins = allow_origins\n else:\n if isinstance(allow_origins, str):\n allow_origins = [allow_origins]\n self.allow_origins = frozenset(allow_origins)\n if '*' in self.allow_origins:\n raise ValueError(\n 'The wildcard string \"*\" may only be passed to allow_origins as a '\n 'string literal, not inside an iterable.'\n )\n\n if expose_headers is not None and not isinstance(expose_headers, str):\n expose_headers = ', '.join(expose_headers)\n self.expose_headers = expose_headers\n\n if allow_credentials is None:\n allow_credentials = frozenset()\n elif allow_credentials != '*':\n if isinstance(allow_credentials, str):\n allow_credentials = [allow_credentials]\n allow_credentials = frozenset(allow_credentials)\n if '*' in allow_credentials:\n raise ValueError(\n 'The wildcard string \"*\" may only be passed to allow_credentials '\n 'as a string literal, not inside an iterable.'\n )\n self.allow_credentials = allow_credentials\n\n def process_response(self, req: Request, resp: Response, resource, req_succeeded):\n \"\"\"Implement the CORS policy for all routes.\n\n This middleware provides a simple out-of-the box CORS policy,\n including handling of preflighted requests from the browser.\n\n See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS\n\n See also: https://www.w3.org/TR/cors/#resource-processing-model\n \"\"\"\n\n origin = req.get_header('Origin')\n if origin is None:\n return\n\n if self.allow_origins != '*' and origin not in self.allow_origins:\n return\n\n if resp.get_header('Access-Control-Allow-Origin') is None:\n set_origin = '*' if self.allow_origins == '*' else origin\n if self.allow_credentials == '*' or origin in self.allow_credentials:\n set_origin = origin\n resp.set_header('Access-Control-Allow-Credentials', 
'true')\n resp.set_header('Access-Control-Allow-Origin', set_origin)\n\n if self.expose_headers:\n resp.set_header('Access-Control-Expose-Headers', self.expose_headers)\n\n if (\n req_succeeded\n and req.method == 'OPTIONS'\n and req.get_header('Access-Control-Request-Method')\n ):\n\n # NOTE(kgriffs): This is a CORS preflight request. Patch the\n # response accordingly.\n\n allow = resp.get_header('Allow')\n resp.delete_header('Allow')\n\n allow_headers = req.get_header(\n 'Access-Control-Request-Headers', default='*'\n )\n\n resp.set_header('Access-Control-Allow-Methods', allow)\n resp.set_header('Access-Control-Allow-Headers', allow_headers)\n resp.set_header('Access-Control-Max-Age', '86400') # 24 hours\n\n async def METHOD_NAME(self, *args):\n self.process_response(*args)"},"code_compressed":{"kind":"null"}}},{"rowIdx":364,"cells":{"id":{"kind":"number","value":364,"string":"364"},"code":{"kind":"string","value":"# Copyright (c) ZenML GmbH 2022. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Local ZenML server deployment.\"\"\"\n\nimport logging\nimport sys\nfrom typing import TYPE_CHECKING, Optional\n\nfrom tests.harness.deployment.base import (\n LOCAL_ZENML_SERVER_DEFAULT_PORT,\n BaseTestDeployment,\n)\nfrom tests.harness.deployment.local_default import LocalDefaultTestDeployment\nfrom tests.harness.model import (\n DeploymentConfig,\n DeploymentSetup,\n DeploymentStoreConfig,\n DeploymentType,\n)\n\nif TYPE_CHECKING:\n from zenml.zen_server.deploy.deployment import ServerDeployment\n\n\nclass ServerLocalTestDeployment(BaseTestDeployment):\n \"\"\"A deployment that runs a ZenML server as a background process.\"\"\"\n\n def __init__(self, config: DeploymentConfig) -> None:\n \"\"\"Initializes a local ZenML server deployment.\n\n Args:\n config: The configuration for the deployment.\n \"\"\"\n super().__init__(config)\n\n # The server local deployment is built on top of a local default\n # deployment because the server is provisioned through the client\n self.default_deployment = LocalDefaultTestDeployment(config)\n\n @property\n def server(self) -> Optional[\"ServerDeployment\"]:\n \"\"\"Returns the ZenML server corresponding to this configuration.\n\n Returns:\n The server for the deployment if it exists, None otherwise.\n \"\"\"\n from zenml.enums import ServerProviderType\n from zenml.zen_server.deploy.deployer import ServerDeployer\n\n # Managing the local server deployment is done through a default\n # local deployment with the same config.\n with self.default_deployment.connect():\n deployer = ServerDeployer()\n servers = deployer.list_servers(\n provider_type=ServerProviderType.LOCAL\n )\n if not servers:\n return None\n\n return servers[0]\n\n @property\n def is_running(self) -> bool:\n \"\"\"Returns whether the ZenML server is running.\n\n Returns:\n True if the server is running, False otherwise.\n \"\"\"\n server = self.server\n if server is not None and server.is_running:\n return True\n\n return False\n\n def up(self) -> None:\n 
\"\"\"Starts the ZenML deployment.\n\n Raises:\n RuntimeError: If the deployment is not supported on the host OS.\n \"\"\"\n from zenml.enums import ServerProviderType\n from zenml.utils.networking_utils import scan_for_available_port\n from zenml.zen_server.deploy.deployer import ServerDeployer\n from zenml.zen_server.deploy.deployment import ServerDeploymentConfig\n\n if sys.platform == \"win32\":\n raise RuntimeError(\n \"Running the ZenML server locally as a background process is \"\n \"not supported on Windows.\"\n )\n else:\n pass\n\n if self.is_running:\n logging.info(\n f\"Deployment '{self.config.name}' is already running. \"\n f\"Skipping provisioning.\"\n )\n return\n\n self.default_deployment.up()\n\n # Managing the local server deployment is done through the default\n # deployment with the same config.\n with self.default_deployment.connect():\n port = scan_for_available_port(LOCAL_ZENML_SERVER_DEFAULT_PORT)\n\n if port is None:\n raise RuntimeError(\n \"Could not find an available port for the ZenML server.\"\n )\n\n deployer = ServerDeployer()\n server_config = ServerDeploymentConfig(\n name=self.config.name,\n provider=ServerProviderType.LOCAL,\n port=port,\n )\n deployer.deploy_server(server_config)\n\n logging.info(\n f\"Started ZenML server for deployment '{self.config.name}'.\"\n )\n\n def down(self) -> None:\n \"\"\"Stops the ZenML deployment.\"\"\"\n from zenml.zen_server.deploy.deployer import ServerDeployer\n\n server = self.server\n if server is None:\n logging.info(\n f\"Deployment '{self.config.name}' is no longer running. \"\n )\n return\n\n # Managing the local server deployment is done through the default\n # deployment with the same config.\n with self.default_deployment.connect():\n deployer = ServerDeployer()\n deployer.remove_server(server.config.name)\n\n self.default_deployment.down()\n\n def METHOD_NAME(self) -> Optional[DeploymentStoreConfig]:\n \"\"\"Returns the store config for the deployment.\n\n Returns:\n The store config for the deployment if it is running, None\n otherwise.\n\n Raises:\n RuntimeError: If the deployment is not running.\n \"\"\"\n from zenml.zen_stores.base_zen_store import (\n DEFAULT_PASSWORD,\n DEFAULT_USERNAME,\n )\n\n if not self.is_running:\n raise RuntimeError(\n f\"The '{self.config.name}' deployment is not running.\"\n )\n\n server = self.server\n if (\n server is None\n or server.status is None\n or server.status.url is None\n ):\n raise RuntimeError(\n f\"The '{self.config.name}' deployment is not running.\"\n )\n\n return DeploymentStoreConfig(\n url=server.status.url,\n username=DEFAULT_USERNAME,\n password=DEFAULT_PASSWORD,\n )\n\n\nServerLocalTestDeployment.register_deployment_class(\n type=DeploymentType.SERVER, setup=DeploymentSetup.DEFAULT\n)"},"code_compressed":{"kind":"null"}}},{"rowIdx":365,"cells":{"id":{"kind":"number","value":365,"string":"365"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdkretailcloud.endpoint import endpoint_data\n\nclass CreateClusterRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'retailcloud', '2018-03-13', 'CreateCluster')\n\t\tself.set_method('POST')\n\n\t\tif hasattr(self, \"endpoint_map\"):\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\n\t\tif hasattr(self, \"endpoint_regional\"):\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\n\n\tdef get_BusinessCode(self): # String\n\t\treturn self.get_query_params().get('BusinessCode')\n\n\tdef set_BusinessCode(self, BusinessCode): # String\n\t\tself.add_query_param('BusinessCode', BusinessCode)\n\tdef get_CreateWithLogIntegration(self): # Boolean\n\t\treturn self.get_query_params().get('CreateWithLogIntegration')\n\n\tdef set_CreateWithLogIntegration(self, CreateWithLogIntegration): # Boolean\n\t\tself.add_query_param('CreateWithLogIntegration', CreateWithLogIntegration)\n\tdef get_Vswitchidss(self): # RepeatList\n\t\treturn self.get_query_params().get('Vswitchids')\n\n\tdef set_Vswitchidss(self, Vswitchids): # RepeatList\n\t\tfor depth1 in range(len(Vswitchids)):\n\t\t\tself.add_query_param('Vswitchids.' + str(depth1 + 1), Vswitchids[depth1])\n\tdef get_CloudMonitorFlags(self): # Integer\n\t\treturn self.get_query_params().get('CloudMonitorFlags')\n\n\tdef set_CloudMonitorFlags(self, CloudMonitorFlags): # Integer\n\t\tself.add_query_param('CloudMonitorFlags', CloudMonitorFlags)\n\tdef get_ClusterEnvType(self): # String\n\t\treturn self.get_query_params().get('ClusterEnvType')\n\n\tdef set_ClusterEnvType(self, ClusterEnvType): # String\n\t\tself.add_query_param('ClusterEnvType', ClusterEnvType)\n\tdef METHOD_NAME(self): # Boolean\n\t\treturn self.get_query_params().get('CreateWithArmsIntegration')\n\n\tdef set_CreateWithArmsIntegration(self, CreateWithArmsIntegration): # Boolean\n\t\tself.add_query_param('CreateWithArmsIntegration', CreateWithArmsIntegration)\n\tdef get_KeyPair(self): # String\n\t\treturn self.get_query_params().get('KeyPair')\n\n\tdef set_KeyPair(self, KeyPair): # String\n\t\tself.add_query_param('KeyPair', KeyPair)\n\tdef get_ClusterTitle(self): # String\n\t\treturn self.get_query_params().get('ClusterTitle')\n\n\tdef set_ClusterTitle(self, ClusterTitle): # String\n\t\tself.add_query_param('ClusterTitle', ClusterTitle)\n\tdef get_PodCIDR(self): # String\n\t\treturn self.get_query_params().get('PodCIDR')\n\n\tdef set_PodCIDR(self, PodCIDR): # String\n\t\tself.add_query_param('PodCIDR', PodCIDR)\n\tdef get_ClusterId(self): # Long\n\t\treturn self.get_query_params().get('ClusterId')\n\n\tdef set_ClusterId(self, ClusterId): # Long\n\t\tself.add_query_param('ClusterId', ClusterId)\n\tdef get_ClusterType(self): # String\n\t\treturn self.get_query_params().get('ClusterType')\n\n\tdef set_ClusterType(self, ClusterType): # String\n\t\tself.add_query_param('ClusterType', ClusterType)\n\tdef get_Password(self): # String\n\t\treturn self.get_query_params().get('Password')\n\n\tdef set_Password(self, Password): # 
String\n\t\tself.add_query_param('Password', Password)\n\tdef get_SnatEntry(self): # Integer\n\t\treturn self.get_query_params().get('SnatEntry')\n\n\tdef set_SnatEntry(self, SnatEntry): # Integer\n\t\tself.add_query_param('SnatEntry', SnatEntry)\n\tdef get_NetPlug(self): # String\n\t\treturn self.get_query_params().get('NetPlug')\n\n\tdef set_NetPlug(self, NetPlug): # String\n\t\tself.add_query_param('NetPlug', NetPlug)\n\tdef get_VpcId(self): # String\n\t\treturn self.get_query_params().get('VpcId')\n\n\tdef set_VpcId(self, VpcId): # String\n\t\tself.add_query_param('VpcId', VpcId)\n\tdef get_RegionName(self): # String\n\t\treturn self.get_query_params().get('RegionName')\n\n\tdef set_RegionName(self, RegionName): # String\n\t\tself.add_query_param('RegionName', RegionName)\n\tdef get_PrivateZone(self): # Boolean\n\t\treturn self.get_query_params().get('PrivateZone')\n\n\tdef set_PrivateZone(self, PrivateZone): # Boolean\n\t\tself.add_query_param('PrivateZone', PrivateZone)\n\tdef get_ServiceCIDR(self): # String\n\t\treturn self.get_query_params().get('ServiceCIDR')\n\n\tdef set_ServiceCIDR(self, ServiceCIDR): # String\n\t\tself.add_query_param('ServiceCIDR', ServiceCIDR)\n\tdef get_PublicSlb(self): # Integer\n\t\treturn self.get_query_params().get('PublicSlb')\n\n\tdef set_PublicSlb(self, PublicSlb): # Integer\n\t\tself.add_query_param('PublicSlb', PublicSlb)"},"code_compressed":{"kind":"null"}}},{"rowIdx":366,"cells":{"id":{"kind":"number","value":366,"string":"366"},"code":{"kind":"string","value":"from pyrokinetics.gk_code import GKInputGENE\nfrom pyrokinetics import template_dir\nfrom pyrokinetics.local_geometry import LocalGeometryMiller\nfrom pyrokinetics.local_species import LocalSpecies\nfrom pyrokinetics.numerics import Numerics\n\nfrom pathlib import Path\nimport numpy as np\nimport pytest\n\nimport sys\ndocs_dir = Path(__file__).parent.parent.parent / \"docs\"\nsys.path.append(str(docs_dir))\nfrom examples import example_JETTO # noqa\n\ntemplate_file = template_dir / \"input.gene\"\n\n\n@pytest.fixture\ndef default_gene():\n return GKInputGENE()\n\n\n@pytest.fixture\ndef gene():\n return GKInputGENE(template_file)\n\n\ndef test_read(gene):\n \"\"\"Ensure a gene file can be read, and that the 'data' attribute is set\"\"\"\n params = [\"general\", \"box\", \"geometry\"]\n assert np.all(np.isin(params, list(gene.data)))\n\n\ndef test_read_str():\n \"\"\"Ensure a gene file can be read as a string, and that the 'data' attribute is set\"\"\"\n params = [\"general\", \"box\", \"geometry\"]\n with open(template_file, \"r\") as f:\n gene = GKInputGENE.from_str(f.read())\n assert np.all(np.isin(params, list(gene.data)))\n\n\ndef test_verify_file_type(gene):\n \"\"\"Ensure that 'verify_file_type' does not raise exception on GENE file\"\"\"\n gene.verify_file_type(template_file)\n\n\n@pytest.mark.parametrize(\n \"filename\", [\"input.gs2\", \"input.cgyro\", \"transp.cdf\", \"helloworld\"]\n)\ndef test_verify_file_type_bad_inputs(gene, filename):\n \"\"\"Ensure that 'verify_file_type' raises exception on non-GENE file\"\"\"\n with pytest.raises(Exception):\n gene.verify_file_type(template_dir / filename)\n\n\ndef test_is_nonlinear(gene):\n \"\"\"Expect template file to be linear. 
Modify it so that it is nonlinear.\"\"\"\n gene.data[\"general\"][\"nonlinear\"] = 0\n assert gene.is_linear()\n assert not gene.is_nonlinear()\n gene.data[\"general\"][\"nonlinear\"] = 1\n assert not gene.is_linear()\n assert gene.is_nonlinear()\n\n\ndef test_add_flags(gene):\n gene.add_flags({\"foo\": {\"bar\": \"baz\"}})\n assert gene.data[\"foo\"][\"bar\"] == \"baz\"\n\n\ndef METHOD_NAME(gene):\n # TODO test it has the correct values\n local_geometry = gene.get_local_geometry()\n assert isinstance(local_geometry, LocalGeometryMiller)\n\n\ndef test_get_local_species(gene):\n local_species = gene.get_local_species()\n assert isinstance(local_species, LocalSpecies)\n assert local_species.nspec == 2\n assert len(gene.data[\"species\"]) == 2\n # Ensure you can index gene.data[\"species\"] (doesn't work on some f90nml versions)\n assert gene.data[\"species\"][0]\n assert gene.data[\"species\"][1]\n assert local_species[\"electron\"]\n assert local_species[\"ion1\"]\n # TODO test it has the correct values\n\n\ndef test_get_numerics(gene):\n # TODO test it has the correct values\n numerics = gene.get_numerics()\n assert isinstance(numerics, Numerics)\n\n\ndef test_write(tmp_path, gene):\n \"\"\"Ensure a gene file can be written, and that no info is lost in the process\"\"\"\n # Get template data\n local_geometry = gene.get_local_geometry()\n local_species = gene.get_local_species()\n numerics = gene.get_numerics()\n\n # Set output path\n filename = tmp_path / \"input.in\"\n\n # Write out a new input file\n gene_writer = GKInputGENE()\n gene_writer.set(local_geometry, local_species, numerics)\n\n # Ensure you can index gene.data[\"species\"] (doesn't work on some f90nml versions)\n assert len(gene_writer.data[\"species\"]) == 2\n assert gene_writer.data[\"species\"][0]\n assert gene_writer.data[\"species\"][1]\n\n # Write to disk\n gene_writer.write(filename)\n\n # Ensure a new file exists\n assert Path(filename).exists()\n\n # Ensure it is a valid file\n GKInputGENE().verify_file_type(filename)\n gene_reader = GKInputGENE(filename)\n new_local_geometry = gene_reader.get_local_geometry()\n assert local_geometry.shat == new_local_geometry.shat\n new_local_species = gene_reader.get_local_species()\n assert local_species.nspec == new_local_species.nspec\n new_numerics = gene_reader.get_numerics()\n assert numerics.delta_time == new_numerics.delta_time\n\n\ndef test_species_order(tmp_path):\n pyro = example_JETTO.main(tmp_path)\n\n # Reverse species order so electron is last\n pyro.local_species.names = pyro.local_species.names[::-1]\n pyro.gk_code = \"GENE\"\n\n pyro.write_gk_file(file_name=tmp_path / \"input.in\")\n\n assert Path(tmp_path / \"input.in\").exists()"},"code_compressed":{"kind":"null"}}},{"rowIdx":367,"cells":{"id":{"kind":"number","value":367,"string":"367"},"code":{"kind":"string","value":"import re\nimport warnings\nfrom contextlib import contextmanager\n\nimport pymssql # pylint: disable=import-error\n\nfrom Orange.data import StringVariable, TimeVariable, ContinuousVariable, DiscreteVariable\nfrom Orange.data.sql.backend import Backend\nfrom Orange.data.sql.backend.base import ToSql, BackendError\n\n\ndef METHOD_NAME(ex: Exception) -> str:\n try:\n return ex.args[0][1].decode().splitlines()[-1]\n except: # pylint: disable=bare-except\n return str(ex)\n\n\nclass PymssqlBackend(Backend):\n display_name = \"SQL Server\"\n\n def __init__(self, connection_params):\n connection_params[\"server\"] = connection_params.pop(\"host\", None)\n\n for key in list(connection_params):\n if 
connection_params[key] is None:\n del connection_params[key]\n\n super().__init__(connection_params)\n try:\n self.connection = pymssql.connect(login_timeout=5, **connection_params)\n except pymssql.Error as ex:\n raise BackendError(METHOD_NAME(ex)) from ex\n except ValueError:\n # ValueError is raised when 'server' contains \"\\\\\"\n raise BackendError(\"Incorrect format of connection details\")\n\n def list_tables_query(self, schema=None):\n return \"\"\"\n SELECT [TABLE_SCHEMA], [TABLE_NAME]\n FROM information_schema.tables\n WHERE TABLE_TYPE in ('VIEW' ,'BASE TABLE')\n ORDER BY [TABLE_NAME]\n \"\"\"\n\n def quote_identifier(self, name):\n return \"[{}]\".format(name)\n\n def unquote_identifier(self, quoted_name):\n return quoted_name[1:-1]\n\n def create_sql_query(self, table_name, fields, filters=(),\n group_by=None, order_by=None, offset=None, limit=None,\n use_time_sample=None):\n sql = [\"SELECT\"]\n if limit and not offset:\n sql.extend([\"TOP\", str(limit)])\n sql.append(', '.join(fields))\n sql.extend([\"FROM\", table_name])\n if use_time_sample:\n sql.append(\"TABLESAMPLE system_time(%i)\" % use_time_sample)\n if filters:\n sql.extend([\"WHERE\", \" AND \".join(filters)])\n if group_by:\n sql.extend([\"GROUP BY\", \", \".join(group_by)])\n\n if offset and not order_by:\n order_by = fields[0].split(\"AS\")[1:]\n\n if order_by:\n sql.extend([\"ORDER BY\", \",\".join(order_by)])\n if offset:\n sql.extend([\"OFFSET\", str(offset), \"ROWS\"])\n if limit:\n sql.extend([\"FETCH FIRST\", str(limit), \"ROWS ONLY\"])\n\n return \" \".join(sql)\n\n @contextmanager\n def execute_sql_query(self, query, params=()):\n try:\n with self.connection.cursor() as cur:\n cur.execute(query, *params)\n yield cur\n except pymssql.Error as ex:\n raise BackendError(METHOD_NAME(ex)) from ex\n\n def create_variable(self, field_name, field_metadata, type_hints, inspect_table=None):\n if field_name in type_hints:\n var = type_hints[field_name]\n else:\n var = self._guess_variable(field_name, field_metadata,\n inspect_table)\n\n field_name_q = self.quote_identifier(field_name)\n if var.is_continuous:\n if isinstance(var, TimeVariable):\n var.to_sql = ToSql(\"DATEDIFF(s, '1970-01-01 00:00:00', {})\".format(field_name_q))\n else:\n var.to_sql = ToSql(field_name_q)\n else: # discrete or string\n var.to_sql = ToSql(field_name_q)\n return var\n\n def _guess_variable(self, field_name, field_metadata, inspect_table):\n # pylint: disable=import-error\n from pymssql import STRING, NUMBER, DATETIME, DECIMAL\n\n type_code, *_ = field_metadata\n\n if type_code in (NUMBER, DECIMAL):\n return ContinuousVariable(field_name)\n\n if type_code == DATETIME:\n tv = TimeVariable(field_name)\n tv.have_date = True\n tv.have_time = True\n return tv\n\n if type_code == STRING:\n if inspect_table:\n values = self.get_distinct_values(field_name, inspect_table)\n if values:\n return DiscreteVariable(field_name, values)\n\n return StringVariable(field_name)\n\n EST_ROWS_RE = re.compile(r'StatementEstRows=\"(\\d+)\"')\n\n def count_approx(self, query):\n with self.connection.cursor() as cur:\n try:\n cur.execute(\"SET SHOWPLAN_XML ON\")\n try:\n cur.execute(query)\n result = cur.fetchone()\n match = self.EST_ROWS_RE.search(result[0])\n if not match:\n # Either StatementEstRows was not found or\n # a float is received.\n # If it is a float then it is most probable\n # that the server's statistics are out of date\n # and the result is false. 
In that case\n # it is preferable to return None so\n # an exact count be used.\n return None\n return int(match.group(1))\n finally:\n cur.execute(\"SET SHOWPLAN_XML OFF\")\n except pymssql.Error as ex:\n if \"SHOWPLAN permission denied\" in str(ex):\n warnings.warn(\"SHOWPLAN permission denied, count approximates will not be used\")\n return None\n raise BackendError(METHOD_NAME(ex)) from ex\n\n def distinct_values_query(self, field_name: str, table_name: str) -> str:\n field = self.quote_identifier(field_name)\n return self.create_sql_query(\n table_name,\n [field],\n # Cast - workaround for collations that are not case-sensitive and\n # UTF characters sensitive\n # DATALENGTH - workaround for string comparison that ignore trailing\n # spaces, two strings that differ only in space in the end would\n # group together if DATALENGTH wouldn't be used\n group_by=[f\"{field}, Cast({field} as binary), DATALENGTH({field})\"],\n order_by=[field],\n limit=21,\n )"},"code_compressed":{"kind":"null"}}},{"rowIdx":368,"cells":{"id":{"kind":"number","value":368,"string":"368"},"code":{"kind":"string","value":"import pytest\n\n\n@pytest.fixture\ndef user_data_model():\n from alfasim_sdk._internal.models import data_model\n from alfasim_sdk._internal.types import BaseField\n\n class ValidType(BaseField):\n pass\n\n @data_model(icon=\"model.png\", caption=\"PLUGIN DEV MODEL\")\n class Model:\n valid_attribute = ValidType(caption=\"valid\")\n\n return Model\n\n\n@pytest.fixture\ndef user_data_container(user_data_model):\n from alfasim_sdk._internal.models import container_model\n from alfasim_sdk._internal.types import BaseField\n\n class ValidType(BaseField):\n pass\n\n @container_model(\n model=user_data_model, icon=\"container.png\", caption=\"PLUGIN DEV CONTAINER\"\n )\n class Container:\n container_valid_attribute = ValidType(caption=\"valid\")\n\n return Container\n\n\ndef test_data_model(user_data_model):\n import attr\n\n # Attributes from the class, should be accessed by _alfasim_metadata\n assert user_data_model._alfasim_metadata[\"caption\"] == \"PLUGIN DEV MODEL\"\n assert user_data_model._alfasim_metadata[\"icon\"] == \"model.png\"\n\n # \"data_model\" should not have references to others model\n assert user_data_model._alfasim_metadata[\"model\"] is None\n\n # Attributes defined from the user should be accessed by attr fields\n assert attr.fields(user_data_model).valid_attribute is not None\n\n\ndef METHOD_NAME(user_data_container):\n import attr\n\n assert user_data_container._alfasim_metadata[\"model\"] is not None\n assert \"Model\" in str(user_data_container._alfasim_metadata[\"model\"])\n\n assert user_data_container._alfasim_metadata[\"caption\"] == \"PLUGIN DEV CONTAINER\"\n assert user_data_container._alfasim_metadata[\"icon\"] == \"container.png\"\n\n assert attr.fields(user_data_container).container_valid_attribute is not None\n\n\ndef test_invalid_attribute():\n from alfasim_sdk._internal.models import data_model\n from alfasim_sdk._internal.types import BaseField\n\n class ValidType(BaseField):\n pass\n\n error_msg = \"Error defining _invalid_attribute, attributes starting with '_' are not allowed\"\n\n with pytest.raises(TypeError, match=error_msg):\n\n @data_model(icon=\"model.png\", caption=\"PLUGIN DEV MODEL\")\n class ModelPrivateAttribute: # pylint: disable=unused-variable\n _invalid_attribute = ValidType(caption=\"invalid\")\n\n class Invalid(object):\n pass\n\n error_msg = (\n \"Error defining invalid, attributes must be a valid type defined by alfasim_sdk\"\n )\n with 
pytest.raises(TypeError, match=error_msg):\n\n @data_model(icon=\"model.png\", caption=\"PLUGIN DEV MODEL\")\n class Model: # pylint: disable=unused-variable\n invalid = Invalid()\n\n\ndef test_attribute_order():\n from alfasim_sdk._internal.models import data_model\n from alfasim_sdk._internal.types import (\n Boolean,\n Reference,\n TracerType,\n Enum,\n String,\n Quantity,\n )\n\n @data_model(icon=\"\", caption=\"caption\")\n class Model:\n boolean = Boolean(value=True, caption=\"caption\")\n data_reference = Reference(ref_type=TracerType, caption=\"caption\")\n enum = Enum(values=[\"value_1\", \"value_2\"], caption=\"caption\")\n string = String(value=\"value\", caption=\"caption\")\n quantity = Quantity(value=1, unit=\"m\", caption=\"caption\")\n\n expected_order = [\"boolean\", \"data_reference\", \"enum\", \"string\", \"quantity\"]\n assert [attr.name for attr in Model.__attrs_attrs__] == expected_order\n\n\ndef test_check_model_in_container_model():\n from alfasim_sdk._internal.models import container_model, data_model\n from alfasim_sdk._internal.types import String\n\n @data_model(caption=\"The child\")\n class Child:\n name = String(value=\"A child\", caption=\"Name\")\n\n @container_model(caption=\"The parent\", model=Child)\n class Parent:\n name = String(value=\"A parent\", caption=\"Name\")\n\n with pytest.raises(TypeError):\n\n @container_model(caption=\"The grand parent\", model=Parent)\n class GrandParent: # pragma: no cover (`container_model` is expected to raise)\n name = String(value=\"A grand parent\", caption=\"Name\")"},"code_compressed":{"kind":"null"}}},{"rowIdx":369,"cells":{"id":{"kind":"number","value":369,"string":"369"},"code":{"kind":"string","value":"import asyncio\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Collection,\n Optional,\n Type,\n)\n\nimport aiohttp\nfrom requests.exceptions import (\n ConnectionError,\n HTTPError,\n Timeout,\n TooManyRedirects,\n)\n\nfrom web3.types import (\n AsyncMiddlewareCoroutine,\n RPCEndpoint,\n RPCResponse,\n)\n\nif TYPE_CHECKING:\n from web3 import ( # noqa: F401\n AsyncWeb3,\n Web3,\n )\n\nwhitelist = [\n \"admin\",\n \"miner\",\n \"net\",\n \"txpool\",\n \"testing\",\n \"evm\",\n \"eth_protocolVersion\",\n \"eth_syncing\",\n \"eth_coinbase\",\n \"eth_mining\",\n \"eth_hashrate\",\n \"eth_chainId\",\n \"eth_gasPrice\",\n \"eth_accounts\",\n \"eth_blockNumber\",\n \"eth_getBalance\",\n \"eth_getStorageAt\",\n \"eth_getProof\",\n \"eth_getCode\",\n \"eth_getBlockByNumber\",\n \"eth_getBlockByHash\",\n \"eth_getBlockTransactionCountByNumber\",\n \"eth_getBlockTransactionCountByHash\",\n \"eth_getUncleCountByBlockNumber\",\n \"eth_getUncleCountByBlockHash\",\n \"eth_getTransactionByHash\",\n \"eth_getTransactionByBlockHashAndIndex\",\n \"eth_getTransactionByBlockNumberAndIndex\",\n \"eth_getTransactionReceipt\",\n \"eth_getTransactionCount\",\n \"eth_getRawTransactionByHash\",\n \"eth_call\",\n \"eth_estimateGas\",\n \"eth_newBlockFilter\",\n \"eth_newPendingTransactionFilter\",\n \"eth_newFilter\",\n \"eth_getFilterChanges\",\n \"eth_getFilterLogs\",\n \"eth_getLogs\",\n \"eth_uninstallFilter\",\n \"eth_getCompilers\",\n \"eth_getWork\",\n \"eth_sign\",\n \"eth_signTypedData\",\n \"eth_sendRawTransaction\",\n \"personal_importRawKey\",\n \"personal_newAccount\",\n \"personal_listAccounts\",\n \"personal_listWallets\",\n \"personal_lockAccount\",\n \"personal_unlockAccount\",\n \"personal_ecRecover\",\n \"personal_sign\",\n \"personal_signTypedData\",\n]\n\n\ndef check_if_retry_on_failure(method: 
RPCEndpoint) -> bool:\n root = method.split(\"_\")[0]\n if root in whitelist:\n return True\n elif method in whitelist:\n return True\n else:\n return False\n\n\ndef exception_retry_middleware(\n make_request: Callable[[RPCEndpoint, Any], RPCResponse],\n _w3: \"Web3\",\n errors: Collection[Type[BaseException]],\n retries: int = 5,\n) -> Callable[[RPCEndpoint, Any], RPCResponse]:\n \"\"\"\n Creates middleware that retries failed HTTP requests. Is a default\n middleware for HTTPProvider.\n \"\"\"\n\n def middleware(method: RPCEndpoint, params: Any) -> Optional[RPCResponse]:\n if check_if_retry_on_failure(method):\n for i in range(retries):\n try:\n return make_request(method, params)\n except tuple(errors):\n if i < retries - 1:\n continue\n else:\n raise\n return None\n else:\n return make_request(method, params)\n\n return middleware\n\n\ndef http_retry_request_middleware(\n make_request: Callable[[RPCEndpoint, Any], Any], w3: \"Web3\"\n) -> Callable[[RPCEndpoint, Any], Any]:\n return exception_retry_middleware(\n make_request, w3, (ConnectionError, HTTPError, Timeout, TooManyRedirects)\n )\n\n\nasync def METHOD_NAME(\n make_request: Callable[[RPCEndpoint, Any], Any],\n _async_w3: \"AsyncWeb3\",\n errors: Collection[Type[BaseException]],\n retries: int = 5,\n backoff_factor: float = 0.3,\n) -> AsyncMiddlewareCoroutine:\n \"\"\"\n Creates middleware that retries failed HTTP requests.\n Is a default middleware for AsyncHTTPProvider.\n \"\"\"\n\n async def middleware(method: RPCEndpoint, params: Any) -> Optional[RPCResponse]:\n if check_if_retry_on_failure(method):\n for i in range(retries):\n try:\n return await make_request(method, params)\n except tuple(errors):\n if i < retries - 1:\n await asyncio.sleep(backoff_factor)\n continue\n else:\n raise\n return None\n else:\n return await make_request(method, params)\n\n return middleware\n\n\nasync def async_http_retry_request_middleware(\n make_request: Callable[[RPCEndpoint, Any], Any], async_w3: \"AsyncWeb3\"\n) -> Callable[[RPCEndpoint, Any], Any]:\n return await METHOD_NAME(\n make_request,\n async_w3,\n (TimeoutError, aiohttp.ClientError),\n )"},"code_compressed":{"kind":"null"}}},{"rowIdx":370,"cells":{"id":{"kind":"number","value":370,"string":"370"},"code":{"kind":"string","value":"import logging\nimport operator\nfrom functools import reduce\nfrom time import sleep\nfrom typing import Any, Dict, List\n\nimport boto3\nimport botocore.loaders as boto_loader\nimport botocore.regions as boto_regions\nfrom botocore.config import Config as BotoConfig\nfrom botocore.exceptions import ClientError, NoCredentialsError, ProfileNotFound\n\nfrom taskcat.exceptions import TaskCatException\n\nLOG = logging.getLogger(__name__)\n\nREGIONAL_ENDPOINT_SERVICES = [\"sts\"]\n\n\nclass Boto3Cache:\n RETRIES = 10\n BACKOFF = 2\n DELAY = 0.1\n CLIENT_THROTTLE_RETRIES = 20\n\n def __init__(self, _boto3=boto3):\n self._boto3 = _boto3\n self._session_cache: Dict[str, Dict[str, boto3.Session]] = {}\n self._client_cache: Dict[str, Dict[str, Dict[str, boto3.client]]] = {}\n self._resource_cache: Dict[str, Dict[str, Dict[str, boto3.resource]]] = {}\n self._account_info: Dict[str, Dict[str, str]] = {}\n self._lock_cache_update = False\n\n def session(self, profile: str = \"default\", region: str = None) -> boto3.Session:\n region = self._get_region(region, profile)\n try:\n session = self._cache_lookup(\n self._session_cache,\n [profile, region],\n self._boto3.Session,\n [],\n {\"region_name\": region, \"profile_name\": profile},\n )\n except ProfileNotFound:\n if 
profile != \"default\":\n raise\n session = self._boto3.Session(region_name=region)\n self._cache_set(self._session_cache, [profile, region], session)\n return session\n\n def client(\n self, service: str, profile: str = \"default\", region: str = None\n ) -> boto3.client:\n region = self._get_region(region, profile)\n session = self.session(profile, region)\n kwargs = {\"config\": BotoConfig(retries={\"max_attempts\": 20})}\n if service in REGIONAL_ENDPOINT_SERVICES:\n kwargs.update({\"endpoint_url\": self._get_endpoint_url(service, region)})\n return self._cache_lookup(\n self._client_cache,\n [profile, region, service],\n session.client,\n [service],\n kwargs,\n )\n\n def resource(\n self, service: str, profile: str = \"default\", region: str = None\n ) -> boto3.resource:\n region = self._get_region(region, profile)\n session = self.session(profile, region)\n return self._cache_lookup(\n self._resource_cache,\n [profile, region, service],\n session.resource,\n [service],\n )\n\n def partition(self, profile: str = \"default\") -> str:\n return self._cache_lookup(\n self._account_info, [profile], self._get_account_info, [profile]\n )[\"partition\"]\n\n def account_id(self, profile: str = \"default\") -> str:\n return self._cache_lookup(\n self._account_info, [profile], self._get_account_info, [profile]\n )[\"account_id\"]\n\n def _get_account_info(self, profile):\n partition, region = self._get_partition(profile)\n session = self.session(profile, region)\n sts_client = session.client(\"sts\", region_name=region)\n try:\n account_id = sts_client.get_caller_identity()[\"Account\"]\n except ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"AccessDenied\":\n # pylint: disable=raise-missing-from\n raise TaskCatException(\n f\"Not able to fetch account number from {region} using profile \"\n f\"{profile}. {str(e)}\"\n )\n raise\n except NoCredentialsError as e:\n # pylint: disable=raise-missing-from\n raise TaskCatException(\n f\"Not able to fetch account number from {region} using profile \"\n f\"{profile}. {str(e)}\"\n )\n except ProfileNotFound as e:\n # pylint: disable=raise-missing-from\n raise TaskCatException(\n f\"Not able to fetch account number from {region} using profile \"\n f\"{profile}. 
{str(e)}\"\n )\n return {\"partition\": partition, \"account_id\": account_id}\n\n def _make_parent_keys(self, cache: dict, keys: list):\n if keys:\n if not cache.get(keys[0]):\n cache[keys[0]] = {}\n self._make_parent_keys(cache[keys[0]], keys[1:])\n\n def _cache_lookup(self, cache, key_list, create_func, args=None, kwargs=None):\n try:\n value = self._cache_get(cache, key_list)\n except KeyError:\n args = [] if not args else args\n kwargs = {} if not kwargs else kwargs\n value = self.METHOD_NAME(create_func, args, kwargs)\n self._cache_set(cache, key_list, value)\n return value\n\n def METHOD_NAME(self, create_func, args, kwargs):\n retries = self.RETRIES\n delay = self.DELAY\n while retries:\n try:\n return create_func(*args, **kwargs)\n except KeyError as e:\n if str(e) not in [\"'credential_provider'\", \"'endpoint_resolver'\"]:\n raise\n backoff = (self.RETRIES - retries + delay) * self.BACKOFF\n sleep(backoff)\n\n @staticmethod\n def _get_endpoint_url(service, region):\n data = boto_loader.create_loader().load_data(\"endpoints\")\n endpoint_data = boto_regions.EndpointResolver(data).construct_endpoint(\n service, region\n )\n if not endpoint_data:\n raise TaskCatException(\n f\"unable to resolve endpoint for {service} in {region}\"\n )\n return f\"https://{service}.{region}.{endpoint_data['dnsSuffix']}\"\n\n @staticmethod\n def _cache_get(cache: dict, key_list: List[str]):\n return reduce(operator.getitem, key_list, cache)\n\n def _cache_set(self, cache: dict, key_list: list, value: Any):\n self._make_parent_keys(cache, key_list[:-1])\n self._cache_get(cache, key_list[:-1])[key_list[-1]] = value\n\n def _get_region(self, region, profile):\n if not region:\n region = self.get_default_region(profile)\n return region\n\n def _get_partition(self, profile):\n partition_regions = [\n (\"aws\", \"us-east-1\"),\n (\"aws-cn\", \"cn-north-1\"),\n (\"aws-us-gov\", \"us-gov-west-1\"),\n ]\n for partition, region in partition_regions:\n try:\n self.session(profile, region).client(\n \"sts\", region_name=region\n ).get_caller_identity()\n return (partition, region)\n except ClientError as e:\n if \"InvalidClientTokenId\" in str(e):\n continue\n raise\n raise ValueError(\"cannot find suitable AWS partition\")\n\n def get_default_region(self, profile_name=\"default\") -> str:\n try:\n if profile_name != \"default\":\n region = self._boto3.session.Session(\n profile_name=profile_name\n ).region_name\n else:\n region = self._boto3.session.Session().region_name\n except ProfileNotFound:\n if profile_name != \"default\":\n raise\n region = self._boto3.session.Session().region_name\n if not region:\n _, region = self._get_partition(profile_name)\n LOG.warning(\n \"Region not set in credential chain, defaulting to {}\".format(region)\n )\n return region"},"code_compressed":{"kind":"null"}}},{"rowIdx":371,"cells":{"id":{"kind":"number","value":371,"string":"371"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\n\nclass ListGroupAuthorizationRulesRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'IoTCC', '2021-05-13', 'ListGroupAuthorizationRules','IoTCC')\n\t\tself.set_method('POST')\n\n\tdef get_DestinationTypes(self): # RepeatList\n\t\treturn self.get_query_params().get('DestinationType')\n\n\tdef set_DestinationTypes(self, DestinationType): # RepeatList\n\t\tfor depth1 in range(len(DestinationType)):\n\t\t\tself.add_query_param('DestinationType.' + str(depth1 + 1), DestinationType[depth1])\n\tdef get_Destinations(self): # RepeatList\n\t\treturn self.get_query_params().get('Destination')\n\n\tdef set_Destinations(self, Destination): # RepeatList\n\t\tfor depth1 in range(len(Destination)):\n\t\t\tself.add_query_param('Destination.' + str(depth1 + 1), Destination[depth1])\n\tdef get_Type(self): # String\n\t\treturn self.get_query_params().get('Type')\n\n\tdef set_Type(self, Type): # String\n\t\tself.add_query_param('Type', Type)\n\tdef get_Protocols(self): # RepeatList\n\t\treturn self.get_query_params().get('Protocol')\n\n\tdef set_Protocols(self, Protocol): # RepeatList\n\t\tfor depth1 in range(len(Protocol)):\n\t\t\tself.add_query_param('Protocol.' + str(depth1 + 1), Protocol[depth1])\n\tdef get_AuthorizationRuleIdss(self): # RepeatList\n\t\treturn self.get_query_params().get('AuthorizationRuleIds')\n\n\tdef set_AuthorizationRuleIdss(self, AuthorizationRuleIds): # RepeatList\n\t\tfor depth1 in range(len(AuthorizationRuleIds)):\n\t\t\tself.add_query_param('AuthorizationRuleIds.' + str(depth1 + 1), AuthorizationRuleIds[depth1])\n\tdef get_NextToken(self): # String\n\t\treturn self.get_query_params().get('NextToken')\n\n\tdef set_NextToken(self, NextToken): # String\n\t\tself.add_query_param('NextToken', NextToken)\n\tdef get_Policys(self): # RepeatList\n\t\treturn self.get_query_params().get('Policy')\n\n\tdef METHOD_NAME(self, Policy): # RepeatList\n\t\tfor depth1 in range(len(Policy)):\n\t\t\tself.add_query_param('Policy.' + str(depth1 + 1), Policy[depth1])\n\tdef get_AuthorizationRuleStatuss(self): # RepeatList\n\t\treturn self.get_query_params().get('AuthorizationRuleStatus')\n\n\tdef set_AuthorizationRuleStatuss(self, AuthorizationRuleStatus): # RepeatList\n\t\tfor depth1 in range(len(AuthorizationRuleStatus)):\n\t\t\tself.add_query_param('AuthorizationRuleStatus.' + str(depth1 + 1), AuthorizationRuleStatus[depth1])\n\tdef get_IoTCloudConnectorGroupId(self): # String\n\t\treturn self.get_query_params().get('IoTCloudConnectorGroupId')\n\n\tdef set_IoTCloudConnectorGroupId(self, IoTCloudConnectorGroupId): # String\n\t\tself.add_query_param('IoTCloudConnectorGroupId', IoTCloudConnectorGroupId)\n\tdef get_AuthorizationRuleNames(self): # RepeatList\n\t\treturn self.get_query_params().get('AuthorizationRuleName')\n\n\tdef set_AuthorizationRuleNames(self, AuthorizationRuleName): # RepeatList\n\t\tfor depth1 in range(len(AuthorizationRuleName)):\n\t\t\tself.add_query_param('AuthorizationRuleName.' 
+ str(depth1 + 1), AuthorizationRuleName[depth1])\n\tdef get_DestinationPorts(self): # RepeatList\n\t\treturn self.get_query_params().get('DestinationPort')\n\n\tdef set_DestinationPorts(self, DestinationPort): # RepeatList\n\t\tfor depth1 in range(len(DestinationPort)):\n\t\t\tself.add_query_param('DestinationPort.' + str(depth1 + 1), DestinationPort[depth1])\n\tdef get_MaxResults(self): # Integer\n\t\treturn self.get_query_params().get('MaxResults')\n\n\tdef set_MaxResults(self, MaxResults): # Integer\n\t\tself.add_query_param('MaxResults', MaxResults)"},"code_compressed":{"kind":"null"}}},{"rowIdx":372,"cells":{"id":{"kind":"number","value":372,"string":"372"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdkr_kvstore.endpoint import endpoint_data\n\nclass DescribeInstancesRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'DescribeInstances','redisa')\n\t\tself.set_method('POST')\n\n\t\tif hasattr(self, \"endpoint_map\"):\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\n\t\tif hasattr(self, \"endpoint_regional\"):\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\n\n\tdef get_ResourceOwnerId(self): # Long\n\t\treturn self.get_query_params().get('ResourceOwnerId')\n\n\tdef set_ResourceOwnerId(self, ResourceOwnerId): # Long\n\t\tself.add_query_param('ResourceOwnerId', ResourceOwnerId)\n\tdef get_SearchKey(self): # String\n\t\treturn self.get_query_params().get('SearchKey')\n\n\tdef set_SearchKey(self, SearchKey): # String\n\t\tself.add_query_param('SearchKey', SearchKey)\n\tdef get_NetworkType(self): # String\n\t\treturn self.get_query_params().get('NetworkType')\n\n\tdef set_NetworkType(self, NetworkType): # String\n\t\tself.add_query_param('NetworkType', NetworkType)\n\tdef get_EngineVersion(self): # String\n\t\treturn self.get_query_params().get('EngineVersion')\n\n\tdef set_EngineVersion(self, EngineVersion): # String\n\t\tself.add_query_param('EngineVersion', EngineVersion)\n\tdef get_InstanceClass(self): # String\n\t\treturn self.get_query_params().get('InstanceClass')\n\n\tdef METHOD_NAME(self, InstanceClass): # String\n\t\tself.add_query_param('InstanceClass', InstanceClass)\n\tdef get_PageNumber(self): # Integer\n\t\treturn self.get_query_params().get('PageNumber')\n\n\tdef set_PageNumber(self, PageNumber): # Integer\n\t\tself.add_query_param('PageNumber', PageNumber)\n\tdef get_ResourceGroupId(self): # String\n\t\treturn self.get_query_params().get('ResourceGroupId')\n\n\tdef set_ResourceGroupId(self, ResourceGroupId): # String\n\t\tself.add_query_param('ResourceGroupId', ResourceGroupId)\n\tdef get_Expired(self): # String\n\t\treturn 
self.get_query_params().get('Expired')\n\n\tdef set_Expired(self, Expired): # String\n\t\tself.add_query_param('Expired', Expired)\n\tdef get_SecurityToken(self): # String\n\t\treturn self.get_query_params().get('SecurityToken')\n\n\tdef set_SecurityToken(self, SecurityToken): # String\n\t\tself.add_query_param('SecurityToken', SecurityToken)\n\tdef get_PageSize(self): # Integer\n\t\treturn self.get_query_params().get('PageSize')\n\n\tdef set_PageSize(self, PageSize): # Integer\n\t\tself.add_query_param('PageSize', PageSize)\n\tdef get_InstanceType(self): # String\n\t\treturn self.get_query_params().get('InstanceType')\n\n\tdef set_InstanceType(self, InstanceType): # String\n\t\tself.add_query_param('InstanceType', InstanceType)\n\tdef get_EditionType(self): # String\n\t\treturn self.get_query_params().get('EditionType')\n\n\tdef set_EditionType(self, EditionType): # String\n\t\tself.add_query_param('EditionType', EditionType)\n\tdef get_Tags(self): # RepeatList\n\t\treturn self.get_query_params().get('Tag')\n\n\tdef set_Tags(self, Tag): # RepeatList\n\t\tfor depth1 in range(len(Tag)):\n\t\t\tif Tag[depth1].get('Value') is not None:\n\t\t\t\tself.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))\n\t\t\tif Tag[depth1].get('Key') is not None:\n\t\t\t\tself.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))\n\tdef get_InstanceStatus(self): # String\n\t\treturn self.get_query_params().get('InstanceStatus')\n\n\tdef set_InstanceStatus(self, InstanceStatus): # String\n\t\tself.add_query_param('InstanceStatus', InstanceStatus)\n\tdef get_ResourceOwnerAccount(self): # String\n\t\treturn self.get_query_params().get('ResourceOwnerAccount')\n\n\tdef set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String\n\t\tself.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)\n\tdef get_OwnerAccount(self): # String\n\t\treturn self.get_query_params().get('OwnerAccount')\n\n\tdef set_OwnerAccount(self, OwnerAccount): # String\n\t\tself.add_query_param('OwnerAccount', OwnerAccount)\n\tdef get_GlobalInstance(self): # Boolean\n\t\treturn self.get_query_params().get('GlobalInstance')\n\n\tdef set_GlobalInstance(self, GlobalInstance): # Boolean\n\t\tself.add_query_param('GlobalInstance', GlobalInstance)\n\tdef get_PrivateIp(self): # String\n\t\treturn self.get_query_params().get('PrivateIp')\n\n\tdef set_PrivateIp(self, PrivateIp): # String\n\t\tself.add_query_param('PrivateIp', PrivateIp)\n\tdef get_OwnerId(self): # Long\n\t\treturn self.get_query_params().get('OwnerId')\n\n\tdef set_OwnerId(self, OwnerId): # Long\n\t\tself.add_query_param('OwnerId', OwnerId)\n\tdef get_VSwitchId(self): # String\n\t\treturn self.get_query_params().get('VSwitchId')\n\n\tdef set_VSwitchId(self, VSwitchId): # String\n\t\tself.add_query_param('VSwitchId', VSwitchId)\n\tdef get_InstanceIds(self): # String\n\t\treturn self.get_query_params().get('InstanceIds')\n\n\tdef set_InstanceIds(self, InstanceIds): # String\n\t\tself.add_query_param('InstanceIds', InstanceIds)\n\tdef get_ArchitectureType(self): # String\n\t\treturn self.get_query_params().get('ArchitectureType')\n\n\tdef set_ArchitectureType(self, ArchitectureType): # String\n\t\tself.add_query_param('ArchitectureType', ArchitectureType)\n\tdef get_VpcId(self): # String\n\t\treturn self.get_query_params().get('VpcId')\n\n\tdef set_VpcId(self, VpcId): # String\n\t\tself.add_query_param('VpcId', VpcId)\n\tdef get_ZoneId(self): # String\n\t\treturn self.get_query_params().get('ZoneId')\n\n\tdef set_ZoneId(self, 
ZoneId): # String\n\t\tself.add_query_param('ZoneId', ZoneId)\n\tdef get_ChargeType(self): # String\n\t\treturn self.get_query_params().get('ChargeType')\n\n\tdef set_ChargeType(self, ChargeType): # String\n\t\tself.add_query_param('ChargeType', ChargeType)"},"code_compressed":{"kind":"null"}}},{"rowIdx":373,"cells":{"id":{"kind":"number","value":373,"string":"373"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdklive.endpoint import endpoint_data\n\nclass AddCustomLiveStreamTranscodeRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'live', '2016-11-01', 'AddCustomLiveStreamTranscode','live')\n\t\tself.set_method('POST')\n\n\t\tif hasattr(self, \"endpoint_map\"):\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\n\t\tif hasattr(self, \"endpoint_regional\"):\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\n\n\tdef get_ResWithSource(self): # String\n\t\treturn self.get_query_params().get('ResWithSource')\n\n\tdef set_ResWithSource(self, ResWithSource): # String\n\t\tself.add_query_param('ResWithSource', ResWithSource)\n\tdef get_Gop(self): # String\n\t\treturn self.get_query_params().get('Gop')\n\n\tdef set_Gop(self, Gop): # String\n\t\tself.add_query_param('Gop', Gop)\n\tdef get_AudioCodec(self): # String\n\t\treturn self.get_query_params().get('AudioCodec')\n\n\tdef set_AudioCodec(self, AudioCodec): # String\n\t\tself.add_query_param('AudioCodec', AudioCodec)\n\tdef get_KmsUID(self): # String\n\t\treturn self.get_query_params().get('KmsUID')\n\n\tdef set_KmsUID(self, KmsUID): # String\n\t\tself.add_query_param('KmsUID', KmsUID)\n\tdef get_Height(self): # Integer\n\t\treturn self.get_query_params().get('Height')\n\n\tdef set_Height(self, Height): # Integer\n\t\tself.add_query_param('Height', Height)\n\tdef get_App(self): # String\n\t\treturn self.get_query_params().get('App')\n\n\tdef set_App(self, App): # String\n\t\tself.add_query_param('App', App)\n\tdef get_Profile(self): # Integer\n\t\treturn self.get_query_params().get('Profile')\n\n\tdef set_Profile(self, Profile): # Integer\n\t\tself.add_query_param('Profile', Profile)\n\tdef get_OwnerId(self): # Long\n\t\treturn self.get_query_params().get('OwnerId')\n\n\tdef set_OwnerId(self, OwnerId): # Long\n\t\tself.add_query_param('OwnerId', OwnerId)\n\tdef get_ExtWithSource(self): # String\n\t\treturn self.get_query_params().get('ExtWithSource')\n\n\tdef set_ExtWithSource(self, ExtWithSource): # String\n\t\tself.add_query_param('ExtWithSource', ExtWithSource)\n\tdef get_BitrateWithSource(self): # String\n\t\treturn self.get_query_params().get('BitrateWithSource')\n\n\tdef set_BitrateWithSource(self, BitrateWithSource): # 
String\n\t\tself.add_query_param('BitrateWithSource', BitrateWithSource)\n\tdef get_Domain(self): # String\n\t\treturn self.get_query_params().get('Domain')\n\n\tdef set_Domain(self, Domain): # String\n\t\tself.add_query_param('Domain', Domain)\n\tdef get_Template(self): # String\n\t\treturn self.get_query_params().get('Template')\n\n\tdef set_Template(self, Template): # String\n\t\tself.add_query_param('Template', Template)\n\tdef get_Lazy(self): # String\n\t\treturn self.get_query_params().get('Lazy')\n\n\tdef set_Lazy(self, Lazy): # String\n\t\tself.add_query_param('Lazy', Lazy)\n\tdef get_KmsKeyExpireInterval(self): # String\n\t\treturn self.get_query_params().get('KmsKeyExpireInterval')\n\n\tdef set_KmsKeyExpireInterval(self, KmsKeyExpireInterval): # String\n\t\tself.add_query_param('KmsKeyExpireInterval', KmsKeyExpireInterval)\n\tdef get_TemplateType(self): # String\n\t\treturn self.get_query_params().get('TemplateType')\n\n\tdef set_TemplateType(self, TemplateType): # String\n\t\tself.add_query_param('TemplateType', TemplateType)\n\tdef get_AudioProfile(self): # String\n\t\treturn self.get_query_params().get('AudioProfile')\n\n\tdef set_AudioProfile(self, AudioProfile): # String\n\t\tself.add_query_param('AudioProfile', AudioProfile)\n\tdef get_EncryptParameters(self): # String\n\t\treturn self.get_query_params().get('EncryptParameters')\n\n\tdef set_EncryptParameters(self, EncryptParameters): # String\n\t\tself.add_query_param('EncryptParameters', EncryptParameters)\n\tdef get_AudioChannelNum(self): # Integer\n\t\treturn self.get_query_params().get('AudioChannelNum')\n\n\tdef set_AudioChannelNum(self, AudioChannelNum): # Integer\n\t\tself.add_query_param('AudioChannelNum', AudioChannelNum)\n\tdef get_FPS(self): # Integer\n\t\treturn self.get_query_params().get('FPS')\n\n\tdef METHOD_NAME(self, FPS): # Integer\n\t\tself.add_query_param('FPS', FPS)\n\tdef get_AudioRate(self): # Integer\n\t\treturn self.get_query_params().get('AudioRate')\n\n\tdef set_AudioRate(self, AudioRate): # Integer\n\t\tself.add_query_param('AudioRate', AudioRate)\n\tdef get_FpsWithSource(self): # String\n\t\treturn self.get_query_params().get('FpsWithSource')\n\n\tdef set_FpsWithSource(self, FpsWithSource): # String\n\t\tself.add_query_param('FpsWithSource', FpsWithSource)\n\tdef get_AudioBitrate(self): # Integer\n\t\treturn self.get_query_params().get('AudioBitrate')\n\n\tdef set_AudioBitrate(self, AudioBitrate): # Integer\n\t\tself.add_query_param('AudioBitrate', AudioBitrate)\n\tdef get_Width(self): # Integer\n\t\treturn self.get_query_params().get('Width')\n\n\tdef set_Width(self, Width): # Integer\n\t\tself.add_query_param('Width', Width)\n\tdef get_VideoBitrate(self): # Integer\n\t\treturn self.get_query_params().get('VideoBitrate')\n\n\tdef set_VideoBitrate(self, VideoBitrate): # Integer\n\t\tself.add_query_param('VideoBitrate', VideoBitrate)\n\tdef get_KmsKeyID(self): # String\n\t\treturn self.get_query_params().get('KmsKeyID')\n\n\tdef set_KmsKeyID(self, KmsKeyID): # String\n\t\tself.add_query_param('KmsKeyID', KmsKeyID)"},"code_compressed":{"kind":"null"}}},{"rowIdx":374,"cells":{"id":{"kind":"number","value":374,"string":"374"},"code":{"kind":"string","value":"# Copyright 2021 Memgraph Ltd.\n#\n# Use of this software is governed by the Business Source License\n# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source\n# License, and you may not use this file except in compliance with the Business Source License.\n#\n# As of the Change Date 
specified in that file, in accordance with\n# the Business Source License, use of this software will be governed\n# by the Apache License, Version 2.0, included in the file\n# licenses/APL.txt.\n\nimport copy\nimport os\nimport subprocess\nimport sys\nimport time\n\nimport mgclient\n\nSCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))\nPROJECT_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, \"..\", \"..\"))\nBUILD_DIR = os.path.join(PROJECT_DIR, \"build\")\nMEMGRAPH_BINARY = os.path.join(BUILD_DIR, \"memgraph\")\n\n\ndef wait_for_server(port, delay=0.01):\n cmd = [\"nc\", \"-z\", \"-w\", \"1\", \"127.0.0.1\", str(port)]\n count = 0\n while subprocess.call(cmd) != 0:\n time.sleep(0.01)\n if count > 10 / 0.01:\n print(\"Could not wait for server on port\", port, \"to startup!\")\n sys.exit(1)\n count += 1\n time.sleep(delay)\n\n\ndef extract_bolt_port(args):\n for arg_index, arg in enumerate(args):\n if arg.startswith(\"--bolt-port=\"):\n maybe_port = arg.split(\"=\")[1]\n if not maybe_port.isdigit():\n raise Exception(\"Unable to read Bolt port after --bolt-port=.\")\n return int(maybe_port)\n elif arg == \"--bolt-port\":\n maybe_port = args[arg_index + 1]\n if not maybe_port.isdigit():\n raise Exception(\"Unable to read Bolt port after --bolt-port.\")\n return int(maybe_port)\n return 7687\n\n\ndef replace_paths(path):\n return path.replace(\"$PROJECT_DIR\", PROJECT_DIR).replace(\"$SCRIPT_DIR\", SCRIPT_DIR).replace(\"$BUILD_DIR\", BUILD_DIR)\n\n\nclass MemgraphInstanceRunner:\n def __init__(self, binary_path=MEMGRAPH_BINARY, use_ssl=False):\n self.host = \"127.0.0.1\"\n self.bolt_port = None\n self.binary_path = binary_path\n self.args = None\n self.proc_mg = None\n self.ssl = use_ssl\n\n def METHOD_NAME(self, setup_queries):\n if setup_queries is None:\n return\n # An assumption being database instance is fresh, no need for the auth.\n conn = mgclient.connect(host=self.host, port=self.bolt_port, sslmode=self.ssl)\n conn.autocommit = True\n cursor = conn.cursor()\n for query_coll in setup_queries:\n if isinstance(query_coll, str):\n cursor.execute(query_coll)\n elif isinstance(query_coll, list):\n for query in query_coll:\n cursor.execute(query)\n cursor.close()\n conn.close()\n\n # NOTE: Both query and get_connection may esablish new connection -> auth\n # details required -> username/password should be optional arguments.\n def query(self, query, conn=None, username=\"\", password=\"\"):\n new_conn = conn is None\n if new_conn:\n conn = self.get_connection(username, password)\n cursor = conn.cursor()\n cursor.execute(query)\n data = cursor.fetchall()\n cursor.close()\n if new_conn:\n conn.close()\n return data\n\n def get_connection(self, username=\"\", password=\"\"):\n conn = mgclient.connect(\n host=self.host, port=self.bolt_port, sslmode=self.ssl, username=username, password=password\n )\n conn.autocommit = True\n return conn\n\n def start(self, restart=False, args=None, setup_queries=None):\n if not restart and self.is_running():\n return\n self.stop()\n if args is not None:\n self.args = copy.deepcopy(args)\n self.args = [replace_paths(arg) for arg in self.args]\n args_mg = [\n self.binary_path,\n \"--storage-wal-enabled\",\n \"--storage-snapshot-interval-sec\",\n \"300\",\n \"--storage-properties-on-edges\",\n ] + self.args\n self.bolt_port = extract_bolt_port(args_mg)\n self.proc_mg = subprocess.Popen(args_mg)\n wait_for_server(self.bolt_port)\n self.METHOD_NAME(setup_queries)\n assert self.is_running(), \"The Memgraph process died!\"\n\n def is_running(self):\n if 
self.proc_mg is None:\n return False\n if self.proc_mg.poll() is not None:\n return False\n return True\n\n def stop(self):\n if not self.is_running():\n return\n self.proc_mg.terminate()\n code = self.proc_mg.wait()\n assert code == 0, \"The Memgraph process exited with non-zero!\"\n\n def kill(self):\n if not self.is_running():\n return\n self.proc_mg.kill()\n code = self.proc_mg.wait()\n assert code == -9, \"The killed Memgraph process exited with non-nine!\""},"code_compressed":{"kind":"null"}}},{"rowIdx":375,"cells":{"id":{"kind":"number","value":375,"string":"375"},"code":{"kind":"string","value":"# Copyright 2017-2022 EPAM Systems, Inc. (https://www.epam.com/)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pipeline.hpc.logger import Logger\n\n\ndef _perform_command(action, msg, error_msg, skip_on_failure):\n Logger.info(msg)\n try:\n action()\n except RuntimeError as e:\n Logger.warn(error_msg)\n if not skip_on_failure:\n raise RuntimeError(error_msg, e)\n\n\nclass GridEngineType:\n\n SGE = \"SGE\"\n SLURM = \"SLURM\"\n\n def __init__(self):\n pass\n\n\nclass AllocationRuleParsingError(RuntimeError):\n pass\n\n\nclass AllocationRule:\n ALLOWED_VALUES = ['$pe_slots', '$fill_up', '$round_robin']\n\n def __init__(self, value):\n if value in AllocationRule.ALLOWED_VALUES:\n self.value = value\n else:\n raise AllocationRuleParsingError('Wrong AllocationRule value, only %s is available!' 
% AllocationRule.ALLOWED_VALUES)\n\n @staticmethod\n def pe_slots():\n return AllocationRule('$pe_slots')\n\n @staticmethod\n def fill_up():\n return AllocationRule('$fill_up')\n\n @staticmethod\n def round_robin():\n return AllocationRule('$round_robin')\n\n @staticmethod\n def fractional_rules():\n return [AllocationRule.round_robin(), AllocationRule.fill_up()]\n\n @staticmethod\n def integral_rules():\n return [AllocationRule.pe_slots()]\n\n def __eq__(self, other):\n if not isinstance(other, AllocationRule):\n # don't attempt to compare against unrelated types\n return False\n return other.value == self.value\n\n\nclass GridEngineJobState:\n RUNNING = 'running'\n PENDING = 'pending'\n SUSPENDED = 'suspended'\n ERROR = 'errored'\n DELETED = 'deleted'\n COMPLETED = 'completed'\n UNKNOWN = 'unknown'\n\n _letter_codes_to_states = {\n # Job statuses: [SGE] + [SLURM]\n RUNNING: ['r', 't', 'Rr', 'Rt'] + ['RUNNING'],\n PENDING: ['qw', 'qw', 'hqw', 'hqw', 'hRwq', 'hRwq', 'hRwq', 'qw', 'qw'] + ['PENDING'],\n SUSPENDED: ['s', 'ts', 'S', 'tS', 'T', 'tT', 'Rs', 'Rts', 'RS', 'RtS', 'RT', 'RtT'] + ['SUSPENDED', 'STOPPED'],\n ERROR: ['Eqw', 'Ehqw', 'EhRqw'] + ['DEADLINE', ' FAILED'],\n DELETED: ['dr', 'dt', 'dRr', 'dRt', 'ds', 'dS', 'dT', 'dRs', 'dRS', 'dRT'] + ['DELETED', 'CANCELLED'],\n COMPLETED: [] + ['COMPLETED', 'COMPLETING']\n }\n\n @staticmethod\n def from_letter_code(code):\n for key in GridEngineJobState._letter_codes_to_states:\n if code in GridEngineJobState._letter_codes_to_states[key]:\n return key\n return GridEngineJobState.UNKNOWN\n\n\nclass GridEngineJob:\n\n def __init__(self, id, root_id, name, user, state, datetime, hosts=None, cpu=0, gpu=0, mem=0, pe='local'):\n self.id = id\n self.root_id = root_id\n self.name = name\n self.user = user\n self.state = state\n self.datetime = datetime\n self.hosts = hosts if hosts else []\n self.cpu = cpu\n self.gpu = gpu\n self.mem = mem\n self.pe = pe\n\n def __repr__(self):\n return str(self.__dict__)\n\n\nclass GridEngine:\n\n def get_jobs(self):\n pass\n\n def disable_host(self, host):\n \"\"\"\n Disables host to prevent receiving new jobs from the queue.\n This command does not abort currently running jobs.\n\n :param host: Host to be enabled.\n \"\"\"\n pass\n\n def enable_host(self, host):\n \"\"\"\n Enables host to make it available to receive new jobs from the queue.\n\n :param host: Host to be enabled.\n \"\"\"\n pass\n\n def get_pe_allocation_rule(self, pe):\n \"\"\"\n Returns allocation rule of the pe\n\n :param pe: Parallel environment to return allocation rule.\n \"\"\"\n pass\n\n def delete_host(self, host, skip_on_failure=False):\n \"\"\"\n Completely deletes host from GE:\n 1. Shutdown host execution daemon.\n 2. Removes host from queue settings.\n 3. Removes host from host group.\n 4. Removes host from administrative hosts.\n 5. 
Removes host from GE.\n\n :param host: Host to be removed.\n :param skip_on_failure: Specifies if the host killing should be continued even if some of\n the commands has failed.\n \"\"\"\n pass\n\n def get_host_supplies(self):\n pass\n\n def METHOD_NAME(self, host):\n pass\n\n def get_engine_type(self):\n pass\n\n def is_valid(self, host):\n \"\"\"\n Validates host in GE checking corresponding execution host availability and its states.\n\n :param host: Host to be checked.\n :return: True if execution host is valid.\n \"\"\"\n return True\n\n def kill_jobs(self, jobs, force=False):\n \"\"\"\n Kills jobs in GE.\n\n :param jobs: Grid engine jobs.\n :param force: Specifies if this command should be performed with -f flag.\n \"\"\"\n pass\n\n\nclass GridEngineDemandSelector:\n\n def select(self, jobs):\n pass\n\n\nclass GridEngineJobValidator:\n\n def validate(self, jobs):\n pass"},"code_compressed":{"kind":"null"}}},{"rowIdx":376,"cells":{"id":{"kind":"number","value":376,"string":"376"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdkvpc.endpoint import endpoint_data\n\nclass ListPublicIpAddressPoolsRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'Vpc', '2016-04-28', 'ListPublicIpAddressPools','vpc')\n\t\tself.set_method('POST')\n\n\t\tif hasattr(self, \"endpoint_map\"):\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\n\t\tif hasattr(self, \"endpoint_regional\"):\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\n\n\tdef get_ResourceOwnerId(self): # Long\n\t\treturn self.get_query_params().get('ResourceOwnerId')\n\n\tdef set_ResourceOwnerId(self, ResourceOwnerId): # Long\n\t\tself.add_query_param('ResourceOwnerId', ResourceOwnerId)\n\tdef get_Isp(self): # String\n\t\treturn self.get_query_params().get('Isp')\n\n\tdef set_Isp(self, Isp): # String\n\t\tself.add_query_param('Isp', Isp)\n\tdef get_ResourceGroupId(self): # String\n\t\treturn self.get_query_params().get('ResourceGroupId')\n\n\tdef set_ResourceGroupId(self, ResourceGroupId): # String\n\t\tself.add_query_param('ResourceGroupId', ResourceGroupId)\n\tdef get_NextToken(self): # String\n\t\treturn self.get_query_params().get('NextToken')\n\n\tdef set_NextToken(self, NextToken): # String\n\t\tself.add_query_param('NextToken', NextToken)\n\tdef get_DryRun(self): # Boolean\n\t\treturn self.get_query_params().get('DryRun')\n\n\tdef set_DryRun(self, DryRun): # Boolean\n\t\tself.add_query_param('DryRun', DryRun)\n\tdef get_PublicIpAddressPoolIdss(self): # RepeatList\n\t\treturn self.get_query_params().get('PublicIpAddressPoolIds')\n\n\tdef set_PublicIpAddressPoolIdss(self, PublicIpAddressPoolIds): # RepeatList\n\t\tfor depth1 
in range(len(PublicIpAddressPoolIds)):\n\t\t\tself.add_query_param('PublicIpAddressPoolIds.' + str(depth1 + 1), PublicIpAddressPoolIds[depth1])\n\tdef get_ResourceOwnerAccount(self): # String\n\t\treturn self.get_query_params().get('ResourceOwnerAccount')\n\n\tdef set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String\n\t\tself.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)\n\tdef get_OwnerAccount(self): # String\n\t\treturn self.get_query_params().get('OwnerAccount')\n\n\tdef set_OwnerAccount(self, OwnerAccount): # String\n\t\tself.add_query_param('OwnerAccount', OwnerAccount)\n\tdef get_OwnerId(self): # Long\n\t\treturn self.get_query_params().get('OwnerId')\n\n\tdef set_OwnerId(self, OwnerId): # Long\n\t\tself.add_query_param('OwnerId', OwnerId)\n\tdef get_Tagss(self): # RepeatList\n\t\treturn self.get_query_params().get('Tags')\n\n\tdef set_Tagss(self, Tags): # RepeatList\n\t\tfor depth1 in range(len(Tags)):\n\t\t\tif Tags[depth1].get('Key') is not None:\n\t\t\t\tself.add_query_param('Tags.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))\n\t\t\tif Tags[depth1].get('Value') is not None:\n\t\t\t\tself.add_query_param('Tags.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))\n\tdef METHOD_NAME(self): # String\n\t\treturn self.get_query_params().get('Name')\n\n\tdef set_Name(self, Name): # String\n\t\tself.add_query_param('Name', Name)\n\tdef get_MaxResults(self): # Integer\n\t\treturn self.get_query_params().get('MaxResults')\n\n\tdef set_MaxResults(self, MaxResults): # Integer\n\t\tself.add_query_param('MaxResults', MaxResults)\n\tdef get_Status(self): # String\n\t\treturn self.get_query_params().get('Status')\n\n\tdef set_Status(self, Status): # String\n\t\tself.add_query_param('Status', Status)"},"code_compressed":{"kind":"null"}}},{"rowIdx":377,"cells":{"id":{"kind":"number","value":377,"string":"377"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdkecs.endpoint import endpoint_data\n\nclass DescribeDisksFullStatusRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeDisksFullStatus','ecs')\n\t\tself.set_method('POST')\n\n\t\tif hasattr(self, \"endpoint_map\"):\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\n\t\tif hasattr(self, \"endpoint_regional\"):\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\n\n\tdef get_EventIds(self): # RepeatList\n\t\treturn self.get_query_params().get('EventId')\n\n\tdef set_EventIds(self, EventId): # RepeatList\n\t\tfor depth1 in range(len(EventId)):\n\t\t\tself.add_query_param('EventId.' 
+ str(depth1 + 1), EventId[depth1])\n\tdef get_ResourceOwnerId(self): # Long\n\t\treturn self.get_query_params().get('ResourceOwnerId')\n\n\tdef set_ResourceOwnerId(self, ResourceOwnerId): # Long\n\t\tself.add_query_param('ResourceOwnerId', ResourceOwnerId)\n\tdef get_PageNumber(self): # Integer\n\t\treturn self.get_query_params().get('PageNumber')\n\n\tdef set_PageNumber(self, PageNumber): # Integer\n\t\tself.add_query_param('PageNumber', PageNumber)\n\tdef get_EventTimeStart(self): # String\n\t\treturn self.get_query_params().get('EventTime.Start')\n\n\tdef set_EventTimeStart(self, EventTimeStart): # String\n\t\tself.add_query_param('EventTime.Start', EventTimeStart)\n\tdef get_ResourceGroupId(self): # String\n\t\treturn self.get_query_params().get('ResourceGroupId')\n\n\tdef set_ResourceGroupId(self, ResourceGroupId): # String\n\t\tself.add_query_param('ResourceGroupId', ResourceGroupId)\n\tdef get_PageSize(self): # Integer\n\t\treturn self.get_query_params().get('PageSize')\n\n\tdef METHOD_NAME(self, PageSize): # Integer\n\t\tself.add_query_param('PageSize', PageSize)\n\tdef get_DiskIds(self): # RepeatList\n\t\treturn self.get_query_params().get('DiskId')\n\n\tdef set_DiskIds(self, DiskId): # RepeatList\n\t\tfor depth1 in range(len(DiskId)):\n\t\t\tself.add_query_param('DiskId.' + str(depth1 + 1), DiskId[depth1])\n\tdef get_Tags(self): # RepeatList\n\t\treturn self.get_query_params().get('Tag')\n\n\tdef set_Tags(self, Tag): # RepeatList\n\t\tfor depth1 in range(len(Tag)):\n\t\t\tif Tag[depth1].get('Key') is not None:\n\t\t\t\tself.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))\n\t\t\tif Tag[depth1].get('Value') is not None:\n\t\t\t\tself.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))\n\tdef get_ResourceOwnerAccount(self): # String\n\t\treturn self.get_query_params().get('ResourceOwnerAccount')\n\n\tdef set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String\n\t\tself.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)\n\tdef get_OwnerAccount(self): # String\n\t\treturn self.get_query_params().get('OwnerAccount')\n\n\tdef set_OwnerAccount(self, OwnerAccount): # String\n\t\tself.add_query_param('OwnerAccount', OwnerAccount)\n\tdef get_OwnerId(self): # Long\n\t\treturn self.get_query_params().get('OwnerId')\n\n\tdef set_OwnerId(self, OwnerId): # Long\n\t\tself.add_query_param('OwnerId', OwnerId)\n\tdef get_EventTimeEnd(self): # String\n\t\treturn self.get_query_params().get('EventTime.End')\n\n\tdef set_EventTimeEnd(self, EventTimeEnd): # String\n\t\tself.add_query_param('EventTime.End', EventTimeEnd)\n\tdef get_HealthStatus(self): # String\n\t\treturn self.get_query_params().get('HealthStatus')\n\n\tdef set_HealthStatus(self, HealthStatus): # String\n\t\tself.add_query_param('HealthStatus', HealthStatus)\n\tdef get_EventType(self): # String\n\t\treturn self.get_query_params().get('EventType')\n\n\tdef set_EventType(self, EventType): # String\n\t\tself.add_query_param('EventType', EventType)\n\tdef get_Status(self): # String\n\t\treturn self.get_query_params().get('Status')\n\n\tdef set_Status(self, Status): # String\n\t\tself.add_query_param('Status', Status)"},"code_compressed":{"kind":"null"}}},{"rowIdx":378,"cells":{"id":{"kind":"number","value":378,"string":"378"},"code":{"kind":"string","value":"\"\"\"CustomFCNMaskHead for OTX template.\"\"\"\n# Copyright (C) 2023 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n#\n\nimport torch\nfrom mmdet.models.builder import HEADS\nfrom 
mmdet.models.roi_heads.mask_heads.fcn_mask_head import FCNMaskHead\n\nfrom otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled\n\n\n@HEADS.register_module()\nclass CustomFCNMaskHead(FCNMaskHead):\n \"\"\"Custom FCN Mask Head for fast mask evaluation.\"\"\"\n\n def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale):\n \"\"\"Get segmentation masks from mask_pred and bboxes.\n\n The original `FCNMaskHead.get_seg_masks` grid sampled 28 x 28 masks to the original image resolution.\n As a result, the resized masks occupy a large amount of memory and slow down the inference.\n This method directly returns 28 x 28 masks and resize to bounding boxes size in post-processing step.\n Doing so can save memory and speed up the inference.\n\n Args:\n mask_pred (Tensor or ndarray): shape (n, #class, h, w).\n For single-scale testing, mask_pred is the direct output of\n model, whose type is Tensor, while for multi-scale testing,\n it will be converted to numpy array outside of this method.\n det_bboxes (Tensor): shape (n, 4/5)\n det_labels (Tensor): shape (n, )\n rcnn_test_cfg (dict): rcnn testing config\n ori_shape (Tuple): original image height and width, shape (2,)\n scale_factor(ndarray | Tensor): If ``rescale is True``, box\n coordinates are divided by this scale factor to fit\n ``ori_shape``.\n rescale (bool): If True, the resulting masks will be rescaled to\n ``ori_shape``.\n\n Returns:\n list[list]: encoded masks. The c-th item in the outer list\n corresponds to the c-th class. Given the c-th outer list, the\n i-th item in that inner list is the mask for the i-th box with\n class label c.\n\n \"\"\"\n if isinstance(mask_pred, torch.Tensor):\n mask_pred = mask_pred.sigmoid()\n else:\n # In AugTest, has been activated before\n mask_pred = det_bboxes.new_tensor(mask_pred)\n\n cls_segms = [[] for _ in range(self.num_classes)] # BG is not included in num_classes\n labels = det_labels\n\n N = len(mask_pred)\n # The actual implementation split the input into chunks,\n # and paste them chunk by chunk.\n\n threshold = rcnn_test_cfg.mask_thr_binary\n\n if not self.class_agnostic:\n mask_pred = mask_pred[range(N), labels][:, None]\n\n for i in range(N):\n mask = mask_pred[i]\n if threshold >= 0:\n mask = (mask >= threshold).to(dtype=torch.bool)\n else:\n # for visualization and debugging\n mask = (mask * 255).to(dtype=torch.uint8)\n mask = mask.detach().cpu().numpy()\n cls_segms[labels[i]].append(mask[0])\n return cls_segms\n\n def get_scaled_seg_masks(self, *args, **kwargs):\n \"\"\"Original method \"get_seg_mask\" from FCNMaskHead. 
Used in Semi-SL algorithm.\"\"\"\n return super().get_seg_masks(*args, **kwargs)\n\n\nif is_mmdeploy_enabled():\n from mmdeploy.core import FUNCTION_REWRITER\n\n @FUNCTION_REWRITER.register_rewriter(\n \"otx.algorithms.detection.adapters.mmdet.models.\" \"heads.custom_fcn_mask_head.CustomFCNMaskHead.get_seg_masks\"\n )\n def METHOD_NAME(\n ctx, self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, **kwargs\n ):\n \"\"\"Rewrite `get_seg_masks` of `FCNMaskHead` for default backend.\n\n Rewrite the get_seg_masks for only fcn_mask_head inference.\n\n Args:\n ctx (dict): context dict\n self (CustomFCNMaskHead): CustomFCNMaskHead instance\n mask_pred (Tensor): shape (n, #class, h, w).\n det_bboxes (Tensor): shape (n, 4/5)\n det_labels (Tensor): shape (n, )\n rcnn_test_cfg (dict): rcnn testing config\n ori_shape (Tuple): original image height and width, shape (2,)\n kwargs (dict): other arguments\n\n Returns:\n Tensor: a mask of shape (N, img_h, img_w).\n \"\"\"\n mask_pred = mask_pred.sigmoid()\n bboxes = det_bboxes[:, :4]\n labels = det_labels\n if not self.class_agnostic:\n box_inds = torch.arange(mask_pred.shape[0], device=bboxes.device)\n mask_pred = mask_pred[box_inds, labels][:, None]\n return mask_pred"},"code_compressed":{"kind":"null"}}},{"rowIdx":379,"cells":{"id":{"kind":"number","value":379,"string":"379"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\n\nclass UpdateTaskDetailRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'devops-rdc', '2020-03-03', 'UpdateTaskDetail')\n\t\tself.set_method('POST')\n\n\tdef get_Note(self): # String\n\t\treturn self.get_body_params().get('Note')\n\n\tdef set_Note(self, Note): # String\n\t\tself.add_body_params('Note', Note)\n\tdef get_ExecutorId(self): # String\n\t\treturn self.get_body_params().get('ExecutorId')\n\n\tdef set_ExecutorId(self, ExecutorId): # String\n\t\tself.add_body_params('ExecutorId', ExecutorId)\n\tdef get_StartDate(self): # String\n\t\treturn self.get_body_params().get('StartDate')\n\n\tdef set_StartDate(self, StartDate): # String\n\t\tself.add_body_params('StartDate', StartDate)\n\tdef get_DelInvolvers(self): # String\n\t\treturn self.get_body_params().get('DelInvolvers')\n\n\tdef set_DelInvolvers(self, DelInvolvers): # String\n\t\tself.add_body_params('DelInvolvers', DelInvolvers)\n\tdef get_Content(self): # String\n\t\treturn self.get_body_params().get('Content')\n\n\tdef set_Content(self, Content): # String\n\t\tself.add_body_params('Content', Content)\n\tdef get_SprintId(self): # String\n\t\treturn self.get_body_params().get('SprintId')\n\n\tdef set_SprintId(self, SprintId): # String\n\t\tself.add_body_params('SprintId', SprintId)\n\tdef get_CustomFieldId(self): # String\n\t\treturn self.get_body_params().get('CustomFieldId')\n\n\tdef set_CustomFieldId(self, CustomFieldId): # String\n\t\tself.add_body_params('CustomFieldId', CustomFieldId)\n\tdef get_ProjectId(self): # String\n\t\treturn self.get_body_params().get('ProjectId')\n\n\tdef set_ProjectId(self, ProjectId): # String\n\t\tself.add_body_params('ProjectId', ProjectId)\n\tdef get_TaskId(self): # String\n\t\treturn self.get_body_params().get('TaskId')\n\n\tdef set_TaskId(self, TaskId): # String\n\t\tself.add_body_params('TaskId', TaskId)\n\tdef get_TaskFlowStatusId(self): # String\n\t\treturn self.get_body_params().get('TaskFlowStatusId')\n\n\tdef set_TaskFlowStatusId(self, TaskFlowStatusId): # String\n\t\tself.add_body_params('TaskFlowStatusId', TaskFlowStatusId)\n\tdef get_TagIds(self): # String\n\t\treturn self.get_body_params().get('TagIds')\n\n\tdef set_TagIds(self, TagIds): # String\n\t\tself.add_body_params('TagIds', TagIds)\n\tdef METHOD_NAME(self): # String\n\t\treturn self.get_body_params().get('AddInvolvers')\n\n\tdef set_AddInvolvers(self, AddInvolvers): # String\n\t\tself.add_body_params('AddInvolvers', AddInvolvers)\n\tdef get_Priority(self): # Long\n\t\treturn self.get_body_params().get('Priority')\n\n\tdef set_Priority(self, Priority): # Long\n\t\tself.add_body_params('Priority', Priority)\n\tdef get_OrgId(self): # String\n\t\treturn self.get_body_params().get('OrgId')\n\n\tdef set_OrgId(self, OrgId): # String\n\t\tself.add_body_params('OrgId', OrgId)\n\tdef get_DueDate(self): # String\n\t\treturn self.get_body_params().get('DueDate')\n\n\tdef set_DueDate(self, DueDate): # String\n\t\tself.add_body_params('DueDate', DueDate)\n\tdef get_WorkTimes(self): # Long\n\t\treturn self.get_body_params().get('WorkTimes')\n\n\tdef set_WorkTimes(self, WorkTimes): # Long\n\t\tself.add_body_params('WorkTimes', WorkTimes)\n\tdef get_StoryPoint(self): # String\n\t\treturn self.get_body_params().get('StoryPoint')\n\n\tdef set_StoryPoint(self, StoryPoint): # String\n\t\tself.add_body_params('StoryPoint', StoryPoint)\n\tdef 
get_CustomFieldValues(self): # String\n\t\treturn self.get_body_params().get('CustomFieldValues')\n\n\tdef set_CustomFieldValues(self, CustomFieldValues): # String\n\t\tself.add_body_params('CustomFieldValues', CustomFieldValues)"},"code_compressed":{"kind":"null"}}},{"rowIdx":380,"cells":{"id":{"kind":"number","value":380,"string":"380"},"code":{"kind":"string","value":"\"\"\"EmceeSampler class.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom typing import List, Union\n\nimport numpy as np\n\nfrom ..problem import Problem\nfrom ..result import McmcPtResult\nfrom ..startpoint import UniformStartpoints, uniform\nfrom .sampler import Sampler, SamplerImportError\n\nlogger = logging.getLogger(__name__)\n\n\nclass EmceeSampler(Sampler):\n \"\"\"Use emcee for sampling.\n\n Wrapper around https://emcee.readthedocs.io/en/stable/, see there for\n details.\n \"\"\"\n\n def __init__(\n self,\n nwalkers: int = 1,\n sampler_args: dict = None,\n run_args: dict = None,\n ):\n \"\"\"\n Initialize sampler.\n\n Parameters\n ----------\n nwalkers:\n The number of walkers in the ensemble.\n sampler_args:\n Further keyword arguments that are passed on to\n ``emcee.EnsembleSampler.__init__``.\n run_args:\n Further keyword arguments that are passed on to\n ``emcee.EnsembleSampler.run_mcmc``.\n \"\"\"\n # check dependencies\n try:\n import emcee\n except ImportError:\n raise SamplerImportError(\"emcee\")\n\n super().__init__()\n self.nwalkers: int = nwalkers\n\n if sampler_args is None:\n sampler_args = {}\n self.sampler_args: dict = sampler_args\n\n if run_args is None:\n run_args = {}\n self.run_args: dict = run_args\n\n # set in initialize\n self.problem: Union[Problem, None] = None\n self.sampler: Union[emcee.EnsembleSampler, None] = None\n self.state: Union[emcee.State, None] = None\n\n def METHOD_NAME(\n self,\n center: np.ndarray,\n problem: Problem,\n epsilon: float = 1e-3,\n ):\n \"\"\"Get walker initial positions as samples from an epsilon ball.\n\n The ball is scaled in each direction according to the magnitude of the\n center in that direction.\n\n It is assumed that, because vectors are generated near a good point,\n all generated vectors are evaluable, so evaluability is not checked.\n\n Points that are generated outside the problem bounds will get shifted\n to lie on the edge of the problem bounds.\n\n Parameters\n ----------\n center:\n The center of the epsilon ball. The dimension should match the full\n dimension of the pyPESTO problem. This will be returned as the\n first position.\n problem:\n The pyPESTO problem.\n epsilon:\n The relative radius of the ball. 
e.g., if `epsilon=0.5`\n and the center of the first dimension is at 100, then the upper\n and lower bounds of the epsilon ball in the first dimension will\n be 150 and 50, respectively.\n \"\"\"\n # Epsilon ball\n lb = center * (1 - epsilon)\n ub = center * (1 + epsilon)\n\n # Adjust bounds to satisfy problem bounds\n lb[lb < problem.lb] = problem.lb[lb < problem.lb]\n ub[ub > problem.ub] = problem.ub[ub > problem.ub]\n\n # Sample initial positions\n initial_state_after_first = uniform(\n n_starts=self.nwalkers - 1,\n lb=lb,\n ub=ub,\n )\n\n # Include `center` in initial positions\n initial_state = np.row_stack(\n (\n center,\n initial_state_after_first,\n )\n )\n\n return initial_state\n\n def initialize(\n self,\n problem: Problem,\n x0: Union[np.ndarray, List[np.ndarray]],\n ) -> None:\n \"\"\"Initialize the sampler.\n\n It is recommended to initialize walkers\n\n Parameters\n ----------\n x0:\n The \"a priori preferred position\". e.g., an optimized parameter\n vector. https://emcee.readthedocs.io/en/stable/user/faq/\n The position of the first walker will be this, the remaining\n walkers will be assigned positions uniformly in a smaller ball\n around this vector.\n Alternatively, a set of vectors can be provided, which will be used\n to initialize walkers. In this case, any remaining walkers will be\n initialized at points sampled uniformly within the problem bounds.\n \"\"\"\n import emcee\n\n self.problem = problem\n\n # extract for pickling efficiency\n objective = self.problem.objective\n lb = self.problem.lb\n ub = self.problem.ub\n\n # parameter dimenstion\n ndim = len(self.problem.x_free_indices)\n\n def log_prob(x):\n \"\"\"Log-probability density function.\"\"\"\n # check if parameter lies within bounds\n if any(x < lb) or any(x > ub):\n return -np.inf\n # invert sign\n return -1.0 * objective(x)\n\n # initialize sampler\n self.sampler = emcee.EnsembleSampler(\n nwalkers=self.nwalkers,\n ndim=ndim,\n log_prob_fn=log_prob,\n **self.sampler_args,\n )\n\n # assign startpoints\n if self.state is None:\n if x0.ndim > 1 and len(x0.shape[0]) > 1:\n logger.warning(\n \"More than a single vector was provided to initialize the \"\n \"walker positions. If these vectors do not exist in a \"\n \"small ball around a high-probability position (e.g. 
\"\n \"optimized vector) then sampling may be inefficient (see \"\n \"emcee FAQ: \"\n \"https://emcee.readthedocs.io/en/stable/user/faq/ ).\"\n )\n # extract x0\n x0 = np.asarray(x0)\n if x0.ndim == 1:\n x0 = [x0]\n x0 = np.array([problem.get_full_vector(x) for x in x0])\n x_guesses_full0 = problem.x_guesses_full\n # add x0 to guesses\n problem.set_x_guesses(\n np.row_stack(\n (\n x0,\n problem.x_guesses_full,\n )\n )\n )\n # sample start points\n initial_state = UniformStartpoints(\n use_guesses=True,\n check_fval=True,\n check_grad=False,\n )(\n n_starts=self.nwalkers,\n problem=problem,\n )\n # restore original guesses\n problem.set_x_guesses(x_guesses_full0)\n else:\n initial_state = self.METHOD_NAME(\n center=x0,\n problem=problem,\n )\n\n self.state = initial_state\n\n def sample(self, n_samples: int, beta: float = 1.0) -> None:\n \"\"\"Return the most recent sample state.\"\"\"\n self.state = self.sampler.run_mcmc(\n initial_state=self.state,\n nsteps=n_samples,\n **self.run_args,\n )\n\n def get_samples(self) -> McmcPtResult:\n \"\"\"Get the samples into the fitting pypesto format.\"\"\"\n # all walkers are concatenated, yielding a flat array\n trace_x = np.array([self.sampler.get_chain(flat=True)])\n trace_neglogpost = np.array([-self.sampler.get_log_prob(flat=True)])\n # the sampler does not know priors\n trace_neglogprior = np.full(trace_neglogpost.shape, np.nan)\n # the walkers all run on temperature 1\n betas = np.array([1.0])\n\n result = McmcPtResult(\n trace_x=trace_x,\n trace_neglogpost=trace_neglogpost,\n trace_neglogprior=trace_neglogprior,\n betas=betas,\n )\n\n return result"},"code_compressed":{"kind":"null"}}},{"rowIdx":381,"cells":{"id":{"kind":"number","value":381,"string":"381"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdkoceanbasepro.endpoint import endpoint_data\n\nclass DescribeSlowSQLListRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'OceanBasePro', '2019-09-01', 'DescribeSlowSQLList','oceanbase')\n\t\tself.set_method('POST')\n\n\t\tif hasattr(self, \"endpoint_map\"):\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\n\t\tif hasattr(self, \"endpoint_regional\"):\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\n\n\tdef get_StartTime(self): # String\n\t\treturn self.get_body_params().get('StartTime')\n\n\tdef set_StartTime(self, StartTime): # String\n\t\tself.add_body_params('StartTime', StartTime)\n\tdef METHOD_NAME(self): # Integer\n\t\treturn self.get_body_params().get('PageNumber')\n\n\tdef set_PageNumber(self, PageNumber): # Integer\n\t\tself.add_body_params('PageNumber', PageNumber)\n\tdef get_SearchRule(self): # String\n\t\treturn self.get_body_params().get('SearchRule')\n\n\tdef set_SearchRule(self, SearchRule): # String\n\t\tself.add_body_params('SearchRule', SearchRule)\n\tdef get_TenantId(self): # String\n\t\treturn self.get_body_params().get('TenantId')\n\n\tdef set_TenantId(self, TenantId): # String\n\t\tself.add_body_params('TenantId', TenantId)\n\tdef get_PageSize(self): # Integer\n\t\treturn self.get_body_params().get('PageSize')\n\n\tdef set_PageSize(self, PageSize): # Integer\n\t\tself.add_body_params('PageSize', PageSize)\n\tdef get_SearchParameter(self): # String\n\t\treturn self.get_body_params().get('SearchParameter')\n\n\tdef set_SearchParameter(self, SearchParameter): # String\n\t\tself.add_body_params('SearchParameter', SearchParameter)\n\tdef get_SortOrder(self): # String\n\t\treturn self.get_body_params().get('SortOrder')\n\n\tdef set_SortOrder(self, SortOrder): # String\n\t\tself.add_body_params('SortOrder', SortOrder)\n\tdef get_SearchValue(self): # String\n\t\treturn self.get_body_params().get('SearchValue')\n\n\tdef set_SearchValue(self, SearchValue): # String\n\t\tself.add_body_params('SearchValue', SearchValue)\n\tdef get_SQLId(self): # String\n\t\treturn self.get_body_params().get('SQLId')\n\n\tdef set_SQLId(self, SQLId): # String\n\t\tself.add_body_params('SQLId', SQLId)\n\tdef get_FilterCondition(self): # String\n\t\treturn self.get_body_params().get('FilterCondition')\n\n\tdef set_FilterCondition(self, FilterCondition): # String\n\t\tself.add_body_params('FilterCondition', FilterCondition)\n\tdef get_EndTime(self): # String\n\t\treturn self.get_body_params().get('EndTime')\n\n\tdef set_EndTime(self, EndTime): # String\n\t\tself.add_body_params('EndTime', EndTime)\n\tdef get_NodeIp(self): # String\n\t\treturn self.get_body_params().get('NodeIp')\n\n\tdef set_NodeIp(self, NodeIp): # String\n\t\tself.add_body_params('NodeIp', NodeIp)\n\tdef get_DbName(self): # String\n\t\treturn self.get_body_params().get('DbName')\n\n\tdef set_DbName(self, DbName): # String\n\t\tself.add_body_params('DbName', DbName)\n\tdef get_SearchKeyWord(self): # String\n\t\treturn self.get_body_params().get('SearchKeyWord')\n\n\tdef set_SearchKeyWord(self, SearchKeyWord): # String\n\t\tself.add_body_params('SearchKeyWord', SearchKeyWord)\n\tdef get_SortColumn(self): # String\n\t\treturn self.get_body_params().get('SortColumn')\n\n\tdef set_SortColumn(self, SortColumn): # String\n\t\tself.add_body_params('SortColumn', 
SortColumn)"},"code_compressed":{"kind":"null"}}},{"rowIdx":382,"cells":{"id":{"kind":"number","value":382,"string":"382"},"code":{"kind":"string","value":"from functools import partial\nfrom unittest import (\n TestCase,\n mock,\n)\n\nfrom lxml import etree\n\nfrom pcs.lib.cib.resource import primitive\nfrom pcs.lib.cib.tools import IdProvider\nfrom pcs.lib.resource_agent import ResourceAgentName\n\nfrom pcs_test.tools.assertions import assert_xml_equal\n\n\nclass FindPrimitivesByAgent(TestCase):\n # pylint: disable=protected-access\n def setUp(self):\n self.resources_section = etree.fromstring(\n \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \"\"\"\n )\n\n def test_stonith(self):\n results = primitive._find_primitives_by_agent(\n self.resources_section,\n ResourceAgentName(\n \"stonith\",\n None,\n \"agent_type\",\n ),\n )\n expected_results = [\n '',\n '',\n ]\n self.assertEqual(len(expected_results), len(results))\n for i, res in enumerate(results):\n assert_xml_equal(expected_results[i], etree.tostring(res).decode())\n\n def test_with_provider(self):\n results = primitive._find_primitives_by_agent(\n self.resources_section,\n ResourceAgentName(\n \"standard\",\n \"provider\",\n \"agent_type\",\n ),\n )\n expected_results = [\n \"\"\"\"\"\",\n \"\"\"\"\"\",\n \"\"\"\"\"\",\n \"\"\"\"\"\",\n ]\n self.assertEqual(len(expected_results), len(results))\n for i, res in enumerate(results):\n assert_xml_equal(expected_results[i], etree.tostring(res).decode())\n\n\n@mock.patch(\"pcs.lib.cib.resource.primitive.append_new_instance_attributes\")\n@mock.patch(\"pcs.lib.cib.resource.primitive.append_new_meta_attributes\")\n@mock.patch(\"pcs.lib.cib.resource.primitive.create_operations\")\nclass AppendNew(TestCase):\n def setUp(self):\n self.resources_section = etree.fromstring(\"\")\n\n self.instance_attributes = {\"a\": \"b\"}\n self.meta_attributes = {\"c\": \"d\"}\n self.operation_list = [{\"name\": \"monitoring\"}]\n self.id_provider = IdProvider(self.resources_section)\n\n self.run = partial(\n primitive.append_new,\n self.resources_section,\n self.id_provider,\n instance_attributes=self.instance_attributes,\n meta_attributes=self.meta_attributes,\n operation_list=self.operation_list,\n )\n\n def check_mocks(\n self,\n primitive_element,\n create_operations,\n append_new_meta_attributes,\n append_new_instance_attributes,\n ):\n create_operations.assert_called_once_with(\n primitive_element, self.id_provider, self.operation_list\n )\n append_new_meta_attributes.assert_called_once_with(\n primitive_element, self.meta_attributes, self.id_provider\n )\n append_new_instance_attributes.assert_called_once_with(\n primitive_element, self.instance_attributes, self.id_provider\n )\n\n def METHOD_NAME(\n self,\n create_operations,\n append_new_meta_attributes,\n append_new_instance_attributes,\n ):\n primitive_element = self.run(\"RESOURCE_ID\", \"OCF\", None, \"DUMMY\")\n self.assertEqual(\n primitive_element, self.resources_section.find(\".//primitive\")\n )\n self.assertEqual(primitive_element.attrib[\"class\"], \"OCF\")\n self.assertEqual(primitive_element.attrib[\"type\"], \"DUMMY\")\n self.assertFalse(primitive_element.attrib.has_key(\"provider\"))\n\n self.check_mocks(\n primitive_element,\n create_operations,\n append_new_meta_attributes,\n append_new_instance_attributes,\n )\n\n def test_append_with_provider(\n self,\n create_operations,\n append_new_meta_attributes,\n append_new_instance_attributes,\n ):\n primitive_element = self.run(\"RESOURCE_ID\", \"OCF\", \"HEARTBEAT\", 
\"DUMMY\")\n self.assertEqual(\n primitive_element, self.resources_section.find(\".//primitive\")\n )\n self.assertEqual(primitive_element.attrib[\"class\"], \"OCF\")\n self.assertEqual(primitive_element.attrib[\"type\"], \"DUMMY\")\n self.assertEqual(primitive_element.attrib[\"provider\"], \"HEARTBEAT\")\n\n self.check_mocks(\n primitive_element,\n create_operations,\n append_new_meta_attributes,\n append_new_instance_attributes,\n )"},"code_compressed":{"kind":"null"}}},{"rowIdx":383,"cells":{"id":{"kind":"number","value":383,"string":"383"},"code":{"kind":"string","value":"from itertools import product\n\nfrom website.notifications.emails import compile_subscriptions\nfrom website.notifications import utils, constants\n\n\ndef get_file_subs_from_folder(addon, user, kind, path, name):\n \"\"\"Find the file tree under a specified folder.\"\"\"\n folder = dict(kind=kind, path=path, name=name)\n file_tree = addon._get_file_tree(filenode=folder, user=user, version='latest-published')\n return list_of_files(file_tree)\n\n\ndef list_of_files(file_object):\n files = []\n if file_object['kind'] == 'file':\n return [file_object['path']]\n else:\n for child in file_object['children']:\n files.extend(list_of_files(child))\n return files\n\n\ndef compile_user_lists(files, user, source_node, node):\n \"\"\"Take multiple file ids and compiles them.\n\n :param files: List of WaterButler paths\n :param user: User who initiated action/event\n :param source_node: Node instance from\n :param node: Node instance to\n :return: move, warn, and remove dicts\n \"\"\"\n # initialise subscription dictionaries\n move = {key: [] for key in constants.NOTIFICATION_TYPES}\n warn = {key: [] for key in constants.NOTIFICATION_TYPES}\n remove = {key: [] for key in constants.NOTIFICATION_TYPES}\n # get the node subscription\n if len(files) == 0:\n move, warn, remove = categorize_users(\n user, 'file_updated', source_node, 'file_updated', node\n )\n # iterate through file subscriptions\n for file_path in files:\n path = file_path.strip('/')\n t_move, t_warn, t_remove = categorize_users(\n user, path + '_file_updated', source_node,\n path + '_file_updated', node\n )\n # Add file subs to overall list of subscriptions\n for notification in constants.NOTIFICATION_TYPES:\n move[notification] = list(set(move[notification]).union(set(t_move[notification])))\n warn[notification] = list(set(warn[notification]).union(set(t_warn[notification])))\n remove[notification] = list(set(remove[notification]).union(set(t_remove[notification])))\n return move, warn, remove\n\n\ndef categorize_users(user, source_event, source_node, event, node):\n \"\"\"Categorize users from a file subscription into three categories.\n\n Puts users in one of three bins:\n - Moved: User has permissions on both nodes, subscribed to both\n - Warned: User has permissions on both, not subscribed to destination\n - Removed: Does not have permission on destination node\n :param user: User instance who started the event\n :param source_event: _event_name\n :param source_node: node from where the event happened\n :param event: new guid event name\n :param node: node where event ends up\n :return: Moved, to be warned, and removed users.\n \"\"\"\n remove = utils.users_to_remove(source_event, source_node, node)\n source_node_subs = compile_subscriptions(source_node, utils.find_subscription_type(source_event))\n new_subs = compile_subscriptions(node, utils.find_subscription_type(source_event), event)\n\n # Moves users into the warn bucket or the move bucket\n move = 
subscriptions_users_union(source_node_subs, new_subs)\n warn = subscriptions_users_difference(source_node_subs, new_subs)\n\n # Removes users without permissions\n warn, remove = METHOD_NAME(node, warn, remove)\n\n # Remove duplicates\n warn = subscriptions_users_remove_duplicates(warn, new_subs, remove_same=False)\n move = subscriptions_users_remove_duplicates(move, new_subs, remove_same=False)\n\n # Remove duplicates between move and warn; and move and remove\n move = subscriptions_users_remove_duplicates(move, warn, remove_same=True)\n move = subscriptions_users_remove_duplicates(move, remove, remove_same=True)\n\n for notifications in constants.NOTIFICATION_TYPES:\n # Remove the user who started this whole thing.\n user_id = user._id\n if user_id in warn[notifications]:\n warn[notifications].remove(user_id)\n if user_id in move[notifications]:\n move[notifications].remove(user_id)\n if user_id in remove[notifications]:\n remove[notifications].remove(user_id)\n\n return move, warn, remove\n\n\ndef METHOD_NAME(node, warn_subscription, remove_subscription):\n for notification in constants.NOTIFICATION_TYPES:\n subbed, removed = utils.separate_users(node, warn_subscription[notification])\n warn_subscription[notification] = subbed\n remove_subscription[notification].extend(removed)\n remove_subscription[notification] = list(set(remove_subscription[notification]))\n return warn_subscription, remove_subscription\n\n\ndef subscriptions_users_union(emails_1, emails_2):\n return {\n notification:\n list(\n set(emails_1[notification]).union(set(emails_2[notification]))\n )\n for notification in constants.NOTIFICATION_TYPES.keys()\n }\n\n\ndef subscriptions_users_difference(emails_1, emails_2):\n return {\n notification:\n list(\n set(emails_1[notification]).difference(set(emails_2[notification]))\n )\n for notification in constants.NOTIFICATION_TYPES.keys()\n }\n\n\ndef subscriptions_users_remove_duplicates(emails_1, emails_2, remove_same=False):\n emails_list = dict(emails_1)\n product_list = product(constants.NOTIFICATION_TYPES, repeat=2)\n for notification_1, notification_2 in product_list:\n if notification_2 == notification_1 and not remove_same or notification_2 == 'none':\n continue\n emails_list[notification_1] = list(\n set(emails_list[notification_1]).difference(set(emails_2[notification_2]))\n )\n return emails_list"},"code_compressed":{"kind":"null"}}},{"rowIdx":384,"cells":{"id":{"kind":"number","value":384,"string":"384"},"code":{"kind":"string","value":"from datetime import datetime\n\nfrom flask import g, request\nfrom flask_appbuilder import ModelRestApi\nfrom flask_appbuilder.api import expose, safe\nfrom flask_appbuilder.const import API_RESULT_RES_KEY\nfrom flask_appbuilder.models.sqla.interface import SQLAInterface\nfrom flask_appbuilder.security.decorators import permission_name, protect\nfrom flask_appbuilder.security.sqla.apis.user.schema import (\n UserPostSchema,\n UserPutSchema,\n)\nfrom flask_appbuilder.security.sqla.models import Role, User\nfrom marshmallow import ValidationError\nfrom sqlalchemy.exc import IntegrityError\nfrom werkzeug.security import generate_password_hash\n\n\nclass UserApi(ModelRestApi):\n resource_name = \"security/users\"\n openapi_spec_tag = \"Security Users\"\n class_permission_name = \"User\"\n datamodel = SQLAInterface(User)\n allow_browser_login = True\n\n list_columns = [\n \"id\",\n \"roles.id\",\n \"roles.name\",\n \"first_name\",\n \"last_name\",\n \"username\",\n \"active\",\n \"email\",\n \"last_login\",\n \"login_count\",\n 
\"fail_login_count\",\n \"created_on\",\n \"changed_on\",\n \"created_by.id\",\n \"changed_by.id\",\n ]\n show_columns = list_columns\n add_columns = [\n \"roles\",\n \"first_name\",\n \"last_name\",\n \"username\",\n \"active\",\n \"email\",\n \"password\",\n ]\n edit_columns = add_columns\n search_columns = [\n \"username\",\n \"first_name\",\n \"last_name\",\n \"active\",\n \"email\",\n \"created_by\",\n \"changed_by\",\n \"roles\",\n ]\n\n add_model_schema = UserPostSchema()\n edit_model_schema = UserPutSchema()\n\n def METHOD_NAME(self, item):\n item.changed_on = datetime.now()\n item.changed_by_fk = g.user.id\n if item.password:\n item.password = generate_password_hash(item.password)\n\n def pre_add(self, item):\n item.password = generate_password_hash(item.password)\n\n @expose(\"/\", methods=[\"POST\"])\n @protect()\n @safe\n @permission_name(\"post\")\n def post(self):\n \"\"\"Create new user\n ---\n post:\n requestBody:\n description: Model schema\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/{{self.__class__.__name__}}.post'\n responses:\n 201:\n description: Item changed\n content:\n application/json:\n schema:\n type: object\n properties:\n result:\n $ref: '#/components/schemas/{{self.__class__.__name__}}.post'\n 400:\n $ref: '#/components/responses/400'\n 401:\n $ref: '#/components/responses/401'\n 404:\n $ref: '#/components/responses/404'\n 422:\n $ref: '#/components/responses/422'\n 500:\n $ref: '#/components/responses/500'\n \"\"\"\n try:\n item = self.add_model_schema.load(request.json)\n model = User()\n roles = []\n for key, value in item.items():\n if key != \"roles\":\n setattr(model, key, value)\n else:\n for role_id in item[key]:\n role = (\n self.datamodel.session.query(Role)\n .filter(Role.id == role_id)\n .one_or_none()\n )\n if role:\n role.user_id = model.id\n role.role_id = role_id\n roles.append(role)\n\n if \"roles\" in item.keys():\n model.roles = roles\n\n self.pre_add(model)\n self.datamodel.add(model, raise_exception=True)\n return self.response(201, id=model.id)\n except ValidationError as error:\n return self.response_400(message=error.messages)\n except IntegrityError as e:\n return self.response_422(message=str(e.orig))\n\n @expose(\"/\", methods=[\"PUT\"])\n @protect()\n @safe\n @permission_name(\"put\")\n def put(self, pk):\n \"\"\"Edit user\n ---\n put:\n parameters:\n - in: path\n schema:\n type: integer\n name: pk\n requestBody:\n description: Model schema\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/{{self.__class__.__name__}}.put'\n responses:\n 200:\n description: Item changed\n content:\n application/json:\n schema:\n type: object\n properties:\n result:\n $ref: '#/components/schemas/{{self.__class__.__name__}}.put'\n 400:\n $ref: '#/components/responses/400'\n 401:\n $ref: '#/components/responses/401'\n 404:\n $ref: '#/components/responses/404'\n 422:\n $ref: '#/components/responses/422'\n 500:\n $ref: '#/components/responses/500'\n \"\"\"\n try:\n item = self.edit_model_schema.load(request.json)\n model = self.datamodel.get(pk, self._base_filters)\n roles = []\n\n for key, value in item.items():\n if key != \"roles\":\n setattr(model, key, value)\n else:\n for role_id in item[key]:\n role = (\n self.datamodel.session.query(Role)\n .filter(Role.id == role_id)\n .one_or_none()\n )\n if role:\n role.user_id = model.id\n role.role_id = role_id\n roles.append(role)\n\n if \"roles\" in item.keys():\n model.roles = roles\n\n self.METHOD_NAME(model)\n 
self.datamodel.edit(model, raise_exception=True)\n return self.response(\n 200,\n **{API_RESULT_RES_KEY: self.edit_model_schema.dump(item, many=False)},\n )\n\n except ValidationError as e:\n return self.response_400(message=e.messages)\n except IntegrityError as e:\n return self.response_422(message=str(e.orig))"},"code_compressed":{"kind":"null"}}},{"rowIdx":385,"cells":{"id":{"kind":"number","value":385,"string":"385"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdkecd.endpoint import endpoint_data\n\nclass ExportDesktopListInfoRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'ecd', '2020-09-30', 'ExportDesktopListInfo')\n\t\tself.set_method('POST')\n\n\t\tif hasattr(self, \"endpoint_map\"):\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\n\t\tif hasattr(self, \"endpoint_regional\"):\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\n\n\tdef get_OfficeSiteId(self): # String\n\t\treturn self.get_query_params().get('OfficeSiteId')\n\n\tdef set_OfficeSiteId(self, OfficeSiteId): # String\n\t\tself.add_query_param('OfficeSiteId', OfficeSiteId)\n\tdef get_DesktopStatus(self): # String\n\t\treturn self.get_query_params().get('DesktopStatus')\n\n\tdef set_DesktopStatus(self, DesktopStatus): # String\n\t\tself.add_query_param('DesktopStatus', DesktopStatus)\n\tdef get_NextToken(self): # String\n\t\treturn self.get_query_params().get('NextToken')\n\n\tdef set_NextToken(self, NextToken): # String\n\t\tself.add_query_param('NextToken', NextToken)\n\tdef get_EndUserIds(self): # RepeatList\n\t\treturn self.get_query_params().get('EndUserId')\n\n\tdef set_EndUserIds(self, EndUserId): # RepeatList\n\t\tfor depth1 in range(len(EndUserId)):\n\t\t\tself.add_query_param('EndUserId.' + str(depth1 + 1), EndUserId[depth1])\n\tdef get_DesktopIds(self): # RepeatList\n\t\treturn self.get_query_params().get('DesktopId')\n\n\tdef set_DesktopIds(self, DesktopId): # RepeatList\n\t\tfor depth1 in range(len(DesktopId)):\n\t\t\tself.add_query_param('DesktopId.' + str(depth1 + 1), DesktopId[depth1])\n\tdef get_Tags(self): # RepeatList\n\t\treturn self.get_query_params().get('Tag')\n\n\tdef set_Tags(self, Tag): # RepeatList\n\t\tfor depth1 in range(len(Tag)):\n\t\t\tif Tag[depth1].get('Value') is not None:\n\t\t\t\tself.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))\n\t\t\tif Tag[depth1].get('Key') is not None:\n\t\t\t\tself.add_query_param('Tag.' 
+ str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))\n\tdef get_DesktopName(self): # String\n\t\treturn self.get_query_params().get('DesktopName')\n\n\tdef set_DesktopName(self, DesktopName): # String\n\t\tself.add_query_param('DesktopName', DesktopName)\n\tdef get_GroupId(self): # String\n\t\treturn self.get_query_params().get('GroupId')\n\n\tdef set_GroupId(self, GroupId): # String\n\t\tself.add_query_param('GroupId', GroupId)\n\tdef get_ExpiredTime(self): # String\n\t\treturn self.get_query_params().get('ExpiredTime')\n\n\tdef set_ExpiredTime(self, ExpiredTime): # String\n\t\tself.add_query_param('ExpiredTime', ExpiredTime)\n\tdef get_MaxResults(self): # Integer\n\t\treturn self.get_query_params().get('MaxResults')\n\n\tdef set_MaxResults(self, MaxResults): # Integer\n\t\tself.add_query_param('MaxResults', MaxResults)\n\tdef get_LangType(self): # String\n\t\treturn self.get_query_params().get('LangType')\n\n\tdef set_LangType(self, LangType): # String\n\t\tself.add_query_param('LangType', LangType)\n\tdef get_ChargeType(self): # String\n\t\treturn self.get_query_params().get('ChargeType')\n\n\tdef set_ChargeType(self, ChargeType): # String\n\t\tself.add_query_param('ChargeType', ChargeType)\n\tdef METHOD_NAME(self): # String\n\t\treturn self.get_query_params().get('PolicyGroupId')\n\n\tdef set_PolicyGroupId(self, PolicyGroupId): # String\n\t\tself.add_query_param('PolicyGroupId', PolicyGroupId)\n\tdef get_UserName(self): # String\n\t\treturn self.get_query_params().get('UserName')\n\n\tdef set_UserName(self, UserName): # String\n\t\tself.add_query_param('UserName', UserName)"},"code_compressed":{"kind":"null"}}},{"rowIdx":386,"cells":{"id":{"kind":"number","value":386,"string":"386"},"code":{"kind":"string","value":"from datetime import datetime\nfrom unittest import mock\n\nimport pytest\nimport responses\n\nfrom api.share.utils import shtrove_ingest_url, sharev2_push_url\nfrom framework.auth.core import Auth\nfrom osf.models.spam import SpamStatus\nfrom osf.utils.permissions import READ, WRITE, ADMIN\nfrom osf_tests.factories import (\n AuthUserFactory,\n ProjectFactory,\n SubjectFactory,\n PreprintFactory,\n PreprintProviderFactory,\n)\nfrom website import settings\nfrom website.preprints.tasks import on_preprint_updated\nfrom ._utils import expect_preprint_ingest_request\n\n\n@pytest.mark.django_db\n@pytest.mark.enable_enqueue_task\nclass TestPreprintShare:\n @pytest.fixture(scope='class', autouse=True)\n def METHOD_NAME(self):\n with mock.patch.object(settings, 'USE_CELERY', False):\n yield\n\n @pytest.fixture\n def user(self):\n return AuthUserFactory()\n\n @pytest.fixture\n def auth(self, user):\n return Auth(user=user)\n\n @pytest.fixture\n def provider(self):\n return PreprintProviderFactory(\n name='Lars Larson Snowmobiling Experience',\n access_token='Snowmobiling'\n )\n\n @pytest.fixture\n def project(self, user, mock_share_responses):\n return ProjectFactory(creator=user, is_public=True)\n\n @pytest.fixture\n def subject(self):\n return SubjectFactory(text='Subject #1')\n\n @pytest.fixture\n def subject_two(self):\n return SubjectFactory(text='Subject #2')\n\n @pytest.fixture\n def preprint(self, project, user, provider, subject):\n return PreprintFactory(\n creator=user,\n filename='second_place.pdf',\n provider=provider,\n subjects=[[subject._id]],\n project=project,\n is_published=False\n )\n\n def test_save_unpublished_not_called(self, mock_share_responses, preprint):\n # expecting no ingest requests (delete or otherwise)\n with 
expect_preprint_ingest_request(mock_share_responses, preprint, count=0):\n preprint.save()\n\n def test_save_published_called(self, mock_share_responses, preprint, user, auth):\n with expect_preprint_ingest_request(mock_share_responses, preprint):\n preprint.set_published(True, auth=auth, save=True)\n\n # This covers an edge case where a preprint is forced back to unpublished\n # that it sends the information back to share\n def test_save_unpublished_called_forced(self, mock_share_responses, auth, preprint):\n with expect_preprint_ingest_request(mock_share_responses, preprint):\n preprint.set_published(True, auth=auth, save=True)\n with expect_preprint_ingest_request(mock_share_responses, preprint, delete=True):\n preprint.is_published = False\n preprint.save(**{'force_update': True})\n\n def test_save_published_subject_change_called(self, mock_share_responses, auth, preprint, subject, subject_two):\n preprint.set_published(True, auth=auth, save=True)\n with expect_preprint_ingest_request(mock_share_responses, preprint):\n preprint.set_subjects([[subject_two._id]], auth=auth)\n\n def test_save_unpublished_subject_change_not_called(self, mock_share_responses, auth, preprint, subject_two):\n with expect_preprint_ingest_request(mock_share_responses, preprint, delete=True):\n preprint.set_subjects([[subject_two._id]], auth=auth)\n\n def test_send_to_share_is_true(self, mock_share_responses, auth, preprint):\n preprint.set_published(True, auth=auth, save=True)\n with expect_preprint_ingest_request(mock_share_responses, preprint):\n on_preprint_updated(preprint._id, saved_fields=['title'])\n\n def test_preprint_contributor_changes_updates_preprints_share(self, mock_share_responses, user, auth):\n preprint = PreprintFactory(is_published=True, creator=user)\n preprint.set_published(True, auth=auth, save=True)\n user2 = AuthUserFactory()\n\n with expect_preprint_ingest_request(mock_share_responses, preprint):\n preprint.add_contributor(contributor=user2, auth=auth, save=True)\n\n with expect_preprint_ingest_request(mock_share_responses, preprint):\n preprint.move_contributor(contributor=user, index=0, auth=auth, save=True)\n\n data = [{'id': user._id, 'permissions': ADMIN, 'visible': True},\n {'id': user2._id, 'permissions': WRITE, 'visible': False}]\n\n with expect_preprint_ingest_request(mock_share_responses, preprint):\n preprint.manage_contributors(data, auth=auth, save=True)\n\n with expect_preprint_ingest_request(mock_share_responses, preprint):\n preprint.update_contributor(user2, READ, True, auth=auth, save=True)\n\n with expect_preprint_ingest_request(mock_share_responses, preprint):\n preprint.remove_contributor(contributor=user2, auth=auth)\n\n def test_call_async_update_on_500_failure(self, mock_share_responses, preprint, auth):\n mock_share_responses.replace(responses.POST, shtrove_ingest_url(), status=500)\n mock_share_responses.replace(responses.POST, sharev2_push_url(), status=500)\n preprint.set_published(True, auth=auth, save=True)\n with expect_preprint_ingest_request(mock_share_responses, preprint, count=5):\n preprint.update_search()\n\n def test_no_call_async_update_on_400_failure(self, mock_share_responses, preprint, auth):\n mock_share_responses.replace(responses.POST, shtrove_ingest_url(), status=400)\n mock_share_responses.replace(responses.POST, sharev2_push_url(), status=400)\n preprint.set_published(True, auth=auth, save=True)\n with expect_preprint_ingest_request(mock_share_responses, preprint, count=1):\n preprint.update_search()\n\n def test_delete_from_share(self, 
mock_share_responses):\n preprint = PreprintFactory()\n with expect_preprint_ingest_request(mock_share_responses, preprint):\n preprint.update_search()\n preprint.date_withdrawn = datetime.now()\n preprint.save()\n with expect_preprint_ingest_request(mock_share_responses, preprint):\n preprint.update_search()\n preprint.spam_status = SpamStatus.SPAM\n preprint.save()\n with expect_preprint_ingest_request(mock_share_responses, preprint, delete=True):\n preprint.update_search()"},"code_compressed":{"kind":"null"}}},{"rowIdx":387,"cells":{"id":{"kind":"number","value":387,"string":"387"},"code":{"kind":"string","value":"import pytest\n\npytestmark = [\n pytest.mark.django_db,\n pytest.mark.usefixtures(\"purchase\"),\n]\n\n\n@pytest.fixture\ndef METHOD_NAME(another_user, another_answer, question):\n another_answer.question = question\n another_answer.author = another_user\n another_answer.save()\n\n return another_answer\n\n\n@pytest.mark.freeze_time(\"2022-10-09 10:30:12+12:00\") # +12 hours kamchatka timezone\n@pytest.mark.usefixtures(\"kamchatka_timezone\")\ndef test_ok(api, question, answer):\n got = api.get(f\"/api/v2/homework/answers/?question={question.slug}\")[\"results\"]\n\n assert len(got[0]) == 9\n assert got[0][\"created\"] == \"2022-10-09T10:30:12+12:00\"\n assert got[0][\"modified\"] == \"2022-10-09T10:30:12+12:00\"\n assert got[0][\"slug\"] == str(answer.slug)\n assert got[0][\"question\"] == str(answer.question.slug)\n assert \"test\" in got[0][\"text\"]\n assert got[0][\"src\"] == \"*test*\"\n assert got[0][\"author\"][\"uuid\"] == str(api.user.uuid)\n assert got[0][\"author\"][\"first_name\"] == api.user.first_name\n assert got[0][\"author\"][\"last_name\"] == api.user.last_name\n assert got[0][\"has_descendants\"] is False\n assert got[0][\"reactions\"] == []\n\n\ndef test_has_reaction_fields_if_there_is_reaction(api, question, answer, reaction):\n got = api.get(f\"/api/v2/homework/answers/?question={question.slug}\")[\"results\"]\n\n reactions = got[0][\"reactions\"]\n assert len(reactions[0]) == 4\n assert reactions[0][\"emoji\"] == reaction.emoji\n assert reactions[0][\"slug\"] == str(reaction.slug)\n assert reactions[0][\"answer\"] == str(reaction.answer.slug)\n assert reactions[0][\"author\"][\"uuid\"] == str(reaction.author.uuid)\n assert reactions[0][\"author\"][\"first_name\"] == reaction.author.first_name\n assert reactions[0][\"author\"][\"last_name\"] == reaction.author.last_name\n\n\ndef test_has_descendants_is_true_if_answer_has_children(api, question, answer, another_answer):\n another_answer.parent = answer\n another_answer.save()\n\n got = api.get(f\"/api/v2/homework/answers/?question={question.slug}\")[\"results\"]\n\n assert got[0][\"has_descendants\"] is True\n\n\ndef test_nplusone(api, question, answer, another_answer, django_assert_num_queries, mixer):\n for _ in range(5):\n mixer.blend(\"homework.Reaction\", author=api.user, answer=answer)\n mixer.blend(\"homework.Reaction\", author=api.user, answer=another_answer)\n\n with django_assert_num_queries(7):\n api.get(f\"/api/v2/homework/answers/?question={question.slug}\")\n\n\n@pytest.mark.usefixtures(\"answer\")\ndef test_answers_from_other_questions_are_excluded(api, another_question):\n got = api.get(f\"/api/v2/homework/answers/?question={another_question.slug}\")[\"results\"]\n\n assert len(got) == 0\n\n\ndef test_non_root_answers_are_excluded(api, question, answer, METHOD_NAME):\n answer.parent = METHOD_NAME\n answer.save()\n\n got = 
api.get(f\"/api/v2/homework/answers/?question={question.slug}\")[\"results\"]\n\n assert len(got) == 1 # only answer_from_another_user\n assert got[0][\"slug\"] == str(METHOD_NAME.slug)\n\n\n@pytest.mark.usefixtures(\"answer\", \"answer_from_another_user\")\ndef test_answers_from_other_questions_are_excluded_even_if_user_has_the_permission(api, another_question):\n api.user.add_perm(\"homework.answer.see_all_answers\")\n\n got = api.get(f\"/api/v2/homework/answers/?question={another_question.slug}\")[\"results\"]\n\n assert len(got) == 0\n\n\n@pytest.mark.usefixtures(\"answer_from_another_user\")\ndef test_answers_from_another_authors_are_excluded(api, question):\n got = api.get(f\"/api/v2/homework/answers/?question={question.slug}\")[\"results\"]\n\n assert len(got) == 0\n\n\ndef test_answers_from_another_authors_are_included_if_already_seen(api, mixer, question, METHOD_NAME):\n mixer.blend(\"homework.AnswerAccessLogEntry\", user=api.user, answer=METHOD_NAME)\n\n got = api.get(f\"/api/v2/homework/answers/?question={question.slug}\")[\"results\"]\n\n assert len(got) == 1\n\n\ndef test_answers_from_another_authors_are_excluded_if_author_is_filtered(api, mixer, question, METHOD_NAME):\n mixer.blend(\"homework.AnswerAccessLogEntry\", user=api.user, answer=METHOD_NAME)\n\n got = api.get(f\"/api/v2/homework/answers/?question={question.slug}&author={api.user.uuid}\")[\"results\"]\n\n assert len(got) == 0\n\n\ndef test_access_log_entries_from_another_users_do_not_break_the_select(api, mixer, question, answer):\n mixer.cycle(5).blend(\"homework.AnswerAccessLogEntry\", question=question, answer=answer)\n\n got = api.get(f\"/api/v2/homework/answers/?question={question.slug}\")[\"results\"]\n\n assert len(got) == 1\n\n\n@pytest.mark.usefixtures(\"answer_from_another_user\")\ndef test_users_with_permission_may_see_all_answers(api, question):\n api.user.add_perm(\"homework.answer.see_all_answers\")\n\n got = api.get(f\"/api/v2/homework/answers/?question={question.slug}\")[\"results\"]\n\n assert len(got) == 1\n\n\ndef test_no_anon(anon, question):\n anon.get(f\"/api/v2/homework/answers/?question={question.slug}\", expected_status_code=401)\n\n\n@pytest.mark.parametrize(\n \"disable_pagination_value\",\n [\n \"True\",\n \"true\",\n \"1\",\n ],\n)\ndef test_pagination_could_be_disable_with_query_param(api, question, answer, disable_pagination_value):\n got = api.get(f\"/api/v2/homework/answers/?question={question.slug}&disable_pagination={disable_pagination_value}\")\n\n assert len(got) == 1\n assert got[0][\"slug\"] == str(answer.slug)\n\n\n@pytest.mark.parametrize(\n \"disable_pagination_value\",\n [\n \"false\",\n \"False\",\n \"any-other-value\",\n ],\n)\ndef test_paginated_response_with_disable_pagination_false_or_invalid_value(api, question, answer, disable_pagination_value):\n got = api.get(f\"/api/v2/homework/answers/?question={question.slug}&disable_pagination={disable_pagination_value}\")\n\n assert \"results\" in got\n assert \"count\" in got\n assert len(got[\"results\"]) == 1"},"code_compressed":{"kind":"null"}}},{"rowIdx":388,"cells":{"id":{"kind":"number","value":388,"string":"388"},"code":{"kind":"string","value":"from galaxy import model\nfrom galaxy.util.unittest import TestCase\nfrom galaxy.workflow import extract\n\nUNDEFINED_JOB = object()\n\n\nclass TestWorkflowExtractSummary(TestCase):\n def setUp(self):\n self.history = MockHistory()\n self.trans = MockTrans(self.history)\n\n def METHOD_NAME(self):\n job_dict, warnings = extract.summarize(trans=self.trans)\n assert not warnings\n 
assert not job_dict\n\n def test_summarize_returns_name_and_dataset_list(self):\n # Create two jobs and three datasets, test they are groupped\n # by job correctly with correct output names.\n hda1 = MockHda()\n self.history.active_datasets.append(hda1)\n hda2 = MockHda(job=hda1.job, output_name=\"out2\")\n self.history.active_datasets.append(hda2)\n hda3 = MockHda(output_name=\"out3\")\n self.history.active_datasets.append(hda3)\n\n job_dict, warnings = extract.summarize(trans=self.trans)\n assert len(job_dict) == 2\n assert not warnings\n assert job_dict[hda1.job] == [(\"out1\", hda1), (\"out2\", hda2)]\n assert job_dict[hda3.job] == [(\"out3\", hda3)]\n\n def test_finds_original_job_if_copied(self):\n hda = MockHda()\n derived_hda_1 = MockHda()\n derived_hda_1.copied_from_history_dataset_association = hda\n derived_hda_2 = MockHda()\n derived_hda_2.copied_from_history_dataset_association = derived_hda_1\n self.history.active_datasets.append(derived_hda_2)\n job_dict, warnings = extract.summarize(trans=self.trans)\n assert not warnings\n assert len(job_dict) == 1\n assert job_dict[hda.job] == [(\"out1\", derived_hda_2)]\n\n def test_fake_job_hda(self):\n \"\"\"Fakes job if creating_job_associations is empty.\"\"\"\n hda = MockHda(job=UNDEFINED_JOB)\n self.history.active_datasets.append(hda)\n job_dict, warnings = extract.summarize(trans=self.trans)\n assert not warnings\n assert len(job_dict) == 1\n fake_job = next(iter(job_dict.keys()))\n assert fake_job.id.startswith(\"fake_\")\n datasets = next(iter(job_dict.values()))\n assert datasets == [(None, hda)]\n\n def test_fake_job_hda_name_guess(self):\n hda_from_history = MockHda(job=UNDEFINED_JOB)\n hda_from_history.copied_from_history_dataset_association = MockHda(job=UNDEFINED_JOB)\n self.history.active_datasets.append(hda_from_history)\n job_dict, warnings = extract.summarize(trans=self.trans)\n assert not warnings\n assert len(job_dict) == 1\n fake_job = next(iter(job_dict.keys()))\n assert \"History\" in fake_job.name\n self.history.active_datasets.remove(hda_from_history)\n\n hda_from_library = MockHda(job=UNDEFINED_JOB)\n hda_from_library.copied_from_library_dataset_dataset_association = MockHda(job=UNDEFINED_JOB)\n self.history.active_datasets.append(hda_from_library)\n job_dict, warnings = extract.summarize(trans=self.trans)\n assert not warnings\n assert len(job_dict) == 1\n fake_job = next(iter(job_dict.keys()))\n assert \"Library\" in fake_job.name\n\n def test_fake_job_hdca(self):\n hdca = MockHdca()\n self.history.active_datasets.append(hdca)\n job_dict, warnings = extract.summarize(trans=self.trans)\n assert not warnings\n assert len(job_dict) == 1\n fake_job = next(iter(job_dict.keys()))\n assert fake_job.id.startswith(\"fake_\")\n assert fake_job.is_fake\n content_instances = next(iter(job_dict.values()))\n assert content_instances == [(None, hdca)]\n\n def test_implicit_map_job_hdca(self):\n creating_job = model.Job()\n hdca = MockHdca(implicit_output_name=\"out1\", job=creating_job)\n self.history.active_datasets.append(hdca)\n job_dict, warnings = extract.summarize(trans=self.trans)\n assert not warnings\n assert len(job_dict) == 1\n job = next(iter(job_dict.keys()))\n assert job is creating_job\n\n def test_warns_and_skips_datasets_if_not_finished(self):\n hda = MockHda(state=\"queued\")\n self.history.active_datasets.append(hda)\n job_dict, warnings = extract.summarize(trans=self.trans)\n assert warnings\n assert len(job_dict) == 0\n\n\nclass MockJobToOutputDatasetAssociation:\n job = None\n\n def __init__(self, 
name, dataset):\n self.name = name\n self.dataset = dataset\n\n\nclass MockHistory:\n def __init__(self):\n self.active_datasets = []\n\n @property\n def active_contents(self):\n return self.active_datasets\n\n\nclass MockTrans:\n def __init__(self, history):\n self.history = history\n\n def get_history(self):\n return self.history\n\n\nclass MockHda:\n def __init__(self, state=\"ok\", output_name=\"out1\", job=None):\n self.hid = 1\n self.id = 123\n self.state = state\n self.copied_from_history_dataset_association = None\n self.copied_from_library_dataset_dataset_association = None\n self.history_content_type = \"dataset\"\n if job is not UNDEFINED_JOB:\n if not job:\n job = model.Job()\n self.job = job\n assoc = MockJobToOutputDatasetAssociation(output_name, self)\n assoc.job = job\n self.creating_job_associations = [assoc]\n else:\n self.creating_job_associations = []\n\n\nclass MockHdca:\n def __init__(self, implicit_output_name=None, job=None, hid=1):\n self.id = 124\n self.copied_from_history_dataset_collection_association = None\n self.history_content_type = \"dataset_collection\"\n self.implicit_output_name = implicit_output_name\n self.hid = 1\n self.collection = model.DatasetCollection()\n self.creating_job_associations = []\n element = model.DatasetCollectionElement(\n collection=self.collection,\n element=model.HistoryDatasetAssociation(),\n element_index=0,\n element_identifier=\"moocow\",\n )\n element.dataset_instance.dataset = model.Dataset()\n element.dataset_instance.dataset.state = \"ok\"\n creating = model.JobToOutputDatasetAssociation(\n implicit_output_name,\n element.dataset_instance,\n )\n creating.job = job\n element.dataset_instance.creating_job_associations = [\n creating,\n ]"},"code_compressed":{"kind":"null"}}},{"rowIdx":389,"cells":{"id":{"kind":"number","value":389,"string":"389"},"code":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\"\"\"\nrequests.api\n~~~~~~~~~~~~\n\nThis module implements the Requests API.\n\n:copyright: (c) 2012 by Kenneth Reitz.\n:license: Apache2, see LICENSE for more details.\n\"\"\"\n\nfrom . 
import sessions\n\n\ndef request(method, url, **kwargs):\n \"\"\"Constructs and sends a :class:`Request `.\n\n :param method: method for the new :class:`Request` object.\n :param url: URL for the new :class:`Request` object.\n :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.\n :param data: (optional) Dictionary or list of tuples ``[(key, value)]`` (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.\n :param json: (optional) json data to send in the body of the :class:`Request`.\n :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.\n :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.\n :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.\n ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``\n or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string\n defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers\n to add for the file.\n :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.\n :param timeout: (optional) How many seconds to wait for the server to send data\n before giving up, as a float, or a :ref:`(connect timeout, read\n timeout) ` tuple.\n :type timeout: float or tuple\n :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.\n :type allow_redirects: bool\n :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.\n :param verify: (optional) Either a boolean, in which case it controls whether we verify\n the server's TLS certificate, or a string, in which case it must be a path\n to a CA bundle to use. Defaults to ``True``.\n :param stream: (optional) if ``False``, the response content will be immediately downloaded.\n :param cert: (optional) if String, path to ssl client cert file (.pem). 
If Tuple, ('cert', 'key') pair.\n :return: :class:`Response ` object\n :rtype: requests.Response\n\n Usage::\n\n >>> import requests\n >>> req = requests.request('GET', 'http://httpbin.org/get')\n \n \"\"\"\n\n # By using the 'with' statement we are sure the session is closed, thus we\n # avoid leaving sockets open which can trigger a ResourceWarning in some\n # cases, and look like a memory leak in others.\n with sessions.Session() as session:\n return session.request(method=method, url=url, **kwargs)\n\n\ndef get(url, params=None, **kwargs):\n r\"\"\"Sends a GET request.\n\n :param url: URL for the new :class:`Request` object.\n :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :return: :class:`Response ` object\n :rtype: requests.Response\n \"\"\"\n\n kwargs.setdefault('allow_redirects', True)\n return request('get', url, params=params, **kwargs)\n\n\ndef options(url, **kwargs):\n r\"\"\"Sends an OPTIONS request.\n\n :param url: URL for the new :class:`Request` object.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :return: :class:`Response ` object\n :rtype: requests.Response\n \"\"\"\n\n kwargs.setdefault('allow_redirects', True)\n return request('options', url, **kwargs)\n\n\ndef METHOD_NAME(url, **kwargs):\n r\"\"\"Sends a HEAD request.\n\n :param url: URL for the new :class:`Request` object.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :return: :class:`Response ` object\n :rtype: requests.Response\n \"\"\"\n\n kwargs.setdefault('allow_redirects', False)\n return request('head', url, **kwargs)\n\n\ndef post(url, data=None, json=None, **kwargs):\n r\"\"\"Sends a POST request.\n\n :param url: URL for the new :class:`Request` object.\n :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.\n :param json: (optional) json data to send in the body of the :class:`Request`.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :return: :class:`Response ` object\n :rtype: requests.Response\n \"\"\"\n\n return request('post', url, data=data, json=json, **kwargs)\n\n\ndef put(url, data=None, **kwargs):\n r\"\"\"Sends a PUT request.\n\n :param url: URL for the new :class:`Request` object.\n :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.\n :param json: (optional) json data to send in the body of the :class:`Request`.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :return: :class:`Response ` object\n :rtype: requests.Response\n \"\"\"\n\n return request('put', url, data=data, **kwargs)\n\n\ndef patch(url, data=None, **kwargs):\n r\"\"\"Sends a PATCH request.\n\n :param url: URL for the new :class:`Request` object.\n :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.\n :param json: (optional) json data to send in the body of the :class:`Request`.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :return: :class:`Response ` object\n :rtype: requests.Response\n \"\"\"\n\n return request('patch', url, data=data, **kwargs)\n\n\ndef delete(url, **kwargs):\n r\"\"\"Sends a DELETE request.\n\n :param url: URL for the new :class:`Request` object.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :return: :class:`Response ` object\n 
:rtype: requests.Response\n \"\"\"\n\n return request('delete', url, **kwargs)"},"code_compressed":{"kind":"null"}}},{"rowIdx":390,"cells":{"id":{"kind":"number","value":390,"string":"390"},"code":{"kind":"string","value":"# Copyright 2021-2023 AIPlan4EU project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom warnings import warn\nimport unified_planning as up\nfrom unified_planning.model.types import _UserType\nfrom unified_planning.exceptions import UPProblemDefinitionError, UPValueError\nfrom typing import List, Dict, Optional, cast\n\n\nclass UserTypesSetMixin:\n \"\"\"\n This class is a mixin that contains a `set` of `user types` with some related methods.\n\n NOTE: when this mixin is used in combination with other mixins that share some\n of the attributes (e.g. `has_name_method`), it is required to pass the very same\n arguments to the mixins constructors.\n \"\"\"\n\n def __init__(self, env, has_name_method):\n self._env = env\n self._has_name_method = has_name_method\n self._user_types: List[\"up.model.types.Type\"] = []\n # The field _user_types_hierarchy stores the information about the types and the list of their sons.\n self._user_types_hierarchy: Dict[\n Optional[\"up.model.types.Type\"], List[\"up.model.types.Type\"]\n ] = {}\n\n def _add_user_type(self, type: \"up.model.types.Type\"):\n \"\"\"This method adds a Type, together with all it's ancestors, to the user_types_hierarchy\"\"\"\n assert type.is_user_type()\n if type not in self._user_types:\n ut = cast(_UserType, type)\n if self._has_name_method(ut.name):\n msg = f\"The type name {ut.name} is already used in the problem! 
Different elements of a problem can have the same name if the environment flag error_used_name is disabled.\"\n if self._env.error_used_name or any(\n ut.name == cast(_UserType, t).name for t in self._user_types\n ):\n raise UPProblemDefinitionError(msg)\n else:\n warn(msg)\n if ut.father is not None:\n self._add_user_type(ut.father)\n self._user_types.append(type)\n\n @property\n def user_types(self) -> List[\"up.model.types.Type\"]:\n \"\"\"Returns the `list` of all the `user types` in the `problem`.\"\"\"\n return self._user_types\n\n def user_type(self, name: str) -> \"up.model.types.Type\":\n \"\"\"\n Returns the `user type` in the `problem` with the given `name`.\n\n :param name: The target `name` for the `type`.\n :return: The `type` in the `problem` with the given `name`.\n \"\"\"\n for ut in self.user_types:\n assert ut.is_user_type()\n if cast(_UserType, ut).name == name:\n return ut\n raise UPValueError(f\"UserType {name} is not defined!\")\n\n def METHOD_NAME(self, name: str) -> bool:\n \"\"\"\n Returns `True` if the `type` with the given `name` is defined in the\n `problem`, `False`, otherwise.\n\n :param name: The target `name` for the `type`.\n :return: `True` if a `type` with the given `name` is in the `problem`,\n `False` otherwise.\n \"\"\"\n for ut in self.user_types:\n assert ut.is_user_type()\n if cast(_UserType, ut).name == name:\n return True\n return False\n\n @property\n def user_types_hierarchy(\n self,\n ) -> Dict[Optional[\"up.model.types.Type\"], List[\"up.model.types.Type\"]]:\n \"\"\"\n Returns a `Dict` where every `key` represents an `Optional Type` and the `value`\n associated to the `key` is the `List` of the `direct sons` of the `Optional Type`.\n\n All the `user types` corresponding to the 'None' key are fatherless.\n \"\"\"\n res: Dict[Optional[\"up.model.types.Type\"], List[\"up.model.types.Type\"]] = {}\n for t in self._user_types:\n if t not in res:\n res[t] = []\n f = cast(_UserType, t).father\n if f not in res:\n res[f] = [t]\n else:\n res[f].append(t)\n return res\n\n def __eq__(self, other):\n return isinstance(other, UserTypesSetMixin) and set(self._user_types) == set(\n other._user_types\n )\n\n def __hash__(self):\n return sum(map(hash, self._user_types))\n\n def _clone_to(self, other: \"UserTypesSetMixin\"):\n other._user_types = self._user_types[:]\n other._user_types_hierarchy = self._user_types_hierarchy.copy()"},"code_compressed":{"kind":"null"}}},{"rowIdx":391,"cells":{"id":{"kind":"number","value":391,"string":"391"},"code":{"kind":"string","value":"#!/usr/bin/env python\n## @ CommonUtility.py\n# Common utility script\n#\n# Copyright (c) 2016 - 2020, Intel Corporation. All rights reserved.
    \n# SPDX-License-Identifier: BSD-2-Clause-Patent\n#\n##\n\n##\n# Import Modules\n#\nimport os\nimport sys\nimport re\nimport shutil\nimport subprocess\nimport struct\nimport hashlib\nimport string\nfrom ctypes import *\nfrom functools import reduce\nfrom importlib.machinery import SourceFileLoader\n\ndef print_bytes (data, indent=0, offset=0, show_ascii = False):\n bytes_per_line = 16\n printable = ' ' + string.ascii_letters + string.digits + string.punctuation\n str_fmt = '{:s}{:04x}: {:%ds} {:s}' % (bytes_per_line * 3)\n bytes_per_line\n data_array = bytearray(data)\n for idx in range(0, len(data_array), bytes_per_line):\n hex_str = ' '.join('%02X' % val for val in data_array[idx:idx + bytes_per_line])\n asc_str = ''.join('%c' % (val if (chr(val) in printable) else '.')\n for val in data_array[idx:idx + bytes_per_line])\n print (str_fmt.format(indent * ' ', offset + idx, hex_str, ' ' + asc_str if show_ascii else ''))\n\ndef get_bits_from_bytes (bytes, start, length):\n if length == 0:\n return 0\n byte_start = (start) // 8\n byte_end = (start + length - 1) // 8\n bit_start = start & 7\n mask = (1 << length) - 1\n val = bytes_to_value (bytes[byte_start:byte_end + 1])\n val = (val >> bit_start) & mask\n return val\n\ndef set_bits_to_bytes (bytes, start, length, bvalue):\n if length == 0:\n return\n byte_start = (start) // 8\n byte_end = (start + length - 1) // 8\n bit_start = start & 7\n mask = (1 << length) - 1\n val = bytes_to_value (bytes[byte_start:byte_end + 1])\n val &= ~(mask << bit_start)\n val |= ((bvalue & mask) << bit_start)\n bytes[byte_start:byte_end+1] = value_to_bytearray (val, byte_end + 1 - byte_start)\n\ndef value_to_bytes (value, length):\n return value.to_bytes(length, 'little')\n\ndef bytes_to_value (bytes):\n return int.from_bytes (bytes, 'little')\n\ndef value_to_bytearray (value, length):\n return bytearray(value_to_bytes(value, length))\n\ndef value_to_bytearray (value, length):\n return bytearray(value_to_bytes(value, length))\n\ndef get_aligned_value (value, alignment = 4):\n if alignment != (1 << (alignment.bit_length() - 1)):\n raise Exception ('Alignment (0x%x) should to be power of 2 !' 
% alignment)\n value = (value + (alignment - 1)) & ~(alignment - 1)\n return value\n\ndef get_padding_length (data_len, alignment = 4):\n new_data_len = get_aligned_value (data_len, alignment)\n return new_data_len - data_len\n\ndef METHOD_NAME (file, mode = 'rb'):\n return open(file, mode).read()\n\ndef gen_file_from_object (file, object):\n open (file, 'wb').write(object)\n\ndef gen_file_with_size (file, size):\n open (file, 'wb').write(b'\\xFF' * size);\n\ndef check_files_exist (base_name_list, dir = '', ext = ''):\n for each in base_name_list:\n if not os.path.exists (os.path.join (dir, each + ext)):\n return False\n return True\n\ndef load_source (name, filepath):\n mod = SourceFileLoader (name, filepath).load_module()\n return mod\n\ndef get_openssl_path ():\n if os.name == 'nt':\n if 'OPENSSL_PATH' not in os.environ:\n openssl_dir = \"C:\\\\Openssl\\\\bin\\\\\"\n if os.path.exists (openssl_dir):\n os.environ['OPENSSL_PATH'] = openssl_dir\n else:\n os.environ['OPENSSL_PATH'] = \"C:\\\\Openssl\\\\\"\n if 'OPENSSL_CONF' not in os.environ:\n openssl_cfg = \"C:\\\\Openssl\\\\openssl.cfg\"\n if os.path.exists(openssl_cfg):\n os.environ['OPENSSL_CONF'] = openssl_cfg\n openssl = os.path.join(os.environ.get ('OPENSSL_PATH', ''), 'openssl.exe')\n else:\n # Get openssl path for Linux cases\n openssl = shutil.which('openssl')\n\n return openssl\n\ndef run_process (arg_list, print_cmd = False, capture_out = False):\n sys.stdout.flush()\n if os.name == 'nt' and os.path.splitext(arg_list[0])[1] == '' and \\\n os.path.exists (arg_list[0] + '.exe'):\n arg_list[0] += '.exe'\n if print_cmd:\n print (' '.join(arg_list))\n\n exc = None\n result = 0\n output = ''\n try:\n if capture_out:\n output = subprocess.check_output(arg_list).decode()\n else:\n result = subprocess.call (arg_list)\n except Exception as ex:\n result = 1\n exc = ex\n\n if result:\n if not print_cmd:\n print ('Error in running process:\\n %s' % ' '.join(arg_list))\n if exc is None:\n sys.exit(1)\n else:\n raise exc\n\n return output"},"code_compressed":{"kind":"null"}}},{"rowIdx":392,"cells":{"id":{"kind":"number","value":392,"string":"392"},"code":{"kind":"string","value":"# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import TYPE_CHECKING, Callable, Dict, List, Tuple, Type\n\nif TYPE_CHECKING:\n from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem\n\nlogger = logging.getLogger(__name__)\n\ndef aws_batch_batch_system_factory():\n from toil.batchSystems.awsBatch import AWSBatchBatchSystem\n return AWSBatchBatchSystem\n\ndef gridengine_batch_system_factory():\n from toil.batchSystems.gridengine import GridEngineBatchSystem\n return GridEngineBatchSystem\n\n\ndef parasol_batch_system_factory():\n from toil.batchSystems.parasol import ParasolBatchSystem\n return ParasolBatchSystem\n\n\ndef lsf_batch_system_factory():\n from toil.batchSystems.lsf import LSFBatchSystem\n return LSFBatchSystem\n\n\ndef 
single_machine_batch_system_factory():\n from toil.batchSystems.singleMachine import SingleMachineBatchSystem\n return SingleMachineBatchSystem\n\n\ndef mesos_batch_system_factory():\n from toil.batchSystems.mesos.batchSystem import MesosBatchSystem\n return MesosBatchSystem\n\n\ndef slurm_batch_system_factory():\n from toil.batchSystems.slurm import SlurmBatchSystem\n return SlurmBatchSystem\n\ndef tes_batch_system_factory():\n from toil.batchSystems.tes import TESBatchSystem\n return TESBatchSystem\n\ndef torque_batch_system_factory():\n from toil.batchSystems.torque import TorqueBatchSystem\n return TorqueBatchSystem\n\n\ndef htcondor_batch_system_factory():\n from toil.batchSystems.htcondor import HTCondorBatchSystem\n return HTCondorBatchSystem\n\n\ndef kubernetes_batch_system_factory():\n from toil.batchSystems.kubernetes import KubernetesBatchSystem\n return KubernetesBatchSystem\n\n\nBATCH_SYSTEM_FACTORY_REGISTRY: Dict[str, Callable[[], Type[\"AbstractBatchSystem\"]]] = {\n 'aws_batch' : aws_batch_batch_system_factory,\n 'parasol' : parasol_batch_system_factory,\n 'single_machine' : single_machine_batch_system_factory,\n 'grid_engine' : gridengine_batch_system_factory,\n 'lsf' : lsf_batch_system_factory,\n 'mesos' : mesos_batch_system_factory,\n 'slurm' : slurm_batch_system_factory,\n 'tes' : tes_batch_system_factory,\n 'torque' : torque_batch_system_factory,\n 'htcondor' : htcondor_batch_system_factory,\n 'kubernetes' : kubernetes_batch_system_factory\n}\nBATCH_SYSTEMS = list(BATCH_SYSTEM_FACTORY_REGISTRY.keys())\nDEFAULT_BATCH_SYSTEM = 'single_machine'\n\ndef addBatchSystemFactory(key: str, batchSystemFactory: Callable[[], Type['AbstractBatchSystem']]):\n \"\"\"\n Adds a batch system to the registry for workflow-supplied batch systems.\n \"\"\"\n BATCH_SYSTEMS.append(key)\n BATCH_SYSTEM_FACTORY_REGISTRY[key] = batchSystemFactory\n\n# We need a snapshot save/restore system for testing. We can't just tamper with\n# the globals because module-level globals are their own references, so we\n# can't touch this module's global name bindings from a client module.\n\ndef METHOD_NAME() -> Tuple[List[str], Dict[str, Callable[[], Type['AbstractBatchSystem']]]]:\n \"\"\"\n Return a snapshot of the plugin registry that can be restored to remove\n added plugins. 
Useful for testing the plugin system in-process with other\n tests.\n \"\"\"\n\n snapshot = (list(BATCH_SYSTEMS), dict(BATCH_SYSTEM_FACTORY_REGISTRY))\n return snapshot\n\ndef restore_batch_system_plugin_state(snapshot: Tuple[List[str], Dict[str, Callable[[], Type['AbstractBatchSystem']]]]):\n \"\"\"\n Restore the batch system registry state to a snapshot from\n save_batch_system_plugin_state().\n \"\"\"\n\n # We need to apply the snapshot without rebinding the names, because that\n # won't affect modules that imported the names.\n wanted_batch_systems, wanted_registry = snapshot\n BATCH_SYSTEMS.clear()\n BATCH_SYSTEMS.extend(wanted_batch_systems)\n BATCH_SYSTEM_FACTORY_REGISTRY.clear()\n BATCH_SYSTEM_FACTORY_REGISTRY.update(wanted_registry)"},"code_compressed":{"kind":"null"}}},{"rowIdx":393,"cells":{"id":{"kind":"number","value":393,"string":"393"},"code":{"kind":"string","value":"#/*##########################################################################\n# Copyright (C) 2004-2022 European Synchrotron Radiation Facility\n#\n# This file is part of the PyMca X-ray Fluorescence Toolkit developed at\n# the ESRF.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n#############################################################################*/\n__author__ = \"V.A. 
Sole - ESRF\"\n__contact__ = \"sole@esrf.fr\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__doc__ = \"\"\"\nThis plugin open a plot window with a browser to browse all images in\nthe stack.\n\nA averaging filter with a configurable width is provided, to display an\naverage of several consecutive frames rather than a single frame.\n\nThe plot has also mask tools synchronized with the mask in the primary\nwindow.\n\"\"\"\n\nimport logging\nfrom PyMca5 import StackPluginBase\n\nfrom PyMca5.PyMcaGui.pymca import StackBrowser\nfrom PyMca5.PyMcaGui import PyMca_Icons\n\n_logger = logging.getLogger(__name__)\n\n\nclass StackBrowserPlugin(StackPluginBase.StackPluginBase):\n def __init__(self, stackWindow, **kw):\n if _logger.getEffectiveLevel() == logging.DEBUG:\n StackPluginBase.pluginBaseLogger.setLevel(logging.DEBUG)\n StackPluginBase.StackPluginBase.__init__(self, stackWindow, **kw)\n self.methodDict = {'Show':[self.METHOD_NAME,\n \"Show Stack Image Browser\",\n PyMca_Icons.brushselect]}\n self.__methodKeys = ['Show']\n self.widget = None\n\n def stackUpdated(self):\n _logger.debug(\"StackBrowserPlugin.stackUpdated() called\")\n if self.widget is None:\n return\n if self.widget.isHidden():\n return\n stack = self.getStackDataObject()\n self.widget.setStackDataObject(stack, stack_name=\"Stack Index\")\n self.widget.setBackgroundImage(self._getBackgroundImage())\n mask = self.getStackSelectionMask()\n self.widget.setSelectionMask(mask)\n\n def _getBackgroundImage(self):\n images, names = self.getStackROIImagesAndNames()\n B = None\n for key in names:\n if key.endswith(\"ackground\"):\n B = images[names.index(key)]\n return B\n\n def selectionMaskUpdated(self):\n if self.widget is None:\n return\n if self.widget.isHidden():\n return\n mask = self.getStackSelectionMask()\n self.widget.setSelectionMask(mask)\n\n def stackROIImageListUpdated(self):\n if self.widget is None:\n return\n self.widget.setBackgroundImage(self._getBackgroundImage())\n\n def mySlot(self, ddict):\n _logger.debug(\"mySlot %s %s\", ddict['event'], ddict.keys())\n if ddict['event'] == \"selectionMaskChanged\":\n self.setStackSelectionMask(ddict['current'])\n elif ddict['event'] == \"addImageClicked\":\n self.addImage(ddict['image'], ddict['title'])\n elif ddict['event'] == \"removeImageClicked\":\n self.removeImage(ddict['title'])\n elif ddict['event'] == \"replaceImageClicked\":\n self.replaceImage(ddict['image'], ddict['title'])\n elif ddict['event'] == \"resetSelection\":\n self.setStackSelectionMask(None)\n\n #Methods implemented by the plugin\n def getMethods(self):\n return self.__methodKeys\n\n def getMethodToolTip(self, name):\n return self.methodDict[name][1]\n\n def getMethodPixmap(self, name):\n return self.methodDict[name][2]\n\n def applyMethod(self, name):\n return self.methodDict[name][0]()\n\n def METHOD_NAME(self):\n if self.widget is None:\n self.widget = StackBrowser.StackBrowser(parent=None,\n rgbwidget=None,\n selection=True,\n colormap=True,\n imageicons=True,\n standalonesave=True,\n profileselection=True)\n self.widget.setSelectionMode(True)\n qt = StackBrowser.qt\n self.widget.sigMaskImageWidgetSignal.connect(self.mySlot)\n\n #Show\n self.widget.show()\n self.widget.raise_()\n\n #update\n self.stackUpdated()\n\n\nMENU_TEXT = \"Stack Image Browser\"\ndef getStackPluginInstance(stackWindow, **kw):\n ob = StackBrowserPlugin(stackWindow)\n return 
ob"},"code_compressed":{"kind":"null"}}},{"rowIdx":394,"cells":{"id":{"kind":"number","value":394,"string":"394"},"code":{"kind":"string","value":"#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n\n'''\nCopyright (c) 2023, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\nDESCRIPTION\nSee docstring for FirefoxGetLocaleAndVersion class\n'''\n\n# Standard imports\nimport configparser\nimport glob\nimport os\nimport plistlib\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib.DmgMounter import DmgMounter\nfrom autopkglib import ProcessorError\n\n\n__all__ = ['FirefoxGetLocaleAndVersion']\n__version__ = '1.0'\n\n\n# pylint: disable = too-few-public-methods\nclass FirefoxGetLocaleAndVersion(DmgMounter):\n '''\n Returns the locale and version of the Firefox.app passed to dmg_path\n\n Raising if Firefox.app not located at dmg_path.\n\n Based off of:\n https://github.com/autopkg/autopkg/blob/master/Code/autopkglib/AppDmgVersioner.py#L69-L86\n '''\n\n description = __doc__\n input_variables = {\n 'choosen_locale': {\n 'required': True,\n 'description': ('Value of LOCALE in the override.'),\n },\n 'dmg_path': {\n 'required': True,\n 'description': ('Path to the downloaded DMG.'),\n }\n }\n\n output_variables = {\n 'app_locale': {\n 'description': ('Locale of the .app.'),\n },\n 'app_version': {\n 'description': ('Version of the .app.'),\n },\n }\n\n\n\n def METHOD_NAME(self, path):\n '''\n Find app bundle at path\n '''\n\n # Look for any .app in the mounted dmg\n apps = glob.glob(os.path.join(path, \"*.app\"))\n\n # Raise if no .app found\n if len(apps) == 0:\n raise ProcessorError(\"No app found in dmg\")\n\n # Return 1st found .app only\n return apps[0]\n\n\n def main(self):\n '''\n See docstring for the FirefoxGetLocaleAndVersion class\n '''\n\n # Mount the image.\n mount_point = self.mount(self.env[\"dmg_path\"])\n\n # Wrap all other actions in a try/finally so the image is always\n # unmounted.\n try:\n\n # Get the path the the .app 
in the DMG, raise if no .app found\n app_path = self.METHOD_NAME(mount_point)\n self.output(f\"app_path = {app_path}\")\n\n # Get the path to locale.ini, if doesn't exist and LOCALE is en-US we're good\n app_locale_ini = os.path.join(app_path, 'Contents/Resources/locale.ini')\n self.output(f\"Looking for {app_locale_ini}...\")\n\n # Get the .app's locale, if app_locale_ini exists\n if os.path.exists(app_locale_ini):\n # Progress notification\n self.output(f\"Found: {app_locale_ini}...\")\n # Try Read in the locale, raise if cannot be parsed\n try:\n # Create confgparser object\n app_config = configparser.ConfigParser()\n app_config.read(app_locale_ini)\n # Setting app_locale\n self.env['app_locale'] = app_config['locale']['locale']\n # Raise if app_locale cannot be retrieved from app_locale_ini\n except Exception as locale_parse_error:\n raise ProcessorError(\"Cannot determine app_locale\") from locale_parse_error\n # en-US doesn't have a app_locale_ini, so if selected then\n elif self.env[\"choosen_locale\"] == 'en-US':\n # Setting app_locale\n self.env['app_locale'] = 'en-US'\n self.output(f\"Setting app_locale to \\\"en-US\\\", as {app_locale_ini} does \"\n f\"not exist for the \\\"en-US\\\" locale\")\n # Raise if we can't find app_locale_ini and choosen_locale isn't en-US\n else:\n raise ProcessorError(f\"Cannot find {app_locale_ini}\")\n\n # Progress notification\n self.output(f\"app_locale: {self.env['app_locale']}\")\n # Now we need to get the version\n app_info_plist = os.path.join(app_path, 'Contents/Info.plist')\n\n # If the info.plist exists\n if os.path.exists(app_info_plist):\n # Try to read in app_info_plist, raise if cannot be parsed\n try:\n # Read in the plist\n with open(app_info_plist, \"rb\") as plist_file:\n parsed_plist = plistlib.load(plist_file)\n # Get version from info.plist\n self.env['app_version'] = parsed_plist['CFBundleShortVersionString']\n self.output(f\"app_version: {self.env['app_version']}\")\n # Raising if plist cannot be parsed or version determined from plist\n except Exception as info_plist_error:\n raise ProcessorError(f\"Cannot parse {app_info_plist}\") from info_plist_error\n # Raise if we can't find app_info_plist\n else:\n raise ProcessorError(f\"Cannot find {app_info_plist}\")\n\n # Unmount the dmg\n finally:\n self.output(\"unmounting...\")\n self.unmount(self.env[\"dmg_path\"])\n\nif __name__ == '__main__':\n PROCESSOR = FirefoxGetLocaleAndVersion()"},"code_compressed":{"kind":"null"}}},{"rowIdx":395,"cells":{"id":{"kind":"number","value":395,"string":"395"},"code":{"kind":"string","value":"import logging\nimport os\nimport subprocess\nimport time\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional\n\nfrom fastapi import FastAPI, HTTPException\n\nfrom meerkat.interactive.server import Server\nfrom meerkat.tools.utils import WeakMapping\n\nif TYPE_CHECKING:\n from meerkat.interactive.modification import Modification\n from meerkat.mixins.identifiable import IdentifiableMixin\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass Secrets:\n api_keys: Dict[str, str] = field(default_factory=dict)\n\n def add(self, api: str, api_key: str):\n self.api_keys[api] = api_key\n\n def get(self, api: str):\n try:\n return self.api_keys[api]\n except KeyError:\n raise HTTPException(\n status_code=404,\n detail=f\"No API key found for {api}.\\\n Add one with `secrets.add(api, api_key)`.\",\n )\n\n\n@dataclass\nclass LanguageModel:\n manifest: Any = None\n\n def set(self, client: str = 
\"ai21\", engine: str = \"j1-jumbo\"):\n from manifest import Manifest\n\n self.manifest = Manifest(\n client_name=client,\n client_connection=state.secrets.get(client),\n engine=engine,\n cache_name=\"sqlite\",\n cache_connection=\"./logs\",\n )\n\n def get(self):\n return self.manifest\n\n\n@dataclass\nclass APIInfo:\n api: Optional[FastAPI]\n port: Optional[int]\n server: Optional[Server] = None\n name: str = \"localhost\"\n shared: bool = False\n process: Optional[subprocess.Popen] = None\n _url: Optional[str] = None\n\n @property\n def url(self):\n if self._url:\n return self._url\n if self.shared:\n return f\"http://{self.name}\"\n return f\"http://{self.name}:{self.port}\"\n\n @property\n def METHOD_NAME(self):\n return f\"{self.url}/docs\"\n\n @property\n def docs(self):\n from IPython.display import IFrame\n\n return IFrame(self.METHOD_NAME, width=800, height=600)\n\n\n@dataclass\nclass FrontendInfo:\n package_manager: Optional[str]\n port: Optional[int]\n name: str = \"localhost\"\n shared: bool = False\n process: Optional[subprocess.Popen] = None\n _url: Optional[str] = None\n\n @property\n def url(self):\n if self._url:\n return self._url\n if self.shared:\n return f\"http://{self.name}\"\n return f\"http://{self.name}:{self.port}\"\n\n\n@dataclass\nclass Identifiables:\n \"\"\"We maintain a separate group for each type of identifiable object.\n\n Objects in the group are identified by a unique id.\n \"\"\"\n\n columns: WeakMapping = field(default_factory=WeakMapping)\n dataframes: WeakMapping = field(default_factory=WeakMapping)\n pages: Mapping = field(default_factory=dict)\n slicebys: WeakMapping = field(default_factory=WeakMapping)\n aggregations: WeakMapping = field(default_factory=WeakMapping)\n box_operations: WeakMapping = field(default_factory=WeakMapping)\n components: WeakMapping = field(default_factory=WeakMapping)\n refs: WeakMapping = field(default_factory=WeakMapping)\n stores: WeakMapping = field(default_factory=WeakMapping)\n endpoints: WeakMapping = field(default_factory=WeakMapping)\n routers: WeakMapping = field(default_factory=WeakMapping)\n nodes: WeakMapping = field(default_factory=WeakMapping)\n states: WeakMapping = field(default_factory=WeakMapping)\n\n def add(self, obj: \"IdentifiableMixin\"):\n group = getattr(self, obj.identifiable_group)\n group[obj.id] = obj\n\n def get(self, id: str, group: str):\n group, group_name = getattr(self, group), group\n try:\n value = group[id]\n except KeyError:\n raise HTTPException(\n status_code=404,\n detail=f\"No object in group '{group_name}' with id '{id}'\",\n )\n return value\n\n\n@dataclass\nclass ModificationQueue:\n \"\"\"A queue of modifications to be applied to a dataframe.\"\"\"\n\n queue: List[\"Modification\"] = field(default_factory=list)\n\n # Boolean attribute that controls whether the queue is accepting new\n # modifications\n # When _ready is False, `add` will no-op\n _ready: bool = False\n\n def add(self, modification: \"Modification\"):\n if self._ready:\n logger.debug(f\"Adding modification {modification} to queue.\")\n self.queue.append(modification)\n return\n # Do nothing if not ready\n logger.debug(f\"Modification queue not ready. 
Ignoring {modification}.\")\n\n def clear(self) -> List[\"Modification\"]:\n \"\"\"Clear the modification queue, and return the old queue.\"\"\"\n logger.debug(\"Clearing modification queue.\")\n current_queue = self.queue\n self.queue = []\n return current_queue\n\n def ready(self):\n \"\"\"Ready the queue for accepting new modifications.\"\"\"\n count = 0\n while self._ready:\n # Modification queue is already in use\n # Wait for it to be unready\n logger.debug(\"Modification queue is already in use. Waiting...\")\n time.sleep(0.1)\n count += 1\n if count == 1e-3:\n logger.warn(\n \"Modification queue is taking a long time to unready.\"\n \"Check for deadlocks.\"\n )\n\n self._ready = True\n logger.debug(\"Modification queue is now ready.\")\n\n def unready(self):\n \"\"\"Unready the queue for accepting new modifications.\"\"\"\n self._ready = False\n logger.debug(\"Modification queue is now unready.\")\n\n\n@dataclass\nclass ProgressQueue:\n \"\"\"A queue of progress messages to be displayed to the user.\"\"\"\n\n queue: list = field(default_factory=list)\n\n def add(self, message: str):\n self.queue.append(message)\n\n def clear(self) -> list:\n \"\"\"Clear the progress queue, and return the old queue.\"\"\"\n current_queue = self.queue\n self.queue = []\n return current_queue\n\n\n@dataclass\nclass GlobalState:\n api_info: Optional[APIInfo] = None\n frontend_info: Optional[FrontendInfo] = None\n identifiables: Identifiables = field(default_factory=Identifiables)\n secrets: Secrets = field(default_factory=Secrets)\n llm: LanguageModel = field(default_factory=LanguageModel)\n modification_queue: ModificationQueue = field(default_factory=ModificationQueue)\n progress_queue: ProgressQueue = field(default_factory=ProgressQueue)\n\n\nglobal state\nstate = GlobalState()\n\n\ndef add_secret(api: str, api_key: str):\n \"\"\"Add an API key to the global state.\"\"\"\n state.secrets.add(api, api_key)\n\n\ndef run_on_startup():\n \"\"\"Run on startup.\"\"\"\n frontend_url = os.environ.get(\"MEERKAT_FRONTEND_URL\", None)\n if frontend_url:\n state.frontend_info = FrontendInfo(None, None, _url=frontend_url)\n\n api_url = os.environ.get(\"MEERKAT_API_URL\", None)\n if api_url:\n state.api_info = APIInfo(None, None, _url=api_url)\n\n\nrun_on_startup()"},"code_compressed":{"kind":"null"}}},{"rowIdx":396,"cells":{"id":{"kind":"number","value":396,"string":"396"},"code":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdkidaas_doraemon.endpoint import endpoint_data\n\nclass VerifyUserAuthenticationRequest(RpcRequest):\n\n\tdef __init__(self):\n\t\tRpcRequest.__init__(self, 'idaas-doraemon', '2021-05-20', 'VerifyUserAuthentication')\n\t\tself.set_protocol_type('https')\n\t\tself.set_method('POST')\n\n\t\tif hasattr(self, \"endpoint_map\"):\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\n\t\tif hasattr(self, \"endpoint_regional\"):\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\n\n\tdef get_LogParams(self): # String\n\t\treturn self.get_query_params().get('LogParams')\n\n\tdef set_LogParams(self, LogParams): # String\n\t\tself.add_query_param('LogParams', LogParams)\n\tdef METHOD_NAME(self): # String\n\t\treturn self.get_query_params().get('ClientExtendParamsJson')\n\n\tdef set_ClientExtendParamsJson(self, ClientExtendParamsJson): # String\n\t\tself.add_query_param('ClientExtendParamsJson', ClientExtendParamsJson)\n\tdef get_UserId(self): # String\n\t\treturn self.get_query_params().get('UserId')\n\n\tdef set_UserId(self, UserId): # String\n\t\tself.add_query_param('UserId', UserId)\n\tdef get_LogTag(self): # String\n\t\treturn self.get_query_params().get('LogTag')\n\n\tdef set_LogTag(self, LogTag): # String\n\t\tself.add_query_param('LogTag', LogTag)\n\tdef get_ServerExtendParamsJson(self): # String\n\t\treturn self.get_query_params().get('ServerExtendParamsJson')\n\n\tdef set_ServerExtendParamsJson(self, ServerExtendParamsJson): # String\n\t\tself.add_query_param('ServerExtendParamsJson', ServerExtendParamsJson)\n\tdef get_RequireBindHashBase64(self): # String\n\t\treturn self.get_query_params().get('RequireBindHashBase64')\n\n\tdef set_RequireBindHashBase64(self, RequireBindHashBase64): # String\n\t\tself.add_query_param('RequireBindHashBase64', RequireBindHashBase64)\n\tdef get_AuthenticationContext(self): # String\n\t\treturn self.get_query_params().get('AuthenticationContext')\n\n\tdef set_AuthenticationContext(self, AuthenticationContext): # String\n\t\tself.add_query_param('AuthenticationContext', AuthenticationContext)\n\tdef get_RequireChallengeBase64(self): # String\n\t\treturn self.get_query_params().get('RequireChallengeBase64')\n\n\tdef set_RequireChallengeBase64(self, RequireChallengeBase64): # String\n\t\tself.add_query_param('RequireChallengeBase64', RequireChallengeBase64)\n\tdef get_AuthenticatorType(self): # String\n\t\treturn self.get_query_params().get('AuthenticatorType')\n\n\tdef set_AuthenticatorType(self, AuthenticatorType): # String\n\t\tself.add_query_param('AuthenticatorType', AuthenticatorType)\n\tdef get_ClientExtendParamsJsonSign(self): # String\n\t\treturn self.get_query_params().get('ClientExtendParamsJsonSign')\n\n\tdef set_ClientExtendParamsJsonSign(self, ClientExtendParamsJsonSign): # String\n\t\tself.add_query_param('ClientExtendParamsJsonSign', ClientExtendParamsJsonSign)\n\tdef get_UserSourceIp(self): # String\n\t\treturn self.get_query_params().get('UserSourceIp')\n\n\tdef set_UserSourceIp(self, UserSourceIp): # String\n\t\tself.add_query_param('UserSourceIp', UserSourceIp)\n\tdef get_ApplicationExternalId(self): # String\n\t\treturn self.get_query_params().get('ApplicationExternalId')\n\n\tdef set_ApplicationExternalId(self, ApplicationExternalId): # String\n\t\tself.add_query_param('ApplicationExternalId', 
ApplicationExternalId)"},"code_compressed":{"kind":"null"}}},{"rowIdx":397,"cells":{"id":{"kind":"number","value":397,"string":"397"},"code":{"kind":"string","value":"# This script compares an on-going release notes file with published release notes files.\n# If the ongoing release notes file has a duplicate note with the published one, the script reports the note and replaces it with the published one.\n\nimport re, os\nfrom tempfile import mkstemp\nfrom shutil import move\nfrom os import remove\n\n\n# 获取已发布的 release notes Issue 号和 PR 号\ndef METHOD_NAME(ext_path,main_path):\n\n exst_notes = []\n exst_issue_nums = []\n exst_note_levels = []\n\n for maindir, subdir, files in os.walk(ext_path):\n for afile in files:\n file_path = (os.path.join(maindir, afile))\n if file_path.endswith('.md') and not os.path.samefile(file_path,main_path):\n with open(file_path,'r', encoding='utf-8') as fp:\n level1 = level2 = level3 = \"\"\n for line in fp:\n exst_issue_num = re.search(r'https://github.com/(pingcap|tikv)/\\w+/(issues|pull)/\\d+', line)\n if exst_issue_num:\n if exst_issue_num.group() not in exst_issue_nums:\n note_level = level1 + level2 + level3\n note_pair = [exst_issue_num.group(),line,afile, note_level]\n exst_issue_nums.append(exst_issue_num.group())\n exst_notes.append(note_pair)\n else:\n continue\n elif line.startswith(\"##\"):\n level1 = \"> \" + line.replace(\"##\",\"\").strip()\n level2 = level3 = \"\"\n elif line.startswith (\"+\") or line.startswith (\"-\"):\n level2 = \"> \" + line.replace(\"+\",\"\").replace(\"-\",\"\").strip()\n level3 = \"\"\n elif line.startswith (\" +\") or line.startswith (\" -\"):\n level3 = \"> \" + line.replace(\" +\",\"\").replace(\" -\",\"\").strip()\n else:\n continue\n else:\n pass\n\n if len(exst_issue_nums) != 0:\n return exst_notes\n else:\n return 0\n\n\n# 检查当前准备中的 release notes 的 Issue 号和 PR 号是否有重复,如果有就进行替换\ndef check_exst_rn(note_pairs, main_path):\n DupNum = 0\n NoteNum = 0\n target_file_path = mkstemp()[1]\n source_file_path = main_path\n with open(target_file_path, 'w', encoding='utf-8') as target_file:\n with open(source_file_path, 'r', encoding='utf-8') as source_file:\n LineNum = 0\n for line in source_file:\n LineNum += 1\n issue_num = re.search('https://github.com/(pingcap|tikv)/\\w+/(issues|pull)/\\d+', line)\n if issue_num:\n NoteNum +=1\n for note_pair in note_pairs:\n if issue_num.group() == note_pair[0] and not line.strip().startswith(\"(dup\"):\n print('A duplicated note is found in line ' + str(LineNum) + \" from \" + note_pair[2] + note_pair[1])\n match = re.fullmatch(r'(\\s*)(?:- .+?)( @.+?)?\\s*', line)\n if match:\n line = '{}(dup: {} {}){}{}\\n'.format(match.group(1), note_pair[2], note_pair[3], note_pair[1].strip(), match.group(2) or \"\")\n print('The duplicated note is replaced with ' + line)\n DupNum += 1\n else:\n continue\n break\n target_file.write(line)\n\n remove(source_file_path)\n move(target_file_path, source_file_path)\n DupRate = \"%.0f%%\" % (DupNum/NoteNum*100) #计算 release notes 重复率\n print (str(DupNum) + \" duplicated notes are found in \" + str(NoteNum) + \" notes. 
The duplicated rate is \" + str(DupRate) + \".\")\n\n\nif __name__ == \"__main__\":\n\n ext_path = r'/Users/aaa/Documents/GitHub/githubid/docs/releases' # 已发布的 release notes 文件夹\n main_path = r'/Users/aaa/Documents/GitHub/githubid/docs/releases/release-5.3.1.md' # 当前正在准备的release notes 文档路径\n note_pairs = METHOD_NAME(ext_path,main_path)\n check_exst_rn(note_pairs, main_path)"},"code_compressed":{"kind":"null"}}},{"rowIdx":398,"cells":{"id":{"kind":"number","value":398,"string":"398"},"code":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport re\nimport hmac\nimport hashlib\nimport logging\n\nfrom django.apps import apps\nfrom nameparser import HumanName\nfrom werkzeug.utils import cached_property\n\nfrom framework.flask import request\n\nfrom website import settings\nfrom website.conferences.exceptions import ConferenceError\n\n\nlogger = logging.getLogger(__name__)\n\nSSCORE_MAX_VALUE = 5\nDKIM_PASS_VALUES = ['Pass']\nSPF_PASS_VALUES = ['Pass', 'Neutral']\n\nANGLE_BRACKETS_REGEX = re.compile(r'<(.*?)>')\nBASE_REGEX = r\"\"\"\n (?P(test|stage)(\\d*)-)?\n (?P\\w*?)\n -\n (?P{allowed_types})\n @osf\\.io\n \"\"\"\n\nclass ConferenceMessage(object):\n\n def __init__(self):\n self.request = request._get_current_object()\n\n def verify(self):\n self.verify_signature()\n _ = [self.sender_email, self.route] # noqa\n\n def verify_signature(self):\n \"\"\"Verify that request comes from Mailgun. Based on sample code from\n http://documentation.mailgun.com/user_manual.html#webhooks\n \"\"\"\n signature = hmac.new(\n key=settings.MAILGUN_API_KEY.encode(),\n msg='{}{}'.format(\n self.form['timestamp'],\n self.form['token'],\n ).encode(),\n digestmod=hashlib.sha256,\n ).hexdigest()\n if signature != self.form['signature']:\n raise ConferenceError('Invalid headers on incoming mail')\n\n @cached_property\n def is_spam(self):\n \"\"\"Check SSCORE, DKIM, and SPF headers for spam.\n See http://documentation.mailgun.com/user_manual.html#spam-filter for\n details.\n\n :return: At least one header indicates spam\n \"\"\"\n try:\n # Mailgun only inserts score headers for messages checked for spam.\n sscore_header = float(self.form.get('X-Mailgun-Sscore', 0))\n except (TypeError, ValueError):\n return True\n dkim_header = self.form.get('X-Mailgun-Dkim-Check-Result')\n spf_header = self.form.get('X-Mailgun-Spf')\n return (\n (sscore_header and sscore_header > SSCORE_MAX_VALUE) or\n (dkim_header and dkim_header not in DKIM_PASS_VALUES) or\n (spf_header and spf_header not in SPF_PASS_VALUES)\n )\n\n @cached_property\n def form(self):\n return self.request.form\n\n @cached_property\n def raw(self):\n return {\n 'headers': dict(self.request.headers),\n 'form': self.request.form.to_dict(),\n 'args': self.request.args.to_dict(),\n }\n\n @cached_property\n def subject(self):\n subject = self.form['subject']\n subject = re.sub(r'^re:', '', subject, flags=re.I)\n subject = re.sub(r'^fwd:', '', subject, flags=re.I)\n return subject.strip()\n\n @cached_property\n def METHOD_NAME(self):\n return self.form['recipient']\n\n @cached_property\n def text(self):\n # Not included if there is no message body\n # https://documentation.mailgun.com/user_manual.html#routes\n return self.form.get('stripped-text', '')\n\n @cached_property\n def sender(self):\n return self.form['from']\n\n @cached_property\n def sender_name(self):\n if '<' in self.sender:\n # sender format: \"some name\" \n name = ANGLE_BRACKETS_REGEX.sub('', self.sender)\n name = name.strip().replace('\"', '')\n else:\n # sender format: email@domain.tld\n name = 
self.sender\n return str(HumanName(name))\n\n @cached_property\n def sender_email(self):\n match = ANGLE_BRACKETS_REGEX.search(self.sender)\n if match:\n # sender format: \"some name\" \n return match.groups()[0].lower().strip()\n elif '@' in self.sender:\n # sender format: email@domain.tld\n return self.sender.lower().strip()\n raise ConferenceError('Could not extract sender email')\n\n @cached_property\n def sender_display(self):\n return self.sender_name or self.sender_email.split('@')[0]\n\n @cached_property\n def route(self):\n match = re.search(re.compile(BASE_REGEX.format(allowed_types=(self.allowed_types or 'poster|talk')), re.IGNORECASE | re.VERBOSE), self.form['recipient'])\n if not match:\n raise ConferenceError('Invalid recipient: '.format(self.form['recipient']))\n data = match.groupdict()\n if bool(settings.DEV_MODE) != bool(data['test']):\n # NOTE: test.osf.io has DEV_MODE = False\n if not data['test'] or (data['test'] and data['test'].rstrip('-') != 'test'):\n raise ConferenceError(\n 'Mismatch between `DEV_MODE` and recipient {0}'.format(\n self.form['recipient']\n )\n )\n return data\n\n @cached_property\n def conference_name(self):\n return self.route['meeting']\n\n @cached_property\n def conference_category(self):\n return self.route['category']\n\n @cached_property\n def attachments(self):\n count = self.form.get('attachment-count', 0)\n try:\n count = int(count)\n except (TypeError, ValueError):\n count = 0\n return list(filter(\n lambda value: value is not None,\n list(map(\n lambda idx: self.request.files.get('attachment-{0}'.format(idx + 1)),\n list(range(count)),\n )),\n ))\n\n @property\n def allowed_types(self):\n Conference = apps.get_model('osf.Conference')\n allowed_types = []\n for field_names in Conference.objects.values_list('field_names', flat=True):\n allowed_types.extend([field_names['submission1'], field_names['submission2']])\n regex_types_allowed = '|'.join(set(allowed_types))\n return regex_types_allowed"},"code_compressed":{"kind":"null"}}},{"rowIdx":399,"cells":{"id":{"kind":"number","value":399,"string":"399"},"code":{"kind":"string","value":"\"\"\"Unit Test for otx.algorithms.action.adapters.mmaction.utils.config_utils.\"\"\"\n\n# Copyright (C) 2023 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n#\n\nfrom typing import Any\n\nimport torch\nfrom mmaction.models import Recognizer3D\nfrom mmcv.runner import BaseModule\nfrom mmcv.utils import Config\nfrom torch import nn\n\nfrom otx.algorithms.action.adapters.mmaction.models.detectors.fast_rcnn import (\n AVAFastRCNN,\n)\nfrom otx.algorithms.action.adapters.mmaction.utils.export_utils import (\n Exporter,\n _convert_sync_batch_to_normal_batch,\n)\nfrom tests.test_suite.e2e_test_system import e2e_pytest_unit\n\n\nclass MockRecognizer3D(Recognizer3D, BaseModule):\n \"\"\"Mock class for Recognizer3D.\"\"\"\n\n def __init__(self) -> None:\n super(BaseModule, self).__init__()\n\n def forward(self, inputs: Any) -> str:\n return \"Forward function is replaced!\"\n\n def load_state_dict(self, weights) -> Recognizer3D:\n pass\n\n\nclass MockAVAFastRCNN(AVAFastRCNN):\n \"\"\"Mock class for AVAFastRCNN.\"\"\"\n\n def __init__(self) -> None:\n super(BaseModule, self).__init__()\n self.deploy_cfg = None\n\n def METHOD_NAME(self) -> None:\n pass\n\n def forward_infer(self, inputs: Any, img_metas: Any) -> str:\n return \"Forward function is replaced!\"\n\n def load_state_dict(self, weights) -> AVAFastRCNN:\n pass\n\n\ndef _mock_sync_batchnorm(inputs):\n \"\"\"Mock function for 
_sync_batch_to_normal_batch function.\n\n It returns its inputs\n \"\"\"\n\n return inputs\n\n\n@e2e_pytest_unit\ndef test_convert_sync_batch_to_normal_batch() -> None:\n \"\"\"Test _convert_sync_batch_to_normal_batch function.\n\n \n 1. Create sample module, which has some Conv3D, SyncBatchNorm, BatchNorm3d ops\n 2. Run _convert_sync_batch_to_normal_batch function to sample module\n 3. Check SyncBatchNorm is changed into BatchNorm3d\n 4. Check the other ops don't affect by this function\n \"\"\"\n\n sample_module = nn.Sequential(\n nn.Conv3d(100, 100, 3), nn.SyncBatchNorm(100), nn.Conv3d(100, 100, 3), nn.BatchNorm3d(100)\n )\n output_module = _convert_sync_batch_to_normal_batch(sample_module)\n assert isinstance(output_module[0], nn.Conv3d)\n assert isinstance(output_module[1], nn.BatchNorm3d)\n assert isinstance(output_module[2], nn.Conv3d)\n assert isinstance(output_module[3], nn.BatchNorm3d)\n\n\nclass MockTaskProcessor:\n \"\"\"Mock class of task_processor.\"\"\"\n\n def __init__(self, model_cfg, deploy_cfg, device):\n self.model_cfg = model_cfg\n\n def init_pytorch_model(self, weights):\n if self.model_cfg.model == \"cls\":\n return MockRecognizer3D()\n return MockAVAFastRCNN()\n\n\ndef mock_build_task_processor(model_cfg, deploy_cfg, device):\n return MockTaskProcessor(model_cfg, deploy_cfg, device)\n\n\nclass TestExporter:\n \"\"\"Test class for Exporter.\"\"\"\n\n @e2e_pytest_unit\n def test_init(self, mocker) -> None:\n \"\"\"Test __init__ function.\n\n \n 1. Create mock task_processor\n 2. Create mock Recognizer3D using task_processor\n 3. Get inputs\n 4. Create mock AVAFastRCNN using task_processor\n 5. Get inputs\n 6. Check mo options when half precision\n \"\"\"\n\n mocker.patch(\n \"otx.algorithms.action.adapters.mmaction.utils.export_utils.build_task_processor\",\n side_effect=mock_build_task_processor,\n )\n\n recipe_cfg = Config(dict(model=\"cls\"))\n deploy_cfg = Config(\n dict(\n backend_config=dict(\n type=\"openvino\",\n mo_options={},\n model_inputs=[dict(opt_shapes=dict(input=[1, 1, 3, 32, 224, 224]))],\n )\n )\n )\n exporter = Exporter(recipe_cfg, None, deploy_cfg, \"./tmp_dir/openvino\", False, False)\n assert isinstance(exporter.model, Recognizer3D)\n assert exporter.input_tensor.shape == torch.Size([1, 1, 3, 32, 224, 224])\n assert exporter.input_metas is None\n\n recipe_cfg = Config(dict(model=\"det\"))\n deploy_cfg = Config(\n dict(\n backend_config=dict(\n type=\"openvino\",\n mo_options={},\n model_inputs=[dict(opt_shapes=dict(input=[1, 3, 32, 224, 224]))],\n )\n )\n )\n exporter = Exporter(recipe_cfg, None, deploy_cfg, \"./tmp_dir/openvino\", False, False)\n assert isinstance(exporter.model, AVAFastRCNN)\n assert exporter.input_tensor.shape == torch.Size([1, 3, 32, 224, 224])\n assert exporter.input_metas is not None\n\n exporter = Exporter(recipe_cfg, None, deploy_cfg, \"./tmp_dir/openvino\", True, False)\n assert exporter.deploy_cfg.backend_config.mo_options[\"flags\"] == [\"--compress_to_fp16\"]\n\n @e2e_pytest_unit\n def test_export(self, mocker) -> None:\n \"\"\"Test export function.\"\"\"\n\n mocker.patch(\"otx.algorithms.action.adapters.mmaction.utils.export_utils.export\", return_value=True)\n mocker.patch(\"otx.algorithms.action.adapters.mmaction.utils.export_utils.from_onnx\", return_value=True)\n mocker.patch(\n \"otx.algorithms.action.adapters.mmaction.utils.export_utils.build_task_processor\",\n side_effect=mock_build_task_processor,\n )\n\n recipe_cfg = Config(dict(model=\"cls\"))\n deploy_cfg = Config(\n dict(\n backend_config=dict(\n 
type=\"openvino\",\n mo_options={},\n model_inputs=[dict(opt_shapes=dict(input=[1, 1, 3, 32, 224, 224]))],\n ),\n ir_config=dict(input_names=[\"input\"], output_names=[\"output\"]),\n )\n )\n exporter = Exporter(recipe_cfg, None, deploy_cfg, \"./tmp_dir/openvino\", False, False)\n exporter.export()"},"code_compressed":{"kind":"null"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":3,"numItemsPerPage":100,"numTotalItems":6000,"offset":300,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjE3NTYyNywic3ViIjoiL2RhdGFzZXRzL3N0YXMxay9sbG0tYm9vdGNhbXAtdGVzdCIsImV4cCI6MTc1NjE3OTIyNywiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.3oNEy4X1L4tyCeWuLfzJzl0UJ8GKRUTHBLdQz933BFpEWg5jFoytShte6KMqyTOwxWIhSK5C69hOASk7KQYLCQ","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Dataset columns: id (int64, values 0 to 6k); code (string, lengths 4k to 8k); code_compressed (null)
    300
# Copyright (C) Jan 2020 Mellanox Technologies Ltd. All rights reserved.
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This software is available to you under a choice of one of two
# licenses. You may choose to be licensed under the terms of the GNU
# General Public License (GPL) Version 2, available from the file
# COPYING in the main directory of this source tree, or the
# OpenIB.org BSD license below:
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# - Redistributions of source code must retain the above
#   copyright notice, this list of conditions and the following
#   disclaimer.
#
# - Redistributions in binary form must reproduce the above
#   copyright notice, this list of conditions and the following
#   disclaimer in the documentation and/or other materials
#   provided with the distribution.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --
#######################################################
#
# SegmentCreator.py
# Python implementation of the Class SegmentCreator
# Generated by Enterprise Architect
# Created on: 14-Aug-2019 10:12:03 AM
# Original author: talve
#
#######################################################
from segments.SegmentFactory import SegmentFactory
from utils import constants as cs


class SegmentCreator:
    """this class is responsible for splitting the raw data to segments
    and creating segments objects.
    """

    def create(self, raw_data):
        """convert segments data into a segments objects by using SegmentFactory.
        """
        try:
            segments = []
            raw_data_segments_lst = self._parse_segments(raw_data)
            for raw_seg in raw_data_segments_lst:
                seg_type = '{:0b}'.format(raw_seg[cs.SEGMENT_TYPE_DWORD_LOCATION]).zfill(32)[cs.SEGMENT_TYPE_START: cs.SEGMENT_TYPE_END]
                seg_type = hex(int(seg_type, 2))
                seg_type_for_create = SegmentCreator.METHOD_NAME(seg_type)
                seg = SegmentFactory.create(seg_type_for_create, raw_seg)
                seg.resource_type = seg_type
                segments.append(seg)
        except Exception as e:
            raise Exception("Failed to create segments with error: {0}".format(e))
        return segments

    def _parse_segments(self, raw_data):
        """splitting the raw data into segments
        raw data is represented as a list of dword's
        """
        splitted_segments = []
        try:
            end_index = len(raw_data) - 1
            current_index = 0
            while current_index <= end_index:
                # seg size specified in dwords
                seg_size = '{:032b}'.format(raw_data[cs.SEGMENT_SIZE_DWORD_LOCATION + current_index])[cs.SEGMENT_SIZE_START: cs.SEGMENT_SIZE_END]
                seg_size = int(seg_size, 2)
                if seg_size == 0:
                    raise Exception("Error in segments splitting. raw_data didn't get smaller - found segment_size = 0")
                seg_data = raw_data[current_index:seg_size + current_index]
                splitted_segments.append(seg_data)
                current_index += seg_size
        except Exception as e:
            raise Exception("Failed to split segments with error: {0}".format(e))
        return splitted_segments

    @classmethod
    def is_resource_segment(cls, seg_type):
        """This method check if the segment type is a inside the interval of a resource segment
        """
        return cs.RESOURCE_DUMP_SEGMENT_TYPE_RESOURCE_MAX >= seg_type >= cs.RESOURCE_DUMP_SEGMENT_TYPE_RESOURCE_MIN

    @classmethod
    def METHOD_NAME(cls, seg_type):
        """This method check if the segment type is a reference segment and return the right
        type of that segment.
        """
        if cls.is_resource_segment(seg_type):
            return cs.RESOURCE_DUMP_SEGMENT_TYPE_RESOURCE
        return seg_type
    null
    301
from methods.regular.regular_api import *

from default.tests.test_utils import testing_setup
from shared.tests.test_utils import common_actions, data_mocking
from base64 import b64encode
from unittest.mock import patch
from methods.task.task_template.job_resync import job_resync_core, threaded_job_resync
from shared.utils import job_dir_sync_utils


class TestJobResync(testing_setup.DiffgramBaseTestCase):
    """
    """

    def METHOD_NAME(self):
        # TODO: this test is assuming the 'my-sandbox-project' exists and some object have been previously created.
        # For future tests a mechanism of setting up and tearing down the database should be created.
        super(TestJobResync, self).METHOD_NAME()
        project_data = data_mocking.create_project_with_context(
            {
                'users': [
                    {'username': 'Test',
                     'email': '[email protected]',
                     'password': 'diffgram123',
                     }
                ]
            },
            self.session
        )
        self.project_data = project_data
        self.project = project_data['project']

    def test_job_resync_api(self):
        # Create mock job.
        job = data_mocking.create_job({
            'name': 'my-test-job',
            'project': self.project
        }, self.session)
        request_data = {
            'task_template_id': job.id,
        }
        endpoint = f"/api/v1/project/{job.project.project_string_id}/job/resync"
        auth_api = common_actions.create_project_auth(project = job.project, session = self.session)
        credentials = b64encode(f"{auth_api.client_id}:{auth_api.client_secret}".encode()).decode('utf-8')
        response = self.client.post(
            endpoint,
            data = json.dumps(request_data),
            headers = {
                'directory_id': str(self.project.directory_default_id),
                'Authorization': f"Basic {credentials}"
            }
        )
        data = response.json
        self.assertEqual(response.status_code, 200)
        self.assertEqual(data['resync_result'], True)

    def test_job_resync_core(self):
        job = data_mocking.create_job({
            'name': 'my-test-job',
            'project': self.project
        }, self.session)
        auth_api = common_actions.create_project_auth(project = job.project, session = self.session)
        resync_result, log = job_resync_core(session = self.session,
                                             project = self.project,
                                             member = auth_api.member,
                                             task_template_id = job.id,
                                             log = regular_log.default())
        self.assertTrue(resync_result)
        self.assertEqual(len(log['error'].keys()), 0)

    def test_threaded_job_resync(self):
        job = data_mocking.create_job({
            'name': 'my-test-job',
            'status': 'active',
            'project': self.project
        }, self.session)
        auth_api = common_actions.create_project_auth(project = job.project, session = self.session)
        file = data_mocking.create_file({'project_id': self.project.id}, self.session)
        file_missing1 = data_mocking.create_file({'project_id': self.project.id}, self.session)
        file_missing2 = data_mocking.create_file({'project_id': self.project.id}, self.session)
        directory = data_mocking.create_directory({
            'project': self.project,
            'user': self.project_data['users'][0],
            'files': [file, file_missing1, file_missing2]
        }, self.session)
        job.update_attached_directories(self.session,
                                        [{'directory_id': directory.id, 'selected': 'sync'}]
                                        )
        log = regular_log.default()
        sync_manager = job_dir_sync_utils.JobDirectorySyncManager(
            session = self.session,
            log = log,
            job = job
        )
        sync_manager.add_file_into_job(
            file,
            directory,
            create_tasks = True
        )
        self.session.commit()
        result = threaded_job_resync(
            task_template_id = job.id,
            member_id = auth_api.member_id
        )
        self.assertEqual(len(result), 2)
    null
    302
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RpcRequest
from aliyunsdkhitsdb.endpoint import endpoint_data

class UpgradeLindormInstanceRequest(RpcRequest):

    def __init__(self):
        RpcRequest.__init__(self, 'hitsdb', '2020-06-15', 'UpgradeLindormInstance','hitsdb')
        self.set_method('POST')

        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerId(self): # Long
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId): # Long
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_PhoenixCoreNum(self): # Integer
        return self.get_query_params().get('PhoenixCoreNum')

    def set_PhoenixCoreNum(self, PhoenixCoreNum): # Integer
        self.add_query_param('PhoenixCoreNum', PhoenixCoreNum)

    def get_PhoenixCoreSpec(self): # String
        return self.get_query_params().get('PhoenixCoreSpec')

    def set_PhoenixCoreSpec(self, PhoenixCoreSpec): # String
        self.add_query_param('PhoenixCoreSpec', PhoenixCoreSpec)

    def get_UpgradeType(self): # String
        return self.get_query_params().get('UpgradeType')

    def set_UpgradeType(self, UpgradeType): # String
        self.add_query_param('UpgradeType', UpgradeType)

    def get_TsdbSpec(self): # String
        return self.get_query_params().get('TsdbSpec')

    def set_TsdbSpec(self, TsdbSpec): # String
        self.add_query_param('TsdbSpec', TsdbSpec)

    def get_FilestoreSpec(self): # String
        return self.get_query_params().get('FilestoreSpec')

    def set_FilestoreSpec(self, FilestoreSpec): # String
        self.add_query_param('FilestoreSpec', FilestoreSpec)

    def get_LogSpec(self): # String
        return self.get_query_params().get('LogSpec')

    def set_LogSpec(self, LogSpec): # String
        self.add_query_param('LogSpec', LogSpec)

    def get_SecurityToken(self): # String
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self, SecurityToken): # String
        self.add_query_param('SecurityToken', SecurityToken)

    def get_TsdbNum(self): # Integer
        return self.get_query_params().get('TsdbNum')

    def set_TsdbNum(self, TsdbNum): # Integer
        self.add_query_param('TsdbNum', TsdbNum)

    def get_LindormSpec(self): # String
        return self.get_query_params().get('LindormSpec')

    def set_LindormSpec(self, LindormSpec): # String
        self.add_query_param('LindormSpec', LindormSpec)

    def METHOD_NAME(self): # Integer
        return self.get_query_params().get('SolrNum')

    def set_SolrNum(self, SolrNum): # Integer
        self.add_query_param('SolrNum', SolrNum)

    def get_ColdStorage(self): # Integer
        return self.get_query_params().get('ColdStorage')

    def set_ColdStorage(self, ColdStorage): # Integer
        self.add_query_param('ColdStorage', ColdStorage)

    def get_LogNum(self): # Integer
        return self.get_query_params().get('LogNum')

    def set_LogNum(self, LogNum): # Integer
        self.add_query_param('LogNum', LogNum)

    def get_SolrSpec(self): # String
        return self.get_query_params().get('SolrSpec')

    def set_SolrSpec(self, SolrSpec): # String
        self.add_query_param('SolrSpec', SolrSpec)

    def get_CoreSingleStorage(self): # Integer
        return self.get_query_params().get('CoreSingleStorage')

    def set_CoreSingleStorage(self, CoreSingleStorage): # Integer
        self.add_query_param('CoreSingleStorage', CoreSingleStorage)

    def get_ResourceOwnerAccount(self): # String
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self): # String
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount): # String
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_FilestoreNum(self): # Integer
        return self.get_query_params().get('FilestoreNum')

    def set_FilestoreNum(self, FilestoreNum): # Integer
        self.add_query_param('FilestoreNum', FilestoreNum)

    def get_OwnerId(self): # Long
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId): # Long
        self.add_query_param('OwnerId', OwnerId)

    def get_LindormNum(self): # Integer
        return self.get_query_params().get('LindormNum')

    def set_LindormNum(self, LindormNum): # Integer
        self.add_query_param('LindormNum', LindormNum)

    def get_LtsCoreNum(self): # Integer
        return self.get_query_params().get('LtsCoreNum')

    def set_LtsCoreNum(self, LtsCoreNum): # Integer
        self.add_query_param('LtsCoreNum', LtsCoreNum)

    def get_InstanceId(self): # String
        return self.get_query_params().get('InstanceId')

    def set_InstanceId(self, InstanceId): # String
        self.add_query_param('InstanceId', InstanceId)

    def get_LtsCoreSpec(self): # String
        return self.get_query_params().get('LtsCoreSpec')

    def set_LtsCoreSpec(self, LtsCoreSpec): # String
        self.add_query_param('LtsCoreSpec', LtsCoreSpec)

    def get_ClusterStorage(self): # Integer
        return self.get_query_params().get('ClusterStorage')

    def set_ClusterStorage(self, ClusterStorage): # Integer
        self.add_query_param('ClusterStorage', ClusterStorage)

    def get_LogSingleStorage(self): # Integer
        return self.get_query_params().get('LogSingleStorage')

    def set_LogSingleStorage(self, LogSingleStorage): # Integer
        self.add_query_param('LogSingleStorage', LogSingleStorage)

    def get_ZoneId(self): # String
        return self.get_query_params().get('ZoneId')

    def set_ZoneId(self, ZoneId): # String
        self.add_query_param('ZoneId', ZoneId)
    from __future__ import annotations import numpy as np import pytest import dask.array as da from dask.array.tests.test_dispatch import EncapsulateNDArray, WrappedArray from dask.array.utils import assert_eq @pytest.mark.parametrize( "func", [ lambda x: np.append(x, x), lambda x: np.concatenate([x, x, x]), lambda x: np.cov(x, x), lambda x: np.dot(x, x), lambda x: np.dstack((x, x)), lambda x: np.flip(x, axis=0), lambda x: np.hstack((x, x)), lambda x: np.matmul(x, x), lambda x: np.mean(x), lambda x: np.stack([x, x]), lambda x: np.block([x, x]), lambda x: np.sum(x), lambda x: np.var(x), lambda x: np.vstack((x, x)), lambda x: np.linalg.norm(x), lambda x: np.min(x), lambda x: np.amin(x), lambda x: np.round(x), lambda x: np.insert(x, 0, 3, axis=0), lambda x: np.delete(x, 0, axis=0), lambda x: np.select( [x < 0.3, x < 0.6, x > 0.7], [x * 2, x, x / 2], default=0.65 ), ], ) def test_array_function_dask(func): x = np.random.default_rng().random((100, 100)) y = da.from_array(x, chunks=(50, 50)) res_x = func(x) res_y = func(y) assert isinstance(res_y, da.Array) assert_eq(res_y, res_x) @pytest.mark.parametrize( "func", [ lambda x: np.dstack(x), lambda x: np.hstack(x), lambda x: np.vstack(x), ], ) def test_stack_functions_require_sequence_of_arrays(func): x = np.random.default_rng().random((100, 100)) y = da.from_array(x, chunks=(50, 50)) with pytest.raises( NotImplementedError, match="expects a sequence of arrays as the first argument" ): func(y) @pytest.mark.parametrize("func", [np.fft.fft, np.fft.fft2]) def test_array_function_fft(func): x = np.random.default_rng().random((100, 100)) y = da.from_array(x, chunks=(100, 100)) res_x = func(x) res_y = func(y) if func.__module__ != "mkl_fft._numpy_fft": assert isinstance(res_y, da.Array) assert_eq(res_y, res_x) @pytest.mark.parametrize( "func", [ lambda x: np.min_scalar_type(x), lambda x: np.linalg.det(x), lambda x: np.linalg.eigvals(x), ], ) def test_array_notimpl_function_dask(func): x = np.random.default_rng().random((100, 100)) y = da.from_array(x, chunks=(50, 50)) with pytest.warns( FutureWarning, match="The `.*` function is not implemented by Dask" ): func(y) @pytest.mark.parametrize( "func", [lambda x: np.real(x), lambda x: np.imag(x), lambda x: np.transpose(x)] ) def test_array_function_sparse(func): sparse = pytest.importorskip("sparse") x = da.random.default_rng().random((500, 500), chunks=(100, 100)) x[x < 0.9] = 0 y = x.map_blocks(sparse.COO) assert_eq(func(x), func(y)) def test_array_function_sparse_tensordot(): sparse = pytest.importorskip("sparse") rng = np.random.default_rng() x = rng.random((2, 3, 4)) x[x < 0.9] = 0 y = rng.random((4, 3, 2)) y[y < 0.9] = 0 xx = sparse.COO(x) yy = sparse.COO(y) assert_eq( np.tensordot(x, y, axes=(2, 0)), np.tensordot(xx, yy, axes=(2, 0)).todense() ) @pytest.mark.parametrize("chunks", [(100, 100), (500, 100)]) def test_array_function_cupy_svd(chunks): cupy = pytest.importorskip("cupy") x = cupy.random.default_rng().random((500, 100)) y = da.from_array(x, chunks=chunks, asarray=False) u_base, s_base, v_base = da.linalg.svd(y) u, s, v = np.linalg.svd(y) assert_eq(u, u_base) assert_eq(s, s_base) assert_eq(v, v_base) @pytest.mark.parametrize( "func", [ lambda x: np.concatenate([x, x, x]), lambda x: np.cov(x, x), lambda x: np.dot(x, x), lambda x: np.dstack((x, x)), lambda x: np.flip(x, axis=0), lambda x: np.hstack((x, x)), lambda x: np.matmul(x, x), lambda x: np.mean(x), lambda x: np.stack([x, x]), lambda x: np.sum(x), lambda x: np.var(x), lambda x: np.vstack((x, x)), lambda x: np.linalg.norm(x), ], ) def 
test_unregistered_func(func): # Wrap a procol-based encapsulated ndarray x = EncapsulateNDArray(np.random.default_rng().random((100, 100))) # See if Dask holds the array fine y = da.from_array(x, chunks=(50, 50)) # Check if it's an equivalent array assert_eq(x, y, check_meta=False, check_type=False) # Perform two NumPy functions, one on the # Encapsulated array xx = func(x) # And one on the Dask array holding these # encapsulated arrays yy = func(y) # Check that they are equivalent arrays. assert_eq(xx, yy, check_meta=False, check_type=False) def test_non_existent_func(): # Regression test for __array_function__ becoming default in numpy 1.17 # dask has no sort function, so ensure that this still calls np.sort x = da.from_array(np.array([1, 2, 4, 3]), chunks=(2,)) with pytest.warns( FutureWarning, match="The `numpy.sort` function is not implemented by Dask" ): assert list(np.sort(x)) == [1, 2, 3, 4] @pytest.mark.parametrize( "func", [ np.equal, np.matmul, np.dot, lambda x, y: np.stack([x, y]), ], ) @pytest.mark.parametrize( "arr_upcast, arr_downcast", [ ( WrappedArray(np.random.default_rng().random((10, 10))), da.random.default_rng().random((10, 10), chunks=(5, 5)), ), ( da.random.default_rng().random((10, 10), chunks=(5, 5)), EncapsulateNDArray(np.random.default_rng().random((10, 10))), ), ( WrappedArray(np.random.default_rng().random((10, 10))), EncapsulateNDArray(np.random.default_rng().random((10, 10))), ), ], ) def METHOD_NAME(func, arr_upcast, arr_downcast): """Test proper dispatch on binary NumPy functions""" assert ( type(func(arr_upcast, arr_downcast)) == type(func(arr_downcast, arr_upcast)) == type(arr_upcast) ) @pytest.mark.parametrize("func", [da.array, da.asarray, da.asanyarray, da.tri]) def test_like_raises(func): assert_eq(func(1, like=func(1)), func(1)) @pytest.mark.parametrize("func", [np.array, np.asarray, np.asanyarray]) def test_like_with_numpy_func(func): assert_eq(func(1, like=da.array(1)), func(1)) @pytest.mark.parametrize("func", [np.array, np.asarray, np.asanyarray]) def test_like_with_numpy_func_and_dtype(func): assert_eq(func(1, dtype=float, like=da.array(1)), func(1, dtype=float))
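The tests above exercise NumPy's __array_function__ protocol, which routes np.* calls to Dask's own implementations when handed a dask.array.Array. A small standalone sketch of that dispatch (the array shape and chunking are arbitrary):

import numpy as np
import dask.array as da

x = np.arange(12.0).reshape(3, 4)
y = da.from_array(x, chunks=(3, 2))

# np.* functions dispatch through __array_function__ and return lazy dask arrays.
stacked = np.vstack((y, y))
total = np.sum(y)

print(type(stacked))      # <class 'dask.array.core.Array'>
print(total.compute())    # 66.0, same as np.sum(x)
assert np.allclose(stacked.compute(), np.vstack((x, x)))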
    #------------------------------------------------------------------------- # Copyright (c) Microsoft. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #-------------------------------------------------------------------------- import os from azure import ( WindowsAzureError, MANAGEMENT_HOST, _get_request_body, _parse_response, _str, _update_request_uri_query, ) from azure.http import ( HTTPError, HTTPRequest, ) from azure.http.httpclient import _HTTPClient from azure.servicemanagement import ( AZURE_MANAGEMENT_CERTFILE, AZURE_MANAGEMENT_SUBSCRIPTIONID, _management_error_handler, _parse_response_for_async_op, _update_management_header, ) class _ServiceManagementClient(object): def __init__(self, subscription_id=None, cert_file=None, host=MANAGEMENT_HOST): self.requestid = None self.subscription_id = subscription_id self.cert_file = cert_file self.host = host if not self.cert_file: if AZURE_MANAGEMENT_CERTFILE in os.environ: self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE] if not self.subscription_id: if AZURE_MANAGEMENT_SUBSCRIPTIONID in os.environ: self.subscription_id = os.environ[ AZURE_MANAGEMENT_SUBSCRIPTIONID] if not self.cert_file or not self.subscription_id: raise WindowsAzureError( 'You need to provide subscription id and certificate file') self._httpclient = _HTTPClient( service_instance=self, cert_file=self.cert_file) self._filter = self._httpclient.perform_request def with_filter(self, filter): '''Returns a new service which will process requests with the specified filter. Filtering operations can include logging, automatic retrying, etc... The filter is a lambda which receives the HTTPRequest and another lambda. The filter can perform any pre-processing on the request, pass it off to the next lambda, and then perform any post-processing on the response.''' res = type(self)(self.subscription_id, self.cert_file, self.host) old_filter = self._filter def new_filter(request): return filter(request, old_filter) res._filter = new_filter return res def set_proxy(self, host, port, user=None, password=None): ''' Sets the proxy server host and port for the HTTP CONNECT Tunnelling. host: Address of the proxy. Ex: '192.168.0.100' port: Port of the proxy. Ex: 6000 user: User for proxy authorization. password: Password for proxy authorization. 
''' self._httpclient.set_proxy(host, port, user, password) #--Helper functions -------------------------------------------------- def _perform_request(self, request): try: resp = self._filter(request) except HTTPError as ex: return _management_error_handler(ex) return resp def METHOD_NAME(self, path, response_type): request = HTTPRequest() request.method = 'GET' request.host = self.host request.path = path request.path, request.query = _update_request_uri_query(request) request.headers = _update_management_header(request) response = self._perform_request(request) if response_type is not None: return _parse_response(response, response_type) return response def _perform_put(self, path, body, async=False): request = HTTPRequest() request.method = 'PUT' request.host = self.host request.path = path request.body = _get_request_body(body) request.path, request.query = _update_request_uri_query(request) request.headers = _update_management_header(request) response = self._perform_request(request) if async: return _parse_response_for_async_op(response) return None def _perform_post(self, path, body, response_type=None, async=False): request = HTTPRequest() request.method = 'POST' request.host = self.host request.path = path request.body = _get_request_body(body) request.path, request.query = _update_request_uri_query(request) request.headers = _update_management_header(request) response = self._perform_request(request) if response_type is not None: return _parse_response(response, response_type) if async: return _parse_response_for_async_op(response) return None def _perform_delete(self, path, async=False): request = HTTPRequest() request.method = 'DELETE' request.host = self.host request.path = path request.path, request.query = _update_request_uri_query(request) request.headers = _update_management_header(request) response = self._perform_request(request) if async: return _parse_response_for_async_op(response) return None def _get_path(self, resource, name): path = '/' + self.subscription_id + '/' + resource if name is not None: path += '/' + _str(name) return path
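The with_filter docstring above describes a chained-filter pattern. Below is a sketch of a logging filter wired onto this legacy azure.servicemanagement client; the subscription id and certificate path are placeholders, and in practice you would use a concrete subclass built on this base class.

def logging_filter(request, next_filter):
    # Pre-processing: inspect the outgoing request.
    print('>>', request.method, request.path)
    # Hand the request to the next filter (ultimately the HTTP client).
    response = next_filter(request)
    # Post-processing: inspect the response before returning it.
    print('<<', response.status)
    return response

# Hypothetical construction; any subclass of _ServiceManagementClient works the same way.
client = _ServiceManagementClient('<subscription-id>', '/path/to/management.pem')
logged_client = client.with_filter(logging_filter)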
    # Copyright (c) ZenML GmbH 2022. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """Local deployments connected to a docker MySQL server.""" import logging import time from typing import Optional import docker.errors as docker_errors from docker.models.containers import Container from tests.harness.deployment.base import ( DEPLOYMENT_START_TIMEOUT, MYSQL_DEFAULT_PASSWORD, MYSQL_DEFAULT_PORT, MYSQL_DOCKER_IMAGE, BaseTestDeployment, ) from tests.harness.model import ( DeploymentConfig, DeploymentSetup, DeploymentStoreConfig, DeploymentType, ) MYSQL_DOCKER_CONTAINER_NAME_PREFIX = "zenml-mysql-" class LocalDockerTestDeployment(BaseTestDeployment): """A deployment that uses a MySQL Docker container to host the ZenML database.""" def __init__(self, config: DeploymentConfig) -> None: """Initializes the deployment. Args: config: The deployment configuration. """ super().__init__(config) @property def container_name(self) -> str: """The name of the MySQL container. Returns: The name of the MySQL container. """ return f"{MYSQL_DOCKER_CONTAINER_NAME_PREFIX}{self.config.name}" @property def METHOD_NAME(self) -> Optional[Container]: """Returns the Docker container configured for the deployment. Returns: The container for the deployment if it exists, None otherwise. """ try: return self.docker_client.containers.get(self.container_name) except docker_errors.NotFound: return None @property def is_running(self) -> bool: """Returns whether the deployment is running. Returns: Whether the deployment is running. """ # Check if container exists and is running METHOD_NAME = self.METHOD_NAME if METHOD_NAME and METHOD_NAME.status == "running": return True return False def up(self) -> None: """Starts up the deployment. Raises: RuntimeError: If the deployment could not be started. """ from zenml.utils.networking_utils import scan_for_available_port if self.is_running: logging.info( f"Deployment '{self.config.name}' is already running. " f"Skipping provisioning." ) return # Cleanup a previous deployment in a failed state self.down() port = scan_for_available_port(MYSQL_DEFAULT_PORT) if port is None: raise RuntimeError("Could not find an available port for MySQL.") self.docker_client.containers.run( name=self.container_name, image=MYSQL_DOCKER_IMAGE, detach=True, environment={"MYSQL_ROOT_PASSWORD": MYSQL_DEFAULT_PASSWORD}, # Enable the primary key requirement for MySQL to catch errors related to # missing primary keys. command=["--sql_require_primary_key=on"], remove=True, auto_remove=True, ports={MYSQL_DEFAULT_PORT: port}, labels={ "zenml-test": "true", }, extra_hosts={"host.docker.internal": "host-gateway"}, ) timeout = DEPLOYMENT_START_TIMEOUT while True: logging.info( f"Trying to connect to deployment '{self.config.name}'..." 
) try: with self.connect() as client: _ = client.zen_store break except RuntimeError as e: timeout -= 1 if timeout == 0: raise RuntimeError( f"Timed out waiting for the '{self.config.name}' " f"deployment to start: {e}" ) from e time.sleep(1) logging.info( f"Started container '{self.container_name}' " f"for deployment '{self.config.name}'." ) def down(self) -> None: """Tears down the deployment.""" METHOD_NAME = self.METHOD_NAME if METHOD_NAME is None: logging.info( f"Deployment '{self.config.name}' is no longer running. " ) return while True: if METHOD_NAME.status == "running": logging.info( f"Stopping container '{self.container_name}' " f"for deployment '{self.config.name}'." ) METHOD_NAME.stop() elif METHOD_NAME.status == "exited": logging.info( f"Removing container '{self.container_name}' " f"for deployment '{self.config.name}'." ) METHOD_NAME.remove() time.sleep(1) METHOD_NAME = self.METHOD_NAME if METHOD_NAME is None: break logging.info(f"Container '{self.container_name}' has been removed.") def get_store_config(self) -> Optional[DeploymentStoreConfig]: """Returns the store config for the deployment. Returns: The store config for the deployment if it is running, None otherwise. Raises: RuntimeError: If the deployment is not running. """ if not self.is_running: raise RuntimeError( f"The {self.config.name} deployment is not running." ) METHOD_NAME = self.METHOD_NAME # Guaranteed to be non-None by the is_running check assert METHOD_NAME is not None try: port = int( METHOD_NAME.ports[f"{MYSQL_DEFAULT_PORT}/tcp"][0]["HostPort"] ) except (KeyError, IndexError): raise RuntimeError( f"Could not find the port for the '{self.config.name}' " f"deployment." ) return DeploymentStoreConfig( url=f"mysql://root:{MYSQL_DEFAULT_PASSWORD}@127.0.0.1:{port}/zenml" ) LocalDockerTestDeployment.register_deployment_class( type=DeploymentType.LOCAL, setup=DeploymentSetup.DOCKER )
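The up() method above combines a free-port scan with docker-py's containers.run. A stripped-down sketch of the same pattern using docker-py directly; the image tag, password, container name, and port-scan helper are placeholders, not the harness constants.

import socket

import docker


def find_free_port(start: int = 3306, end: int = 3400) -> int:
    # Probe localhost ports until one refuses a connection; a rough stand-in
    # for ZenML's scan_for_available_port helper.
    for port in range(start, end):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            if sock.connect_ex(("127.0.0.1", port)) != 0:
                return port
    raise RuntimeError("no free port found")


client = docker.from_env()
host_port = find_free_port()

container = client.containers.run(
    name="example-mysql",                         # placeholder container name
    image="mysql:8.0",                            # placeholder image tag
    detach=True,
    environment={"MYSQL_ROOT_PASSWORD": "zenml"},
    command=["--sql_require_primary_key=on"],
    ports={3306: host_port},                      # container port -> free host port
    auto_remove=True,
)
print(f"MySQL reachable at 127.0.0.1:{host_port}")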
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkpolardb.endpoint import endpoint_data class DescribeDBClustersRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'polardb', '2017-08-01', 'DescribeDBClusters','polardb') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_ResourceOwnerId(self): # Long return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self, ResourceOwnerId): # Long self.add_query_param('ResourceOwnerId', ResourceOwnerId) def get_DBClusterDescription(self): # String return self.get_query_params().get('DBClusterDescription') def set_DBClusterDescription(self, DBClusterDescription): # String self.add_query_param('DBClusterDescription', DBClusterDescription) def get_DBClusterStatus(self): # String return self.get_query_params().get('DBClusterStatus') def set_DBClusterStatus(self, DBClusterStatus): # String self.add_query_param('DBClusterStatus', DBClusterStatus) def get_ConnectionString(self): # String return self.get_query_params().get('ConnectionString') def set_ConnectionString(self, ConnectionString): # String self.add_query_param('ConnectionString', ConnectionString) def METHOD_NAME(self): # Integer return self.get_query_params().get('RecentExpirationInterval') def set_RecentExpirationInterval(self, RecentExpirationInterval): # Integer self.add_query_param('RecentExpirationInterval', RecentExpirationInterval) def get_PageNumber(self): # Integer return self.get_query_params().get('PageNumber') def set_PageNumber(self, PageNumber): # Integer self.add_query_param('PageNumber', PageNumber) def get_DBNodeIds(self): # String return self.get_query_params().get('DBNodeIds') def set_DBNodeIds(self, DBNodeIds): # String self.add_query_param('DBNodeIds', DBNodeIds) def get_ResourceGroupId(self): # String return self.get_query_params().get('ResourceGroupId') def set_ResourceGroupId(self, ResourceGroupId): # String self.add_query_param('ResourceGroupId', ResourceGroupId) def get_RecentCreationInterval(self): # Integer return self.get_query_params().get('RecentCreationInterval') def set_RecentCreationInterval(self, RecentCreationInterval): # Integer self.add_query_param('RecentCreationInterval', RecentCreationInterval) def get_Expired(self): # Boolean return self.get_query_params().get('Expired') def set_Expired(self, Expired): # Boolean self.add_query_param('Expired', Expired) def get_PageSize(self): # Integer return self.get_query_params().get('PageSize') def set_PageSize(self, PageSize): # Integer self.add_query_param('PageSize', PageSize) def get_Tags(self): # RepeatList 
return self.get_query_params().get('Tag') def set_Tags(self, Tag): # RepeatList for depth1 in range(len(Tag)): if Tag[depth1].get('Value') is not None: self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value')) if Tag[depth1].get('Key') is not None: self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key')) def get_ResourceOwnerAccount(self): # String return self.get_query_params().get('ResourceOwnerAccount') def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount) def get_OwnerAccount(self): # String return self.get_query_params().get('OwnerAccount') def set_OwnerAccount(self, OwnerAccount): # String self.add_query_param('OwnerAccount', OwnerAccount) def get_OwnerId(self): # Long return self.get_query_params().get('OwnerId') def set_OwnerId(self, OwnerId): # Long self.add_query_param('OwnerId', OwnerId) def get_DBType(self): # String return self.get_query_params().get('DBType') def set_DBType(self, DBType): # String self.add_query_param('DBType', DBType) def get_DBVersion(self): # String return self.get_query_params().get('DBVersion') def set_DBVersion(self, DBVersion): # String self.add_query_param('DBVersion', DBVersion) def get_PayType(self): # String return self.get_query_params().get('PayType') def set_PayType(self, PayType): # String self.add_query_param('PayType', PayType) def get_DBClusterIds(self): # String return self.get_query_params().get('DBClusterIds') def set_DBClusterIds(self, DBClusterIds): # String self.add_query_param('DBClusterIds', DBClusterIds)
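The set_Tags setter above flattens a list of dicts into Tag.N.Key / Tag.N.Value query parameters. A short sketch of paging through clusters that carry a given tag; the credentials, region, and tag values are placeholders, and the response field names follow the usual PolarDB DescribeDBClusters shape but should be verified against the API documentation.

import json

from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = DescribeDBClustersRequest()
request.set_Tags([{"Key": "env", "Value": "staging"}])  # becomes Tag.1.Key / Tag.1.Value
request.set_PageSize(30)

page = 1
while True:
    request.set_PageNumber(page)
    body = json.loads(client.do_action_with_exception(request))
    for cluster in body.get("Items", {}).get("DBCluster", []):   # assumed response layout
        print(cluster.get("DBClusterId"), cluster.get("DBClusterStatus"))
    if page * 30 >= body.get("TotalRecordCount", 0):
        break
    page += 1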
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest class GetTaskListFilterRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'devops-rdc', '2020-03-03', 'GetTaskListFilter') self.set_method('POST') def get_InvolveMembers(self): # String return self.get_body_params().get('InvolveMembers') def set_InvolveMembers(self, InvolveMembers): # String self.add_body_params('InvolveMembers', InvolveMembers) def get_ExecutorId(self): # String return self.get_body_params().get('ExecutorId') def set_ExecutorId(self, ExecutorId): # String self.add_body_params('ExecutorId', ExecutorId) def get_OrderCondition(self): # String return self.get_body_params().get('OrderCondition') def set_OrderCondition(self, OrderCondition): # String self.add_body_params('OrderCondition', OrderCondition) def get_SprintId(self): # String return self.get_body_params().get('SprintId') def set_SprintId(self, SprintId): # String self.add_body_params('SprintId', SprintId) def get_Extra(self): # String return self.get_body_params().get('Extra') def set_Extra(self, Extra): # String self.add_body_params('Extra', Extra) def get_PageSize(self): # Integer return self.get_body_params().get('PageSize') def METHOD_NAME(self, PageSize): # Integer self.add_body_params('PageSize', PageSize) def get_ScenarioFieldConfigId(self): # String return self.get_body_params().get('ScenarioFieldConfigId') def set_ScenarioFieldConfigId(self, ScenarioFieldConfigId): # String self.add_body_params('ScenarioFieldConfigId', ScenarioFieldConfigId) def get_IsDone(self): # Boolean return self.get_body_params().get('IsDone') def set_IsDone(self, IsDone): # Boolean self.add_body_params('IsDone', IsDone) def get_ObjectType(self): # String return self.get_body_params().get('ObjectType') def set_ObjectType(self, ObjectType): # String self.add_body_params('ObjectType', ObjectType) def get_ProjectId(self): # String return self.get_body_params().get('ProjectId') def set_ProjectId(self, ProjectId): # String self.add_body_params('ProjectId', ProjectId) def get_PageToken(self): # String return self.get_body_params().get('PageToken') def set_PageToken(self, PageToken): # String self.add_body_params('PageToken', PageToken) def get_Order(self): # String return self.get_body_params().get('Order') def set_Order(self, Order): # String self.add_body_params('Order', Order) def get_TagId(self): # String return self.get_body_params().get('TagId') def set_TagId(self, TagId): # String self.add_body_params('TagId', TagId) def get_TaskFlowStatusId(self): # String return self.get_body_params().get('TaskFlowStatusId') def set_TaskFlowStatusId(self, TaskFlowStatusId): # String self.add_body_params('TaskFlowStatusId', TaskFlowStatusId) def get_DueDateStart(self): # String return 
self.get_body_params().get('DueDateStart') def set_DueDateStart(self, DueDateStart): # String self.add_body_params('DueDateStart', DueDateStart) def get_CreatorId(self): # String return self.get_body_params().get('CreatorId') def set_CreatorId(self, CreatorId): # String self.add_body_params('CreatorId', CreatorId) def get_Priority(self): # String return self.get_body_params().get('Priority') def set_Priority(self, Priority): # String self.add_body_params('Priority', Priority) def get_DueDateEnd(self): # String return self.get_body_params().get('DueDateEnd') def set_DueDateEnd(self, DueDateEnd): # String self.add_body_params('DueDateEnd', DueDateEnd) def get_OrgId(self): # String return self.get_body_params().get('OrgId') def set_OrgId(self, OrgId): # String self.add_body_params('OrgId', OrgId) def get_Name(self): # String return self.get_body_params().get('Name') def set_Name(self, Name): # String self.add_body_params('Name', Name)
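Unlike the query-parameter requests elsewhere in this dump, this class sends its fields as body parameters and pages with an opaque PageToken. A hedged sketch of walking all pages; the credentials, org/project ids, and the response field names are assumptions to check against the DevOps RDC documentation.

import json

from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = GetTaskListFilterRequest()
request.set_OrgId('<organization-id>')
request.set_ProjectId('<project-id>')
request.set_PageSize(50)

token = None
while True:
    if token:
        request.set_PageToken(token)
    body = json.loads(client.do_action_with_exception(request))
    for task in body.get("Object", {}).get("Result", []):    # assumed response layout
        print(task.get("Name"))
    token = body.get("Object", {}).get("NextPageToken")      # assumed field name
    if not token:
        break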
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkcloudapi.endpoint import endpoint_data class ModifyApiRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'CloudAPI', '2016-07-14', 'ModifyApi','apigateway') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_WebSocketApiType(self): # String return self.get_query_params().get('WebSocketApiType') def set_WebSocketApiType(self, WebSocketApiType): # String self.add_query_param('WebSocketApiType', WebSocketApiType) def get_ErrorCodeSamples(self): # String return self.get_query_params().get('ErrorCodeSamples') def set_ErrorCodeSamples(self, ErrorCodeSamples): # String self.add_query_param('ErrorCodeSamples', ErrorCodeSamples) def get_AppCodeAuthType(self): # String return self.get_query_params().get('AppCodeAuthType') def set_AppCodeAuthType(self, AppCodeAuthType): # String self.add_query_param('AppCodeAuthType', AppCodeAuthType) def get_Description(self): # String return self.get_query_params().get('Description') def set_Description(self, Description): # String self.add_query_param('Description', Description) def get_DisableInternet(self): # Boolean return self.get_query_params().get('DisableInternet') def set_DisableInternet(self, DisableInternet): # Boolean self.add_query_param('DisableInternet', DisableInternet) def get_BackendId(self): # String return self.get_query_params().get('BackendId') def set_BackendId(self, BackendId): # String self.add_query_param('BackendId', BackendId) def get_ConstantParameters(self): # String return self.get_query_params().get('ConstantParameters') def set_ConstantParameters(self, ConstantParameters): # String self.add_query_param('ConstantParameters', ConstantParameters) def get_AuthType(self): # String return self.get_query_params().get('AuthType') def set_AuthType(self, AuthType): # String self.add_query_param('AuthType', AuthType) def get_AllowSignatureMethod(self): # String return self.get_query_params().get('AllowSignatureMethod') def set_AllowSignatureMethod(self, AllowSignatureMethod): # String self.add_query_param('AllowSignatureMethod', AllowSignatureMethod) def get_ServiceParameters(self): # String return self.get_query_params().get('ServiceParameters') def set_ServiceParameters(self, ServiceParameters): # String self.add_query_param('ServiceParameters', ServiceParameters) def get_FailResultSample(self): # String return self.get_query_params().get('FailResultSample') def set_FailResultSample(self, FailResultSample): # String self.add_query_param('FailResultSample', FailResultSample) def get_SystemParameters(self): # 
String return self.get_query_params().get('SystemParameters') def set_SystemParameters(self, SystemParameters): # String self.add_query_param('SystemParameters', SystemParameters) def get_ServiceParametersMap(self): # String return self.get_query_params().get('ServiceParametersMap') def set_ServiceParametersMap(self, ServiceParametersMap): # String self.add_query_param('ServiceParametersMap', ServiceParametersMap) def get_SecurityToken(self): # String return self.get_query_params().get('SecurityToken') def set_SecurityToken(self, SecurityToken): # String self.add_query_param('SecurityToken', SecurityToken) def get_OpenIdConnectConfig(self): # String return self.get_query_params().get('OpenIdConnectConfig') def set_OpenIdConnectConfig(self, OpenIdConnectConfig): # String self.add_query_param('OpenIdConnectConfig', OpenIdConnectConfig) def get_RequestParameters(self): # String return self.get_query_params().get('RequestParameters') def set_RequestParameters(self, RequestParameters): # String self.add_query_param('RequestParameters', RequestParameters) def get_ResultDescriptions(self): # String return self.get_query_params().get('ResultDescriptions') def set_ResultDescriptions(self, ResultDescriptions): # String self.add_query_param('ResultDescriptions', ResultDescriptions) def get_Visibility(self): # String return self.get_query_params().get('Visibility') def set_Visibility(self, Visibility): # String self.add_query_param('Visibility', Visibility) def get_GroupId(self): # String return self.get_query_params().get('GroupId') def set_GroupId(self, GroupId): # String self.add_query_param('GroupId', GroupId) def get_ServiceConfig(self): # String return self.get_query_params().get('ServiceConfig') def set_ServiceConfig(self, ServiceConfig): # String self.add_query_param('ServiceConfig', ServiceConfig) def get_ResultType(self): # String return self.get_query_params().get('ResultType') def set_ResultType(self, ResultType): # String self.add_query_param('ResultType', ResultType) def get_ApiName(self): # String return self.get_query_params().get('ApiName') def set_ApiName(self, ApiName): # String self.add_query_param('ApiName', ApiName) def get_ResultSample(self): # String return self.get_query_params().get('ResultSample') def set_ResultSample(self, ResultSample): # String self.add_query_param('ResultSample', ResultSample) def get_BackendEnable(self): # Boolean return self.get_query_params().get('BackendEnable') def set_BackendEnable(self, BackendEnable): # Boolean self.add_query_param('BackendEnable', BackendEnable) def get_ForceNonceCheck(self): # Boolean return self.get_query_params().get('ForceNonceCheck') def set_ForceNonceCheck(self, ForceNonceCheck): # Boolean self.add_query_param('ForceNonceCheck', ForceNonceCheck) def METHOD_NAME(self): # String return self.get_query_params().get('RequestConfig') def set_RequestConfig(self, RequestConfig): # String self.add_query_param('RequestConfig', RequestConfig) def get_ResultBodyModel(self): # String return self.get_query_params().get('ResultBodyModel') def set_ResultBodyModel(self, ResultBodyModel): # String self.add_query_param('ResultBodyModel', ResultBodyModel) def get_ApiId(self): # String return self.get_query_params().get('ApiId') def set_ApiId(self, ApiId): # String self.add_query_param('ApiId', ApiId)
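Several of the parameters above (ServiceConfig, RequestConfig, RequestParameters, ...) are JSON documents passed as strings. A hedged sketch of setting one of them; the key names inside the JSON blob are illustrative only and must be taken from the API Gateway ModifyApi documentation.

import json

request = ModifyApiRequest()
request.set_ApiId('<api-id>')
request.set_GroupId('<group-id>')
request.set_ApiName('DemoApi')
request.set_Visibility('PRIVATE')
request.set_AuthType('APP')

# JSON-valued parameters are serialized to strings before being attached to the query.
request.set_RequestConfig(json.dumps({
    "RequestProtocol": "HTTP",        # illustrative keys; check the ModifyApi docs
    "RequestHttpMethod": "GET",
    "RequestPath": "/demo",
    "RequestMode": "PASSTHROUGH",
}))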
    """ Mixins for Ratable model managers and serializers. """ import logging from typing import Type from sqlalchemy.sql.expression import func from galaxy.model import ItemRatingAssociation from galaxy.model.base import transaction from . import base log = logging.getLogger(__name__) class RatableManagerMixin: rating_assoc: Type[ItemRatingAssociation] def METHOD_NAME(self, item, user, as_int=True): """Returns the integer rating given to this item by the user. Returns the full rating model if `as_int` is False. """ METHOD_NAME = self.query_associated(self.rating_assoc, item).filter_by(user=user).first() # most common case is assumed to be 'get the number' if not as_int: return METHOD_NAME # get the int value if there's a rating return METHOD_NAME.METHOD_NAME if METHOD_NAME is not None else None def ratings(self, item): """Returns a list of all rating values given to this item.""" return [r.METHOD_NAME for r in item.ratings] def ratings_avg(self, item): """Returns the average of all ratings given to this item.""" foreign_key = self._foreign_key(self.rating_assoc) avg = self.session().query(func.avg(self.rating_assoc.METHOD_NAME)).filter(foreign_key == item).scalar() return avg or 0.0 def ratings_count(self, item): """Returns the number of ratings given to this item.""" foreign_key = self._foreign_key(self.rating_assoc) return self.session().query(func.count(self.rating_assoc.METHOD_NAME)).filter(foreign_key == item).scalar() def rate(self, item, user, value, flush=True): """Updates or creates a rating for this item and user. Returns the rating""" # TODO?: possible generic update_or_create # TODO?: update and create to RatingsManager (if not overkill) METHOD_NAME = self.METHOD_NAME(item, user, as_int=False) if not METHOD_NAME: METHOD_NAME = self.rating_assoc(user, item) self.associate(METHOD_NAME, item) METHOD_NAME.METHOD_NAME = value self.session().add(METHOD_NAME) if flush: session = self.session() with transaction(session): session.commit() return METHOD_NAME # TODO?: all ratings for a user class RatableSerializerMixin: def add_serializers(self): self.serializers["user_rating"] = self.serialize_user_rating self.serializers["community_rating"] = self.serialize_community_rating def serialize_user_rating(self, item, key, user=None, **context): """Returns the integer rating given to this item by the user.""" if not user: raise base.ModelSerializingError( "user_rating requires a user", model_class=self.manager.model_class, id=self.serialize_id(item, "id") ) return self.manager.METHOD_NAME(item, user) def serialize_community_rating(self, item, key, **context): """ Returns a dictionary containing: `average` the (float) average of all ratings of this object `count` the number of ratings """ # ??: seems like two queries (albeit in-sql functions) would slower # than getting the rows and calc'ing both here with one query manager = self.manager return { "average": manager.ratings_avg(item), "count": manager.ratings_count(item), } class RatableDeserializerMixin: def add_deserializers(self): self.deserializers["user_rating"] = self.deserialize_rating def deserialize_rating(self, item, key, val, user=None, **context): if not user: raise base.ModelDeserializingError( "user_rating requires a user", model_class=self.manager.model_class, id=self.serialize_id(item, "id") ) val = self.validate.int_range(key, val, 0, 5) return self.manager.rate(item, user, val, flush=False) class RatableFilterMixin: def _ratings_avg_accessor(self, item): return self.manager.ratings_avg(item) def _add_parsers(self): """ Adds 
the following filters: `community_rating`: filter """ self.fn_filter_parsers.update( { "community_rating": { "op": { "eq": lambda i, v: self._ratings_avg_accessor(i) == v, # TODO: default to greater than (currently 'eq' due to base/controller.py) "ge": lambda i, v: self._ratings_avg_accessor(i) >= v, "le": lambda i, v: self._ratings_avg_accessor(i) <= v, }, "val": float, } } )
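A sketch of how a concrete manager might pull these mixins together. The manager and association model names here are hypothetical; the wiring a rating_assoc class attribute alongside the mixin methods is what the code above expects.

from galaxy import model
from galaxy.managers import base


class PageManager(base.ModelManager, RatableManagerMixin):
    """Hypothetical manager for a ratable item type."""

    model_class = model.Page
    rating_assoc = model.PageRatingAssociation  # hypothetical association model


# Elsewhere, e.g. in an API controller:
#   manager.rate(page, trans.user, 4)        # creates or updates the user's rating
#   manager.ratings_avg(page)                # community average
#   manager.ratings_count(page)              # number of ratings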
    import os import tempfile from galaxy.tool_util.parser import get_tool_source from galaxy.util.compression_utils import CompressedFile from galaxy.util.resources import resource_path from galaxy_test.base import api_asserts from tool_shed.test.base.populators import repo_tars from ..base.api import ShedApiTestCase COLUMN_MAKER_PATH = resource_path(__package__, "../test_data/column_maker/column_maker.tar") class TestShedRepositoriesApi(ShedApiTestCase): def test_create(self): populator = self.populator category_id = populator.new_category(prefix="testcreate").id repos_by_category = populator.repositories_by_category(category_id) repos = repos_by_category.repositories assert len(repos) == 0 populator.new_repository(category_id) repos_by_category = populator.repositories_by_category(category_id) repos = repos_by_category.repositories assert len(repos) == 1 def test_update_repository(self): populator = self.populator prefix = "testupdate" category_id = populator.new_category(prefix=prefix).id repository = populator.new_repository(category_id, prefix=prefix) repository_id = repository.id repository_update = populator.upload_revision( repository_id, COLUMN_MAKER_PATH, ) assert repository_update.is_ok # used by getRepository in TS client. def test_metadata_simple(self): populator = self.populator repository = populator.setup_column_maker_repo(prefix="repoformetadata") repository_metadata = populator.get_metadata(repository) metadata_for_revisions = repository_metadata.__root__ assert len(metadata_for_revisions) == 1 only_key = list(metadata_for_revisions.keys())[0] assert only_key.startswith("0:") only_revision = list(metadata_for_revisions.values())[0] assert only_revision assert only_revision.downloadable assert not only_revision.malicious def test_index_simple(self): populator = self.populator repo = populator.setup_column_maker_repo(prefix="repoforindex") repository_id = repo.id show_response = self.api_interactor.get(f"repositories/{repository_id}") index_response = self.api_interactor.get("repositories") api_asserts.assert_status_code_is_ok(show_response) api_asserts.assert_status_code_is_ok(index_response) repository_ids = [r["id"] for r in index_response.json()] assert repository_id in repository_ids repository = self.populator.get_repository_for(repo.owner, repo.name) assert repository.owner == repo.owner assert repository.name == repo.name def test_install_info(self): # actually installing requires a whole Galaxy setup and the install manager but # we can test the response validates against the future facing InstallInfo pydandic # models. populator = self.populator repo = populator.setup_column_maker_and_get_metadata(prefix="repoforinstallinfo") populator.get_install_info(repo) def test_get_ordered_installable_revisions(self): # Used in ephemeris... 
populator = self.populator repository = populator.setup_column_maker_repo(prefix="repoforindex") assert repository.owner assert repository.name revisions = populator.get_ordered_installable_revisions(repository.owner, repository.name) assert len(revisions.__root__) == 1 def test_reset_on_repository(self): populator = self.populator repository = populator.setup_column_maker_repo(prefix="repoforreseta") assert repository.owner assert repository.name revisions = populator.get_ordered_installable_revisions(repository.owner, repository.name) assert len(revisions.__root__) == 1 metadata_response = populator.reset_metadata(repository) assert metadata_response.start_time assert metadata_response.stop_time assert metadata_response.status == "ok" assert len(metadata_response.repository_status) == 1 revisions = populator.get_ordered_installable_revisions(repository.owner, repository.name) assert len(revisions.__root__) == 1 def test_repository_search(self): populator = self.populator repository = populator.setup_column_maker_repo(prefix="repoforreposearch") populator.reindex() results = populator.repo_search_query("repoforreposearch") assert len(results.hits) == 1 first_hit = results.hits[0] assert first_hit.repository.name == repository.name assert first_hit.repository.times_downloaded == 0 def test_repo_tars(self): for index, repo_path in enumerate(repo_tars("column_maker")): path = CompressedFile(repo_path).extract(tempfile.mkdtemp()) tool_xml_path = os.path.join(path, "column_maker.xml") tool_source = get_tool_source(config_file=tool_xml_path) tool_version = tool_source.parse_version() if index == 0: assert tool_version == "1.1.0" elif index == 1: assert tool_version == "1.2.0" elif index == 2: assert tool_version == "1.3.0" else: raise AssertionError("Wrong number of repo tars returned...") def test_reset_on_simple_repository(self): populator = self.populator repository = populator.setup_test_data_repo("column_maker") populator.assert_has_n_installable_revisions(repository, 3) response = self.api_interactor.post( "repositories/reset_metadata_on_repository", data={"repository_id": repository.id} ) api_asserts.assert_status_code_is_ok(response) populator.assert_has_n_installable_revisions(repository, 3) def METHOD_NAME(self): populator = self.populator # setup a repository with 4 revisions but only 3 installable ones due to no version change in a tool repository = populator.setup_test_data_repo("column_maker_with_download_gaps") populator.assert_has_n_installable_revisions(repository, 3) response = self.api_interactor.post( "repositories/reset_metadata_on_repository", data={"repository_id": repository.id} ) api_asserts.assert_status_code_is_ok(response) populator.assert_has_n_installable_revisions(repository, 3) def test_reset_all(self): populator = self.populator repository = populator.setup_test_data_repo("column_maker_with_download_gaps") populator.assert_has_n_installable_revisions(repository, 3) # reseting one at a time or resetting everything via the web controllers works... # reseting all at once via the API does not work - it breaks the repository response = self.api_interactor.post( "repositories/reset_metadata_on_repositories", data={"payload": "can not be empty because bug in controller"}, ) api_asserts.assert_status_code_is_ok(response) populator.assert_has_n_installable_revisions(repository, 3)
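The repo_tars loop above doubles as a recipe for inspecting repository tarballs by hand; the same calls work outside the test (the tarball path below is a placeholder).

import os
import tempfile

from galaxy.tool_util.parser import get_tool_source
from galaxy.util.compression_utils import CompressedFile

# Extract a repository tarball and read the wrapped tool's version,
# exactly as the test_repo_tars loop does.
path = CompressedFile("/path/to/column_maker.tar").extract(tempfile.mkdtemp())
tool_source = get_tool_source(config_file=os.path.join(path, "column_maker.xml"))
print(tool_source.parse_version())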
import unittest

import torch
import torch.nn as nn
import torchvision

from lightly.models import NNCLR
from lightly.models.modules import NNMemoryBankModule


def METHOD_NAME(name: str):
    if name == "resnet18":
        return torchvision.models.resnet18()
    elif name == "resnet50":
        return torchvision.models.resnet50()
    raise NotImplementedError


def get_backbone(model: nn.Module):
    backbone = torch.nn.Sequential(*(list(model.children())[:-1]))
    return backbone


class TestNNCLR(unittest.TestCase):
    def setUp(self):
        self.resnet_variants = dict(
            resnet18=dict(
                num_ftrs=512,
                proj_hidden_dim=512,
                pred_hidden_dim=128,
                out_dim=512,
            ),
            resnet50=dict(
                num_ftrs=2048,
                proj_hidden_dim=2048,
                pred_hidden_dim=512,
                out_dim=2048,
            ),
        )
        self.batch_size = 2
        self.input_tensor = torch.rand((self.batch_size, 3, 32, 32))

    def test_create_variations_cpu(self):
        for model_name, config in self.resnet_variants.items():
            resnet = METHOD_NAME(model_name)
            model = NNCLR(get_backbone(resnet), **config)
            self.assertIsNotNone(model)

    def test_create_variations_gpu(self):
        if not torch.cuda.is_available():
            return
        for model_name, config in self.resnet_variants.items():
            resnet = METHOD_NAME(model_name)
            model = NNCLR(get_backbone(resnet), **config).to("cuda")
            self.assertIsNotNone(model)

    def test_feature_dim_configurable(self):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name, config in self.resnet_variants.items():
            resnet = METHOD_NAME(model_name)
            model = NNCLR(get_backbone(resnet), **config).to(device)

            # check that feature vector has correct dimension
            with torch.no_grad():
                out_features = model.backbone(self.input_tensor.to(device))
            self.assertEqual(out_features.shape[1], config["num_ftrs"])

            # check that projection head output has right dimension
            with torch.no_grad():
                out_projection = model.projection_mlp(out_features.squeeze())
            self.assertEqual(out_projection.shape[1], config["out_dim"])

            # check that prediction head output has right dimension
            with torch.no_grad():
                out_prediction = model.prediction_mlp(out_projection.squeeze())
            self.assertEqual(out_prediction.shape[1], config["out_dim"])

    def test_tuple_input(self):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name, config in self.resnet_variants.items():
            resnet = METHOD_NAME(model_name)
            model = NNCLR(get_backbone(resnet), **config).to(device)

            x0 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
            x1 = torch.rand((self.batch_size, 3, 64, 64)).to(device)

            out = model(x0)
            self.assertEqual(out[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out[1].shape, (self.batch_size, config["out_dim"]))

            out, features = model(x0, return_features=True)
            self.assertEqual(out[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(features.shape, (self.batch_size, config["num_ftrs"]))

            out0, out1 = model(x0, x1)
            self.assertEqual(out0[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out0[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[1].shape, (self.batch_size, config["out_dim"]))

            (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
            self.assertEqual(out0[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out0[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(f0.shape, (self.batch_size, config["num_ftrs"]))
            self.assertEqual(f1.shape, (self.batch_size, config["num_ftrs"]))

    def test_memory_bank(self):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name, config in self.resnet_variants.items():
            resnet = METHOD_NAME(model_name)
            model = NNCLR(get_backbone(resnet), **config).to(device)

            for nn_size in [2**3, 2**8]:
                nn_replacer = NNMemoryBankModule(size=nn_size)

                with torch.no_grad():
                    for i in range(10):
                        x0 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
                        x1 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
                        (z0, p0), (z1, p1) = model(x0, x1)
                        z0 = nn_replacer(z0.detach(), update=False)
                        z1 = nn_replacer(z1.detach(), update=True)
    import pytest import BioSimSpace.Sandpit.Exscientia.Types as Types import BioSimSpace.Sandpit.Exscientia.Units as Units @pytest.mark.parametrize( "string, dimensions", [ ("kilo Cal oriEs per Mole / angstrom **2", (0, 0, 0, 1, -1, 0, -2)), ("k Cal_per _mOl / nm^2", (0, 0, 0, 1, -1, 0, -2)), ("kj p eR moles / pico METERs2", (0, 0, 0, 1, -1, 0, -2)), ("coul oMbs / secs * ATm os phereS", (0, 1, -1, 1, 0, 0, -3)), ("pm**3 * rads * de grEE", (2, 0, 3, 0, 0, 0, 0)), ], ) def test_supported_units(string, dimensions): """Test that we can create GeneralUnit objects with the correct dimensions by evaluating strings as unit based algebraic expressions. """ # Try to create the GeneralUnit from the string. general_unit = Types._GeneralUnit(string) # Assert that the dimensions match. assert general_unit.dimensions() == dimensions @pytest.mark.parametrize( "string, matching_type", [ ("radian * degree**2 / radian^2", Types.Angle), ("angstrom**3 / nanometer", Types.Area), ("coulombs * angstrom**-2 * nanometer**2", Types.Charge), ("(kcal_per_mol / angstrom**2) * nanometer**2", Types.Energy), ("angstrom**3 * nanometer^-1 / picometer", Types.Length), ("bar * kJ_per_mol**2 / (kcal_per_mol * kJ_per_mol)", Types.Pressure), ("coulomb * kelvin^-3 * celsius**2 * kelvin^2 / e_charge", Types.Temperature), ("nanoseconds^3 * kelvin^-3 * celsius**3 / milliseconds**2", Types.Time), ("angstroms cubed * atm^-3 * bar**3", Types.Volume), ], ) def test_type_conversion(string, matching_type): """Test that GeneralUnit objects can be converted to a type with matching dimensions. """ # Try to create the GeneralUnit from the string. general_unit = Types._GeneralUnit(string) # Assert that the types match. assert type(general_unit) is matching_type @pytest.mark.parametrize( "string, default_unit", [ ("degree", Units.Angle.radian), ("meters2", Units.Area.angstrom2), ("coulombs", Units.Charge.electron_charge), ("kJ_per_mol", Units.Energy.kcal_per_mol), ("nanometer", Units.Length.angstrom), ("bar", Units.Pressure.atm), ("fahrenheit", Units.Temperature.kelvin), ("days", Units.Time.nanosecond), ("picometers**3", Units.Volume.angstrom3), ], ) def test_default_conversion(string, default_unit): """Test that GeneralUnit objects are always converted to the default unit for that type. """ # Try to create the GeneralUnit from the string. general_unit = Types._GeneralUnit(string) # Assert that units match. assert general_unit.unit() == default_unit.unit() @pytest.mark.parametrize( "unit_type", [ Units.Angle.radian, Units.Area.angstrom2, Units.Charge.electron_charge, Units.Energy.kcal_per_mol, Units.Length.angstrom, Units.Pressure.atm, Units.Temperature.kelvin, Units.Time.nanosecond, Units.Volume.angstrom3, ], ) def test_pos_pow(unit_type): """Test that unit-based types can be raised to positive powers.""" # Store the dimensions associated with the original type. old_dimensions = unit_type.dimensions() # Square the unit-based type. unit_type = unit_type**2 # Store the new dimensions. new_dimensions = unit_type.dimensions() # Each dimension entry should be twice the old value. 
for d0, d1 in zip(old_dimensions, new_dimensions): assert d1 == 2 * d0 @pytest.mark.parametrize( "unit_type", [ Units.Angle.radian, Units.Area.angstrom2, Units.Charge.electron_charge, Units.Energy.kcal_per_mol, Units.Length.angstrom, Units.Pressure.atm, Units.Temperature.kelvin, Units.Time.nanosecond, Units.Volume.angstrom3, ], ) def METHOD_NAME(unit_type): """Test that unit-based types can be raised to negative powers.""" # Store the dimensions associated with the original type. old_dimensions = unit_type.dimensions() # Invert the unit-based type. unit_type = unit_type**-1 # Store the new dimensions. new_dimensions = unit_type.dimensions() # Each dimension entry should be the inverse of the old value. for d0, d1 in zip(old_dimensions, new_dimensions): assert d1 == -d0 @pytest.mark.parametrize( "string", [ "degree", "meters2", "coulombs", "kJ_per_mol", "nanometer", "bar", "fahrenheit", "days", "picometers**3", ], ) def test_dimensionless(string): """Test that GeneralUnit objects convert to dimensionless float values when divided by themself. """ # Try to create the GeneralUnit from the string. general_unit = Types._GeneralUnit(string) # Check that we get back a float when divided by itself. assert isinstance(general_unit / general_unit, float) def test_dimensionless_value(): """Check that conversion to a dimensionless unit preserves the value of the unit conversion. """ value = (Units.Energy.kcal_per_mol / Units.Length.angstrom**2) / ( Units.Energy.kj_per_mol / Units.Length.nanometer**2 ) assert value == pytest.approx(418.4) def test_value_and_unit(): """ Regression test to make sure that a general unit with a value and unit can be parsed correctly. """ general_unit = Types._GeneralUnit(2, "kcal per mol / angstrom**2")
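Outside the test suite, the same GeneralUnit machinery is what makes ordinary unit arithmetic work. A small sketch based only on the operations exercised above:

import BioSimSpace.Sandpit.Exscientia.Types as Types
import BioSimSpace.Sandpit.Exscientia.Units as Units

# A value plus a unit expression, as in the regression test above.
force_constant = Types._GeneralUnit(2, "kcal per mol / angstrom**2")

# Dividing two quantities with identical dimensions collapses to a plain float
# conversion factor (~418.4 here, kcal/mol/A^2 expressed in kJ/mol/nm^2).
ratio = (Units.Energy.kcal_per_mol / Units.Length.angstrom**2) / (
    Units.Energy.kj_per_mol / Units.Length.nanometer**2
)
print(ratio)

# Expressions with the dimensions of a known type are converted to that type.
area = Types._GeneralUnit("angstrom**3 / nanometer")
print(type(area).__name__, area.dimensions())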
    #!/usr/bin/env python3 # Copyright (c) 2014-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the wallet accounts properly when there is a double-spend conflict.""" from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, find_output, ) class TxnMallTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 3 self.supports_cli = False def METHOD_NAME(self): self.skip_if_no_wallet() def add_options(self, parser): parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true", help="Test double-spend of 1-confirmed transaction") def setup_network(self): # Start with split network: super().setup_network() self.disconnect_nodes(1, 2) def run_test(self): # All nodes should start with 1,250 BTC: starting_balance = 1250 # All nodes should be out of IBD. # If the nodes are not all out of IBD, that can interfere with # blockchain sync later in the test when nodes are connected, due to # timing issues. for n in self.nodes: assert n.getblockchaininfo()["initialblockdownload"] == False for i in range(3): assert_equal(self.nodes[i].getbalance(), {"bitcoin": starting_balance}) # Assign coins to foo and bar addresses: node0_address_foo = self.nodes[0].getnewaddress() fund_foo_txid = self.nodes[0].sendtoaddress(node0_address_foo, 1219) fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) node0_address_bar = self.nodes[0].getnewaddress() fund_bar_txid = self.nodes[0].sendtoaddress(node0_address_bar, 29) fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) assert_equal(self.nodes[0].getbalance(), {"bitcoin": starting_balance + fund_foo_tx["fee"]['bitcoin'] + fund_bar_tx["fee"]['bitcoin']}) # Coins are sent to node1_address node1_address = self.nodes[1].getnewaddress() # First: use raw transaction API to send 1240 BTC to node1_address, # but don't broadcast: doublespend_fee = Decimal('-.02') rawtx_input_0 = {} rawtx_input_0["txid"] = fund_foo_txid rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219) rawtx_input_1 = {} rawtx_input_1["txid"] = fund_bar_txid rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29) inputs = [rawtx_input_0, rawtx_input_1] change_address = self.nodes[0].getnewaddress() outputs = [{node1_address: 1240}, {change_address: 1248 - 1240 + doublespend_fee}, {"fee": (1219+29) - (1240+1248 - 1240 + doublespend_fee)}] rawtx = self.nodes[0].createrawtransaction(inputs, outputs) doublespend = self.nodes[0].signrawtransactionwithwallet(rawtx) assert_equal(doublespend["complete"], True) # Create two spends using 1 50 BTC coin each txid1 = self.nodes[0].sendtoaddress(node1_address, 40) txid2 = self.nodes[0].sendtoaddress(node1_address, 20) # Have node0 mine a block: if (self.options.mine_block): self.nodes[0].generate(1) self.sync_blocks(self.nodes[0:2]) tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Node0's balance should be starting balance, plus 50BTC for another # matured block, minus 40, minus 20, and minus transaction fees: expected = starting_balance + fund_foo_tx["fee"]['bitcoin'] + fund_bar_tx["fee"]['bitcoin'] if self.options.mine_block: expected += 50 expected += tx1["amount"]['bitcoin'] + tx1["fee"]['bitcoin'] expected += tx2["amount"]['bitcoin'] + tx2["fee"]['bitcoin'] assert_equal(self.nodes[0].getbalance(), {"bitcoin": expected}) if 
self.options.mine_block: assert_equal(tx1["confirmations"], 1) assert_equal(tx2["confirmations"], 1) # Node1's balance should be both transaction amounts: assert_equal(self.nodes[1].getbalance(), {"bitcoin": starting_balance - tx1["amount"]['bitcoin'] - tx2["amount"]['bitcoin']}) else: assert_equal(tx1["confirmations"], 0) assert_equal(tx2["confirmations"], 0) # Now give doublespend and its parents to miner: self.nodes[2].sendrawtransaction(fund_foo_tx["hex"]) self.nodes[2].sendrawtransaction(fund_bar_tx["hex"]) doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"]) # ... mine a block... self.nodes[2].generate(1) # Reconnect the split network, and sync chain: self.connect_nodes(1, 2) self.nodes[2].generate(1) # Mine another block to make sure we sync self.sync_blocks() assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2) # Re-fetch transaction info: tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Both transactions should be conflicted assert_equal(tx1["confirmations"], -2) assert_equal(tx2["confirmations"], -2) # Node0's total balance should be starting balance, plus 100BTC for # two more matured blocks, minus 1240 for the double-spend, plus fees (which are # negative): expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"]['bitcoin'] + fund_bar_tx["fee"]['bitcoin'] + doublespend_fee assert_equal(self.nodes[0].getbalance()['bitcoin'], expected) # Node1's balance should be its initial balance (1250 for 25 block rewards) plus the doublespend: assert_equal(self.nodes[1].getbalance()['bitcoin'], 1250 + 1240) if __name__ == '__main__': TxnMallTest().main()
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkalb.endpoint import endpoint_data class UpdateHealthCheckTemplateAttributeRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Alb', '2020-06-16', 'UpdateHealthCheckTemplateAttribute','alb') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_HealthCheckTimeout(self): # Integer return self.get_query_params().get('HealthCheckTimeout') def set_HealthCheckTimeout(self, HealthCheckTimeout): # Integer self.add_query_param('HealthCheckTimeout', HealthCheckTimeout) def get_ClientToken(self): # String return self.get_query_params().get('ClientToken') def set_ClientToken(self, ClientToken): # String self.add_query_param('ClientToken', ClientToken) def get_HealthCheckProtocol(self): # String return self.get_query_params().get('HealthCheckProtocol') def set_HealthCheckProtocol(self, HealthCheckProtocol): # String self.add_query_param('HealthCheckProtocol', HealthCheckProtocol) def get_UnhealthyThreshold(self): # Integer return self.get_query_params().get('UnhealthyThreshold') def set_UnhealthyThreshold(self, UnhealthyThreshold): # Integer self.add_query_param('UnhealthyThreshold', UnhealthyThreshold) def get_HealthyThreshold(self): # Integer return self.get_query_params().get('HealthyThreshold') def set_HealthyThreshold(self, HealthyThreshold): # Integer self.add_query_param('HealthyThreshold', HealthyThreshold) def get_HealthCheckPath(self): # String return self.get_query_params().get('HealthCheckPath') def set_HealthCheckPath(self, HealthCheckPath): # String self.add_query_param('HealthCheckPath', HealthCheckPath) def get_HealthCheckCodes(self): # Array return self.get_query_params().get('HealthCheckCodes') def set_HealthCheckCodes(self, HealthCheckCodes): # Array for index1, value1 in enumerate(HealthCheckCodes): self.add_query_param('HealthCheckCodes.' 
+ str(index1 + 1), value1) def get_DryRun(self): # Boolean return self.get_query_params().get('DryRun') def set_DryRun(self, DryRun): # Boolean self.add_query_param('DryRun', DryRun) def get_HealthCheckMethod(self): # String return self.get_query_params().get('HealthCheckMethod') def METHOD_NAME(self, HealthCheckMethod): # String self.add_query_param('HealthCheckMethod', HealthCheckMethod) def get_HealthCheckHost(self): # String return self.get_query_params().get('HealthCheckHost') def set_HealthCheckHost(self, HealthCheckHost): # String self.add_query_param('HealthCheckHost', HealthCheckHost) def get_HealthCheckInterval(self): # Integer return self.get_query_params().get('HealthCheckInterval') def set_HealthCheckInterval(self, HealthCheckInterval): # Integer self.add_query_param('HealthCheckInterval', HealthCheckInterval) def get_HealthCheckTemplateName(self): # String return self.get_query_params().get('HealthCheckTemplateName') def set_HealthCheckTemplateName(self, HealthCheckTemplateName): # String self.add_query_param('HealthCheckTemplateName', HealthCheckTemplateName) def get_HealthCheckTemplateId(self): # String return self.get_query_params().get('HealthCheckTemplateId') def set_HealthCheckTemplateId(self, HealthCheckTemplateId): # String self.add_query_param('HealthCheckTemplateId', HealthCheckTemplateId) def get_HealthCheckHttpVersion(self): # String return self.get_query_params().get('HealthCheckHttpVersion') def set_HealthCheckHttpVersion(self, HealthCheckHttpVersion): # String self.add_query_param('HealthCheckHttpVersion', HealthCheckHttpVersion) def get_HealthCheckConnectPort(self): # Integer return self.get_query_params().get('HealthCheckConnectPort') def set_HealthCheckConnectPort(self, HealthCheckConnectPort): # Integer self.add_query_param('HealthCheckConnectPort', HealthCheckConnectPort)
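The request class above only assembles query parameters; dispatching it goes through an AcsClient from aliyunsdkcore. A minimal usage sketch, assuming valid credentials, a region, and a hypothetical template ID:

from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = UpdateHealthCheckTemplateAttributeRequest()
request.set_HealthCheckTemplateId('hct-xxxxxxxx')        # hypothetical template ID
request.set_HealthCheckProtocol('HTTP')
request.set_HealthCheckPath('/healthz')
request.set_HealthCheckCodes(['http_2xx', 'http_3xx'])   # becomes HealthCheckCodes.1, HealthCheckCodes.2

response = client.do_action_with_exception(request)      # raw API response bytes
print(response)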
    315
    # coding: utf-8 """ OpenAPI Petstore This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501 The version of the OpenAPI document: 1.0.0 Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator """ from __future__ import annotations from petstore_api.shared_imports.schema_imports import * # pyright: ignore [reportWildcardImportFromLibrary] AdditionalProperties: typing_extensions.TypeAlias = schemas.NotAnyTypeSchema from petstore_api.paths.fake_parameter_collisions1_abab_self_ab.post.parameters.parameter_0 import schema from petstore_api.paths.fake_parameter_collisions1_abab_self_ab.post.parameters.parameter_1 import schema as schema_2 from petstore_api.paths.fake_parameter_collisions1_abab_self_ab.post.parameters.parameter_2 import schema as schema_3 from petstore_api.paths.fake_parameter_collisions1_abab_self_ab.post.parameters.parameter_3 import schema as schema_5 from petstore_api.paths.fake_parameter_collisions1_abab_self_ab.post.parameters.parameter_4 import schema as schema_4 Properties = typing.TypedDict( 'Properties', { "1": typing.Type[schema.Schema], "aB": typing.Type[schema_2.Schema], "Ab": typing.Type[schema_3.Schema], "A-B": typing.Type[schema_4.Schema], "self": typing.Type[schema_5.Schema], } ) class QueryParametersDict(schemas.immutabledict[str, schemas.OUTPUT_BASE_TYPES]): __required_keys__: typing.FrozenSet[str] = frozenset({ }) __optional_keys__: typing.FrozenSet[str] = frozenset({ "1", "aB", "Ab", "A-B", "self", }) def __new__( cls, *, aB: typing.Union[ str, schemas.Unset ] = schemas.unset, METHOD_NAME: typing.Union[ str, schemas.Unset ] = schemas.unset, configuration_: typing.Optional[schema_configuration.SchemaConfiguration] = None, ): arg_: typing.Dict[str, typing.Any] = {} for key, val in ( ("aB", aB), ("Ab", METHOD_NAME), ): if isinstance(val, schemas.Unset): continue arg_[key] = val used_arg_ = typing.cast(QueryParametersDictInput, arg_) return QueryParameters.validate(used_arg_, configuration=configuration_) @staticmethod def from_dict_( arg: typing.Union[ QueryParametersDictInput, QueryParametersDict ], configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None ) -> QueryParametersDict: return QueryParameters.validate(arg, configuration=configuration) @property def aB(self) -> typing.Union[str, schemas.Unset]: val = self.get("aB", schemas.unset) if isinstance(val, schemas.Unset): return val return typing.cast( str, val ) @property def METHOD_NAME(self) -> typing.Union[str, schemas.Unset]: val = self.get("Ab", schemas.unset) if isinstance(val, schemas.Unset): return val return typing.cast( str, val ) QueryParametersDictInput = typing.TypedDict( 'QueryParametersDictInput', { "1": str, "aB": str, "Ab": str, "A-B": str, "self": str, }, total=False ) @dataclasses.dataclass(frozen=True) class QueryParameters( schemas.Schema[QueryParametersDict, tuple] ): types: typing.FrozenSet[typing.Type] = frozenset({schemas.immutabledict}) properties: Properties = dataclasses.field(default_factory=lambda: schemas.typed_dict_to_instance(Properties)) # type: ignore additional_properties: typing.Type[AdditionalProperties] = dataclasses.field(default_factory=lambda: AdditionalProperties) # type: ignore type_to_output_cls: typing.Mapping[ typing.Type, typing.Type ] = dataclasses.field( default_factory=lambda: { schemas.immutabledict: QueryParametersDict } ) @classmethod def validate( cls, arg: typing.Union[ 
QueryParametersDictInput, QueryParametersDict, ], configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None ) -> QueryParametersDict: return super().validate_base( arg, configuration=configuration, )
    316
    """Test otx segmentation task.""" # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import os import numpy as np import pytest from otx.algorithms.segmentation.task import OTXSegmentationTask from otx.api.configuration.helper import create from otx.api.entities.inference_parameters import InferenceParameters from otx.api.entities.model_template import ( parse_model_template, ) from tests.unit.algorithms.segmentation.test_helpers import ( DEFAULT_SEG_TEMPLATE_DIR, generate_otx_dataset, generate_otx_label_schema, init_environment, ) from otx.api.usecases.tasks.interfaces.export_interface import ExportType from tests.test_suite.e2e_test_system import e2e_pytest_unit class MockOTXSegmentationTask(OTXSegmentationTask): def _infer_model(*args, **kwargs): return dict( classes=["background", "rectangle", "ellipse", "triangle"], eval_predictions=[[np.random.rand(4, 128, 128)]], feature_vectors=[np.random.rand(600, 1, 1)], ) def _train_model(*args, **kwargs): return {"final_ckpt": "dummy.pth"} def _explain_model(*args, **kwargs): pass def _export_model(*args, **kwargs): return { "outputs": {"bin": f"/tmp/model.xml", "xml": f"/tmp/model.bin", "onnx": f"/tmp/model.onnx"}, "inference_parameters": {"mean_values": "", "scale_values": ""}, } class MockModel: class _Configuration: def __init__(self, label_schema): self.label_schema = label_schema def get_label_schema(self): return self.label_schema def __init__(self): self.model_adapters = ["weights.pth"] self.data = np.ndarray(1) label_schema = generate_otx_label_schema() self.configuration = self._Configuration(label_schema) def get_data(self, name): return self.data def set_data(self, *args, **kwargs): return class TestOTXSegmentationTask: @pytest.fixture(autouse=True) def setup(self): model_template = parse_model_template(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "template.yaml")) hyper_parameters = create(model_template.hyper_parameters.data) task_env = init_environment(hyper_parameters, model_template) self.seg_task = MockOTXSegmentationTask(task_env) @e2e_pytest_unit def test_load_model_ckpt(self, mocker): mocker_torch_load = mocker.patch("torch.load") self.seg_task._load_model_ckpt(MockModel()) mocker_torch_load.assert_called_once() @e2e_pytest_unit def test_train(self, mocker): dataset = generate_otx_dataset(5) mocker.patch("torch.load", return_value=np.ndarray([1])) self.seg_task.train(dataset, MockModel()) assert self.seg_task._model_ckpt == "dummy.pth" @e2e_pytest_unit def test_infer(self): dataset = generate_otx_dataset(5) predicted_dataset = self.seg_task.infer( dataset.with_empty_annotations(), inference_parameters=InferenceParameters(is_evaluation=False) ) assert predicted_dataset[0].annotation_scene.annotations[0] @e2e_pytest_unit def test_evaluate(self, mocker): class _MockScoreMetric: def __init__(self, value): self.value = value class _MockMetric: def __init__(self): self.overall_dice = _MockScoreMetric(1.0) def METHOD_NAME(self): return 1.0 class _MockResultEntity: performance = 0.0 mocker.patch( "otx.algorithms.segmentation.task.MetricsHelper.compute_dice_averaged_over_pixels", return_value=_MockMetric(), ) _result_entity = _MockResultEntity() self.seg_task.evaluate(_result_entity) assert _result_entity.performance == 1.0 @e2e_pytest_unit @pytest.mark.parametrize("export_type", [ExportType.ONNX, ExportType.OPENVINO]) def test_export(self, otx_model, mocker, export_type): mocker_open = mocker.patch("builtins.open") mocker_open.__enter__.return_value = True 
mocker.patch("otx.algorithms.segmentation.task.embed_ir_model_data", return_value=None) mocker.patch("otx.algorithms.segmentation.task.embed_onnx_model_data", return_value=None) self.seg_task.export(export_type, otx_model) mocker_open.assert_called()
    317
    # coding=utf-8 # Copyright 2023 The Uncertainty Baselines Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=line-too-long r"""Finetune a ViT-L/32 on CIFAR-10/100 subpopulation shift with BE+GP. Checkpoints in this config are useful for BE->BE+GP pretraining->finetuning. This config is used for models pretrained on either JFT-300M or ImageNet-21K. """ # pylint: enable=line-too-long import ml_collections from experiments import sweep_utils # local file import from baselines.jft # CIFAR-10/100 subpopulation datasets. CIFAR10_SUBPOPL_DATA_FILES = [] CIFAR100_SUBPOPL_DATA_FILES = [] def get_config(): """Config for finetuning.""" config = ml_collections.ConfigDict() config.model_init = '' # set in sweep config.dataset = '' # set in sweep config.val_split = '' # set in sweep config.test_split = '' # set in sweep config.train_split = '' # set in sweep config.num_classes = None # set in sweep config.batch_size = 512 config.total_steps = None # set in sweep config.pp_train = '' # set in sweep config.pp_eval = '' # set in sweep config.shuffle_buffer_size = 50_000 # Per host, so small-ish is ok. config.log_training_steps = 100 config.log_eval_steps = 1000 config.checkpoint_steps = 5000 config.checkpoint_timeout = 1 config.prefetch_to_device = 2 config.trial = 0 # Subpopulation shift evaluation. Parameters set in the sweep. If # `config.subpopl_cifar_data_file` is None, this evaluation is skipped. config.subpopl_cifar_data_file = None config.pp_eval_subpopl_cifar = None # OOD evaluation. They're all set in the sweep. config.ood_datasets = [] config.ood_num_classes = [] config.ood_split = '' config.ood_methods = [] config.pp_eval_ood = [] config.eval_on_cifar_10h = False config.pp_eval_cifar_10h = '' config.eval_on_imagenet_real = False config.pp_eval_imagenet_real = '' # Subpopulation shift evaluation. Parameters set in the sweep. config.subpopl_cifar_data_file = None config.pp_eval_subpopl_cifar = None # Model section. config.model = ml_collections.ConfigDict() config.model.patch_size = [32, 32] config.model.hidden_size = 1024 config.model.transformer = ml_collections.ConfigDict() config.model.transformer.mlp_dim = 4096 config.model.transformer.num_heads = 16 config.model.transformer.num_layers = 24 config.model.transformer.attention_dropout_rate = 0. config.model.transformer.dropout_rate = 0. config.model.classifier = 'token' # This is "no head" fine-tuning, which we use by default. config.model.representation_size = None # BatchEnsemble config. config.model.transformer.be_layers = (21, 22, 23) config.model.transformer.ens_size = 3 config.model.transformer.random_sign_init = -0.5 config.fast_weight_lr_multiplier = 1.0 # GP config. config.use_gp_layer = True config.gp_layer = ml_collections.ConfigDict() config.gp_layer.covmat_momentum = -1 config.gp_layer.ridge_penalty = 1. # No need to use mean field adjustment for pretraining. config.gp_layer.mean_field_factor = -1. # Optimizer section. 
config.optim_name = 'Momentum' config.optim = ml_collections.ConfigDict() config.grad_clip_norm = 1.0 config.weight_decay = None config.loss = 'softmax_xent' config.lr = ml_collections.ConfigDict() config.lr.base = 0.001 # set in sweep config.lr.warmup_steps = 0 # set in sweep config.lr.decay_type = 'cosine' return config def METHOD_NAME(hyper): """Sweeps over datasets.""" checkpoints = ['/path/to/pretrained_model_ckpt.npz'] # Apply a learning rate sweep following Table 4 of Vision Transformer paper. cifar10_sweep = hyper.product([ hyper.chainit([ hyper.product(sweep_utils.cifar10( hyper, steps=int(10_000 * s), warmup=int(500 * s))) for s in [0.5, 1.0, 1.5, 2.0] ]), hyper.sweep('config.lr.base', [0.03, 0.01, 0.003, 0.001]), hyper.sweep('config.gp_layer.mean_field_factor', [-1., 0.1, 0.2, 0.3, 0.5, 1., 2., 3., 5., 10., 20]) ]) cifar100_sweep = hyper.product([ hyper.chainit([ hyper.product(sweep_utils.cifar100( hyper, steps=int(10_000 * s), warmup=int(500 * s))) for s in [0.5, 1.0, 1.5, 2.0] ]), hyper.sweep('config.lr.base', [0.06, 0.03, 0.01, 0.006]), hyper.sweep('config.fast_weight_lr_multiplier', [0.5, 1.0, 2.0]), hyper.sweep('config.model.transformer.random_sign_init', [-0.5, 0.5]), hyper.sweep('config.gp_layer.mean_field_factor', [-1., 1e-8, 1e-7, 1e-6, 1e-5, 1e-4]) ]) return hyper.product([ hyper.chainit([ hyper.product([ cifar10_sweep, hyper.sweep('config.subpopl_cifar_data_file', CIFAR10_SUBPOPL_DATA_FILES) ]), hyper.product([ cifar100_sweep, hyper.sweep('config.subpopl_cifar_data_file', CIFAR100_SUBPOPL_DATA_FILES) ]), ]), hyper.sweep('config.model_init', checkpoints), ])
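Each sweep above works by assigning dotted config fields onto the ConfigDict returned by get_config(). A small sketch of that override mechanism using ml_collections directly, with illustrative values rather than the actual sweep driver:

import ml_collections

config = ml_collections.ConfigDict({'lr': {'base': 0.001, 'warmup_steps': 0},
                                    'gp_layer': {'mean_field_factor': -1.0}})
# one sweep point, expressed as flattened field paths
config.update_from_flattened_dict({'lr.base': 0.03,
                                   'gp_layer.mean_field_factor': 0.1})
print(config.lr.base, config.gp_layer.mean_field_factor)  # 0.03 0.1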
    318
    # coding=utf-8 # Copyright 2023 The Uncertainty Baselines Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for multimodal_utils.""" from jax import numpy as jnp import numpy as np import tensorflow as tf import multimodal_utils # local file import from experimental.multimodal class MultimodalUtilsTest(tf.test.TestCase): def test_contrastive_loss_logits(self): zimg = jnp.array([[1., 2., 3.], [4., 5., 6.], [1., 0., 0.]]) ztext = jnp.array([[-1., -2., -3.], [1., 2., 3.], [1., 0., 0.]]) _, logits = multimodal_utils.bidirectional_contrastive_loss(zimg, ztext) np.testing.assert_allclose( logits, jnp.array([[-14., 14., 1.], [-32., 32., 4.], [-1., 1., 1.]])) def test_contrastive_loss_no_reduction_no_mask(self): zimg = jnp.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) ztext = jnp.array([[1., 0., 0.], [0., 0., 1.], [0., 0., 1.]]) loss, logits = multimodal_utils.bidirectional_contrastive_loss( zimg, ztext, mask=None, reduction=False) np.testing.assert_allclose( logits, jnp.array([[1., 0., 0.], [0., 0., 0.], [0., 1., 1.]])) expected_loss = -0.5 * jnp.array([ jnp.log(jnp.e**2 / (jnp.e + 2)**2), jnp.log(1 / (3 * (jnp.e + 2))), jnp.log(jnp.e**2 / ((2 + jnp.e) * (1 + 2 * jnp.e))) ]) np.testing.assert_allclose(loss, expected_loss, atol=1e-6, rtol=1e-6) def test_contrastive_loss_reduction_no_mask(self): zimg = jnp.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) ztext = jnp.array([[1., 0., 0.], [0., 0., 1.], [0., 0., 1.]]) loss, logits = multimodal_utils.bidirectional_contrastive_loss( zimg, ztext, mask=None, reduction=True) np.testing.assert_allclose( logits, jnp.array([[1., 0., 0.], [0., 0., 0.], [0., 1., 1.]])) expected_loss = jnp.mean(-0.5 * jnp.array([ jnp.log(jnp.e**2 / (jnp.e + 2)**2), jnp.log(1 / (3 * (jnp.e + 2))), jnp.log(jnp.e**2 / ((2 + jnp.e) * (1 + 2 * jnp.e))) ])) np.testing.assert_allclose(loss, expected_loss, atol=1e-6, rtol=1e-6) def test_contrastive_loss_no_reduction_mask(self): zimg = jnp.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) ztext = jnp.array([[1., 0., 0.], [0., 0., 1.], [0., 0., 1.]]) loss, logits = multimodal_utils.bidirectional_contrastive_loss( zimg, ztext, mask=jnp.array([1, 1, 0]), reduction=False) np.testing.assert_allclose( logits, jnp.array([[1., 0., -jnp.inf], [0., 0., -jnp.inf], [-jnp.inf, -jnp.inf, -jnp.inf]])) expected_loss = -0.5 * jnp.array([ jnp.log(jnp.e**2 / (jnp.e + 1)**2), jnp.log(1 / 4), 0 ]) np.testing.assert_allclose(loss, expected_loss, atol=1e-6, rtol=1e-6) def METHOD_NAME(self): zimg = jnp.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) ztext = jnp.array([[1., 0., 0.], [0., 0., 1.], [0., 0., 1.]]) loss, logits = multimodal_utils.bidirectional_contrastive_loss( zimg, ztext, mask=jnp.array([1, 1, 0]), reduction=True) np.testing.assert_allclose( logits, jnp.array([[1., 0., -jnp.inf], [0., 0., -jnp.inf], [-jnp.inf, -jnp.inf, -jnp.inf]])) expected_loss = jnp.sum(-0.5 * jnp.array([ jnp.log(jnp.e**2 / (jnp.e + 1)**2), jnp.log(1 / 4) ])) / 2 np.testing.assert_allclose(loss, expected_loss, atol=1e-6, rtol=1e-6) if __name__ == "__main__": 
tf.test.main()
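The expected logits and losses in these tests are consistent with a dot-product similarity matrix plus a symmetric softmax cross-entropy taken on its diagonal. A minimal sketch of that computation (no masking, no reduction; not the module under test):

import jax
import jax.numpy as jnp

def contrastive_loss_sketch(zimg, ztext):
    # pairwise image/text similarities; matching pairs sit on the diagonal
    logits = jnp.dot(zimg, ztext.T)
    img_to_txt = jnp.diag(jax.nn.log_softmax(logits, axis=1))  # image -> its text
    txt_to_img = jnp.diag(jax.nn.log_softmax(logits, axis=0))  # text -> its image
    return -0.5 * (img_to_txt + txt_to_img), logits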
    319
    """Implements a cat command for xonsh.""" import os import sys import time import xonsh.procs.pipelines as xpp from xonsh.built_ins import XSH from xonsh.xoreutils.util import arg_handler, run_alias def _cat_line( f, sep, last_was_blank, line_count, opts, out, enc, enc_errors, read_size ): _r = r = f.readline(80) restore_newline = False if isinstance(_r, str): _r = r = _r.encode(enc, enc_errors) if r == b"": return last_was_blank, line_count, read_size, True if r.endswith(sep): _r = _r[: -len(sep)] restore_newline = True this_one_blank = _r == b"" if last_was_blank and this_one_blank and opts["squeeze_blank"]: return last_was_blank, line_count, read_size, False last_was_blank = this_one_blank if opts["number_all"] or (opts["number_nonblank"] and not this_one_blank): start = ("%6d " % line_count).encode(enc, enc_errors) _r = start + _r line_count += 1 if opts["show_ends"]: _r = _r + b"$" if restore_newline: _r = _r + sep out.buffer.write(_r) out.flush() read_size += len(r) return last_was_blank, line_count, read_size, False def _cat_single_file(opts, fname, stdin, out, err, line_count=1): env = XSH.env enc = env.get("XONSH_ENCODING") enc_errors = env.get("XONSH_ENCODING_ERRORS") read_size = 0 file_size = fobj = None if fname == "-": f = stdin or sys.stdin elif os.path.isdir(fname): print(f"cat: {fname}: Is a directory.", file=err) return True, line_count elif not os.path.exists(fname): print(f"cat: No such file or directory: {fname}", file=err) return True, line_count else: file_size = os.stat(fname).st_size if file_size == 0: file_size = None fobj = open(fname, "rb") f = xpp.NonBlockingFDReader(fobj.fileno(), timeout=0.1) sep = os.linesep.encode(enc, enc_errors) last_was_blank = False while file_size is None or read_size < file_size: try: last_was_blank, line_count, read_size, endnow = _cat_line( f, sep, last_was_blank, line_count, opts, out, enc, enc_errors, read_size, ) if endnow: break if last_was_blank: time.sleep(1e-3) except KeyboardInterrupt: print("got except", flush=True, file=out) break except Exception as e: print("xonsh:", e, flush=True, file=out) pass if fobj is not None: fobj.close() return False, line_count def cat(args, stdin, stdout, stderr): """A cat command for xonsh.""" opts = METHOD_NAME(args) if opts is None: print(CAT_HELP_STR, file=stdout) return 0 line_count = 1 errors = False if len(args) == 0: args = ["-"] for i in args: o = _cat_single_file(opts, i, stdin, stdout, stderr, line_count) if o is None: return -1 _e, line_count = o errors = _e or errors return int(errors) def METHOD_NAME(args): out = { "number_nonblank": False, "number_all": False, "squeeze_blank": False, "show_ends": False, } if "--help" in args: return arg_handler(args, out, "-b", "number_nonblank", True, "--number-nonblank") arg_handler(args, out, "-n", "number_all", True, "--number") arg_handler(args, out, "-E", "show_ends", True, "--show-ends") arg_handler(args, out, "-s", "squeeze_blank", True, "--squeeze-blank") arg_handler(args, out, "-T", "show_tabs", True, "--show-tabs") return out CAT_HELP_STR = """This version of cat was written in Python for the xonsh project: http://xon.sh Based on cat from GNU coreutils: http://www.gnu.org/software/coreutils/ Usage: cat [OPTION]... [FILE]... Concatenate FILE(s), or standard input, to standard output. 
-b, --number-nonblank number nonempty output lines, overrides -n -E, --show-ends display $ at end of each line -n, --number number all output lines -s, --squeeze-blank suppress repeated empty output lines -T, --show-tabs display TAB characters as ^I -u (ignored) --help display this help and exit With no FILE, or when FILE is -, read standard input. Examples: cat f - g Output f's contents, then standard input, then g's contents. cat Copy standard input to standard output.""" # NOT IMPLEMENTED: # -A, --show-all equivalent to -vET # -e equivalent to -vE # -t equivalent to -vT # -v, --show-nonprinting use ^ and M- notation, except for LFD and TAB # --version output version information and exit""" def main(args=None): run_alias("cat", args) if __name__ == "__main__": main()
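A standalone sketch of the numbering, squeeze-blank and show-ends behaviour those options control, using plain Python lists instead of xonsh's non-blocking reader:

def cat_lines_sketch(lines, number_nonblank=False, number_all=False,
                     squeeze_blank=False, show_ends=False):
    out, last_blank, n = [], False, 1
    for line in lines:
        blank = (line == "")
        if squeeze_blank and blank and last_blank:
            continue                      # collapse runs of blank lines
        last_blank = blank
        if number_all or (number_nonblank and not blank):
            line = "%6d  %s" % (n, line)
            n += 1
        if show_ends:
            line += "$"
        out.append(line)
    return out

print(cat_lines_sketch(["a", "", "", "b"], number_nonblank=True,
                       squeeze_blank=True, show_ends=True))
# ['     1  a$', '$', '     2  b$']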
    320
    from subprocess import CalledProcessError import pytest from galaxy.tool_util.deps.container_classes import DOCKER_CONTAINER_TYPE from galaxy.tool_util.deps.container_resolvers.mulled import ( CachedMulledDockerContainerResolver, CachedMulledSingularityContainerResolver, MulledDockerContainerResolver, ) from galaxy.tool_util.deps.containers import ContainerRegistry from galaxy.tool_util.deps.dependencies import ( AppInfo, ToolInfo, ) from galaxy.tool_util.deps.requirements import ToolRequirement SINGULARITY_IMAGES = ( "foo:1.0--bar", "baz:2.22", "mulled-v2-fe8a3b846bc50d24e5df78fa0b562c43477fe9ce:9f946d13f673ab2903cb0da849ad42916d619d18-0", ) @pytest.fixture def appinfo() -> AppInfo: return AppInfo( involucro_auto_init=True, enable_mulled_containers=True, container_image_cache_path=".", ) @pytest.fixture def container_registry(): app_info = AppInfo( involucro_auto_init=True, enable_mulled_containers=True, container_image_cache_path=".", ) return ContainerRegistry(app_info) def test_container_registry(container_registry, mocker): mocker.patch("galaxy.tool_util.deps.mulled.util._get_namespace", return_value=["samtools"]) tool_info = ToolInfo(requirements=[ToolRequirement(name="samtools", version="1.10", type="package")]) container_description = container_registry.find_best_container_description( [DOCKER_CONTAINER_TYPE], tool_info, install=False, ) assert container_description.type == "docker" assert "samtools:1.10" in container_description.identifier def test_docker_container_resolver_detects_docker_cli_absent(appinfo, mocker): mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.which", return_value=None) resolver = CachedMulledDockerContainerResolver(appinfo) assert resolver._cli_available is False def test_docker_container_resolver_detects_docker_cli(appinfo, mocker): """ - CachedMulledDockerContainerResolver properly detects present docker binary """ mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled", return_value="/bin/docker") resolver = CachedMulledDockerContainerResolver(appinfo) assert resolver.cli_available def test_cached_docker_container_docker_cli_absent_resolve(appinfo, mocker) -> None: mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.which", return_value=None) resolver = CachedMulledDockerContainerResolver(appinfo) assert resolver.cli_available is False assert resolver.resolve(enabled_container_types=[], tool_info=ToolInfo()) is None def test_docker_container_docker_cli_absent_resolve(appinfo, mocker): mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.which", return_value=None) resolver = MulledDockerContainerResolver(appinfo) assert resolver.cli_available is False requirement = ToolRequirement(name="samtools", version="1.10", type="package") tool_info = ToolInfo(requirements=[requirement]) mocker.patch( "galaxy.tool_util.deps.container_resolvers.mulled.targets_to_mulled_name", return_value="samtools:1.10--h2e538c0_3", ) container_description = resolver.resolve(enabled_container_types=["docker"], tool_info=tool_info) assert container_description assert container_description.type == "docker" assert container_description.identifier == "quay.io/biocontainers/samtools:1.10--h2e538c0_3" def METHOD_NAME(appinfo, mocker): mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.which", return_value="/bin/docker") resolver = MulledDockerContainerResolver(appinfo) assert resolver.cli_available is True requirement = ToolRequirement(name="samtools", version="1.10", type="package") tool_info = ToolInfo(requirements=[requirement]) 
mocker.patch( "galaxy.tool_util.deps.container_resolvers.mulled.targets_to_mulled_name", return_value="samtools:1.10--h2e538c0_3", ) mocker.patch( "galaxy.tool_util.deps.container_resolvers.mulled.docker_cached_container_description", side_effect=CalledProcessError(1, "bla"), ) container_description = resolver.resolve(enabled_container_types=["docker"], tool_info=tool_info, install=True) assert resolver.cli_available is True assert container_description assert container_description.type == "docker" assert container_description.identifier == "quay.io/biocontainers/samtools:1.10--h2e538c0_3" def test_cached_singularity_container_resolver_uncached(mocker): mocker.patch("os.listdir", return_value=SINGULARITY_IMAGES) mocker.patch("os.path.exists", return_value=True) mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.safe_makedirs") resolver = CachedMulledSingularityContainerResolver(app_info=mocker.Mock(container_image_cache_path="/")) requirement = ToolRequirement(name="foo", version="1.0", type="package") tool_info = ToolInfo(requirements=[requirement]) container_description = resolver.resolve(enabled_container_types=["singularity"], tool_info=tool_info) assert container_description assert container_description.type == "singularity" assert container_description.identifier == "/singularity/mulled/foo:1.0--bar" def test_cached_singularity_container_resolver_dir_mtime_cached(mocker): mocker.patch("os.listdir", return_value=SINGULARITY_IMAGES) mocker.patch("os.path.exists", return_value=True) mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.safe_makedirs") mocker.patch("os.stat", return_value=mocker.Mock(st_mtime=42)) resolver = CachedMulledSingularityContainerResolver( app_info=mocker.Mock(container_image_cache_path="/"), cache_directory_cacher_type="dir_mtime" ) requirement = ToolRequirement(name="baz", version="2.22", type="package") tool_info = ToolInfo(requirements=[requirement]) container_description = resolver.resolve(enabled_container_types=["singularity"], tool_info=tool_info) assert container_description assert container_description.type == "singularity" assert container_description.identifier == "/singularity/mulled/baz:2.22" requirement = ToolRequirement(name="foo", version="1.0", type="package") tool_info.requirements.append(requirement) container_description = resolver.resolve(enabled_container_types=["singularity"], tool_info=tool_info) assert container_description assert container_description.type == "singularity" assert ( container_description.identifier == "/singularity/mulled/mulled-v2-fe8a3b846bc50d24e5df78fa0b562c43477fe9ce:9f946d13f673ab2903cb0da849ad42916d619d18-0" )
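The identifiers asserted in these tests follow a simple composition: the mulled image name prefixed with the quay.io/biocontainers namespace for docker, or with the singularity cache directory for cached singularity images. A sketch of that composition (the real resolvers obtain the image name from the registry via targets_to_mulled_name):

def docker_identifier_sketch(mulled_name, namespace="biocontainers"):
    return f"quay.io/{namespace}/{mulled_name}"

def singularity_identifier_sketch(image_name, cache_path="/singularity/mulled"):
    return f"{cache_path}/{image_name}"

assert docker_identifier_sketch("samtools:1.10--h2e538c0_3") == \
    "quay.io/biocontainers/samtools:1.10--h2e538c0_3"
assert singularity_identifier_sketch("foo:1.0--bar") == "/singularity/mulled/foo:1.0--bar"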
    321
    ## \example domino/multiscale.py # We are interested in applying domino to problems systematically in a # multiscale manner. This script experiments with those approaches. from __future__ import print_function import IMP.domino import IMP.core import sys IMP.setup_from_argv(sys.argv, "multiscale") m = IMP.Model() m.set_log_level(IMP.SILENT) ds = [IMP.core.XYZR.setup_particle(IMP.Particle(m)) for i in range(0, 3)] for i, d in enumerate(ds): d.set_radius(1) IMP.display.Colored.setup_particle(d, IMP.display.get_display_color(i)) k = 1 h = IMP.core.Harmonic(0, k) r0 = IMP.core.SingletonRestraint( m, IMP.core.DistanceToSingletonScore(h, IMP.algebra.Vector3D(0, 0, 0)), ds[0].get_particle_index(), "0 at origin") r1 = IMP.core.SingletonRestraint( m, IMP.core.AttributeSingletonScore(h, IMP.core.XYZ.get_xyz_keys()[0]), ds[1].get_particle_index(), "1 on axis") rs = [r0, r1] for pr in [(0, 1), (1, 2), (0, 2)]: r = IMP.core.PairRestraint( m, IMP.core.HarmonicSphereDistancePairScore(0, k), (ds[pr[0]].get_particle_index(), ds[pr[1]].get_particle_index()), "R for " + str(pr)) rs.append(r) bb = IMP.algebra.BoundingBox2D(IMP.algebra.Vector2D(0, 0), IMP.algebra.Vector2D(4, 4)) covers = [] for i in range(0, 6): cur = IMP.algebra.get_grid_interior_cover_by_spacing(bb, 4.0 / 2 ** i) print(cur) covers.append([IMP.algebra.Vector3D(x[0], x[1], 0) for x in cur]) def METHOD_NAME(cover, scale): pst = IMP.domino.ParticleStatesTable() st = IMP.domino.XYZStates(cover) for p in ds: pst.set_particle_states(p, st) for r in rs: r.set_maximum_score(.5 * scale ** 2) lf = IMP.domino.ListSubsetFilterTable(pst) rc = IMP.domino.RestraintCache(pst) rc.add_restraints(rs) fs = [IMP.domino.RestraintScoreSubsetFilterTable(rc), lf] sampler = IMP.domino.DominoSampler(m, pst) sampler.set_restraints(rs) sampler.set_subset_filter_tables(fs) sampler.set_log_level(IMP.SILENT) return (sampler, lf, pst) (sampler, lf, pst) = METHOD_NAME(covers[0], 4.0) subset = IMP.domino.Subset(ds) ac = sampler.get_sample_assignments(subset) print(ac) def get_mapping(cover0, cover1): nn = IMP.algebra.NearestNeighbor3D(cover0) ret = [[] for c in cover0] for i, p in enumerate(cover1): nns = nn.get_nearest_neighbor(p) ret[nns].append(i) return ret mw = IMP.display.PymolWriter("mapping.pym") def display_mapping(index, cover0, cover1, mapping): mw.set_frame(index) for i, c in enumerate(mapping): for p in c: g = IMP.display.PointGeometry(cover1[p]) g.set_color(IMP.display.get_display_color(i)) g.set_name("fine") mw.add_geometry(g) for i, c in enumerate(cover0): g = IMP.display.PointGeometry(c) g.set_color(IMP.display.get_display_color(i)) g.set_name("coarse") mw.add_geometry(g) for curi in range(1, len(covers)): scale = 4.0 / 2 ** curi print(scale) mapping = get_mapping(covers[curi - 1], covers[curi]) print(mapping) display_mapping(curi - 1, covers[curi - 1], covers[curi], mapping) (sampler, lf, pst) = METHOD_NAME(covers[curi], scale) lac = ac cac = [] for a in lac: for i, p in enumerate(subset): s = a[i] allowed = mapping[s] lf.set_allowed_states(p, allowed) ccac = sampler.get_sample_assignments(subset) print(a, ccac) cac = cac + ccac ac = list(set(cac)) print("for scale", scale, "got", ac) sw = IMP.display.PymolWriter("solutions." + str(curi) + ".pym") for i, a in enumerate(ac): IMP.domino.load_particle_states(subset, a, pst) sw.set_frame(i) for p in ds: g = IMP.core.XYZRGeometry(p) sw.add_geometry(g) for c in covers[curi]: g = IMP.display.PointGeometry(c) g.set_color(IMP.display.Color(1, 1, 1)) g.set_name("grid") sw.add_geometry(g)
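get_mapping above leans on IMP's NearestNeighbor3D; the same coarse-to-fine assignment can be illustrated with plain numpy:

import numpy as np

def get_mapping_sketch(coarse, fine):
    # for each fine grid point, find the nearest coarse point and record
    # the fine index under that coarse point
    coarse = np.asarray(coarse, dtype=float)
    owners = [[] for _ in coarse]
    for i, p in enumerate(np.asarray(fine, dtype=float)):
        nearest = int(np.argmin(np.linalg.norm(coarse - p, axis=1)))
        owners[nearest].append(i)
    return owners

print(get_mapping_sketch([[0.0, 0.0], [4.0, 4.0]],
                         [[1.0, 1.0], [3.0, 3.0], [4.0, 5.0]]))  # [[0], [1, 2]]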
    322
    import numpy as np from base_test import ArkoudaTest from context import arkouda as ak SIZE = 10 K = 5 def make_array(): a = ak.randint(0, SIZE, SIZE) return a def compare_results(akres, sortedres) -> int: """ Compares the numpy and arkouda arrays via the numpy.allclose method with the default relative and absolute tolerances, returning 0 if the arrays are similar element-wise within the tolerances, 1 if they are dissimilar.element :return: 0 (identical) or 1 (dissimilar) :rtype: int """ akres = akres.to_ndarray() if not np.array_equal(akres, sortedres): akres = ak.array(akres) sortedres = ak.array(sortedres) innp = sortedres[ ak.in1d(ak.array(sortedres), ak.array(akres), True) ] # values in np array, but not ak array inak = akres[ ak.in1d(ak.array(akres), ak.array(sortedres), True) ] # values in ak array, not not np array print(f"(values in np but not ak: {innp}) (values in ak but not np: {inak})") return 1 return 0 def run_test(runMin=True, isInd=True, verbose=True): """ The run_test method runs execution of the mink reduction on a randomized array. :return: """ aka = make_array() failures = 0 try: if not isInd: if runMin: akres = ak.mink(aka, K) npres = np.sort(aka.to_ndarray())[:K] # first K elements from sorted array else: akres = ak.maxk(aka, K) npres = np.sort(aka.to_ndarray())[-K:] # last K elements from sorted array else: if runMin: akres = aka[ak.argmink(aka, K)] npres = np.sort(aka.to_ndarray())[:K] # first K elements from sorted array else: akres = aka[ak.argmaxk(aka, K)] npres = np.sort(aka.to_ndarray())[-K:] # last K elements from sorted array except RuntimeError as E: if verbose: print("Arkouda error: ", E) return 1 failures += compare_results(akres, npres) return failures class MinKTest(ArkoudaTest): def test_mink(self): """ Executes run_test and asserts whether there are any errors :return: None :raise: AssertionError if there are any errors encountered in run_test for set operations """ self.assertEqual(0, run_test()) def test_error_handling(self): testArray = ak.randint(0, 100, 100) with self.assertRaises(TypeError): ak.mink(list(range(0, 10)), 1) with self.assertRaises(TypeError): ak.mink(testArray, "1") with self.assertRaises(ValueError): ak.mink(testArray, -1) with self.assertRaises(ValueError): ak.mink(ak.array([]), 1) class MaxKTest(ArkoudaTest): def test_maxk(self): """ Executes run_test and asserts whether there are any errors :return: None :raise: AssertionError if there are any errors encountered in run_test for set operations """ self.assertEqual(0, run_test(runMin=False)) def test_error_handling(self): testArray = ak.randint(0, 100, 100) with self.assertRaises(TypeError): ak.maxk(list(range(0, 10)), 1) with self.assertRaises(TypeError): ak.maxk(testArray, "1") with self.assertRaises(ValueError): ak.maxk(testArray, -1) with self.assertRaises(ValueError): ak.maxk(ak.array([]), 1) class ArgMinKTest(ArkoudaTest): def METHOD_NAME(self): """ Executes run_test and asserts whether there are any errors :return: None :raise: AssertionError if there are any errors encountered in run_test for set operations """ self.assertEqual(0, run_test(isInd=True)) def test_error_handling(self): testArray = ak.randint(0, 100, 100) with self.assertRaises(TypeError): ak.argmink(list(range(0, 10)), 1) with self.assertRaises(TypeError): ak.argmink(testArray, "1") with self.assertRaises(ValueError): ak.argmink(testArray, -1) with self.assertRaises(ValueError): ak.argmink(ak.array([]), 1) class ArgMaxKTest(ArkoudaTest): def test_argmaxk(self): """ Executes run_test and asserts whether 
there are any errors :return: None :raise: AssertionError if there are any errors encountered in run_test for set operations """ self.assertEqual(0, run_test(runMin=False, isInd=True)) def test_error_handling(self): testArray = ak.randint(0, 100, 100) with self.assertRaises(TypeError): ak.argmaxk(list(range(0, 10)), 1) with self.assertRaises(TypeError): ak.argmaxk(testArray, "1") with self.assertRaises(ValueError): ak.argmaxk(testArray, -1) with self.assertRaises(ValueError): ak.argmaxk(ak.array([]), 1) class ArgMinTest(ArkoudaTest): def test_argmin(self): np_arr = np.array([False, False, True, True, False]) ak_arr = ak.array(np_arr) self.assertEqual(np_arr.argmin(), ak_arr.argmin()) class ArgMaxTest(ArkoudaTest): def test_argmax(self): np_arr = np.array([False, False, True, True, False]) ak_arr = ak.array(np_arr) self.assertEqual(np_arr.argmax(), ak_arr.argmax())
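The reference values these tests compare against are just the first or last K entries of the fully sorted array; numpy's partial sort yields the same values without a full sort. A small numpy-only sketch:

import numpy as np

a = np.random.randint(0, 10, 10)
K = 5

mink_ref = np.sort(a)[:K]                              # reference used by the tests
mink_partial = np.sort(np.partition(a, K - 1)[:K])     # partial sort, same values
assert np.array_equal(mink_ref, mink_partial)

maxk_ref = np.sort(a)[-K:]
maxk_partial = np.sort(np.partition(a, len(a) - K)[-K:])
assert np.array_equal(maxk_ref, maxk_partial)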
    323
    import random from sc2 import maps from sc2.bot_ai import BotAI from sc2.data import Difficulty, Race from sc2.ids.ability_id import AbilityId from sc2.ids.unit_typeid import UnitTypeId from sc2.ids.upgrade_id import UpgradeId from sc2.main import run_game from sc2.player import Bot, Computer from sc2.position import Point2 from sc2.unit import Unit from sc2.units import Units class Hydralisk(BotAI): def select_target(self) -> Point2: if self.enemy_structures: return random.choice(self.enemy_structures).position return self.enemy_start_locations[0] # pylint: disable=R0912 async def METHOD_NAME(self, iteration): larvae: Units = self.larva forces: Units = self.units.of_type({UnitTypeId.ZERGLING, UnitTypeId.HYDRALISK}) # Send all idle lings + hydras to attack-move if we have at least 10 hydras, every 400th frame if self.units(UnitTypeId.HYDRALISK).amount >= 10 and iteration % 50 == 0: for unit in forces.idle: unit.attack(self.select_target()) # If supply is low, train overlords if self.supply_left < 2 and larvae and self.can_afford(UnitTypeId.OVERLORD): larvae.random.train(UnitTypeId.OVERLORD) return # If hydra den is ready and idle, research upgrades hydra_dens = self.structures(UnitTypeId.HYDRALISKDEN) if hydra_dens: for hydra_den in hydra_dens.ready.idle: if self.already_pending_upgrade(UpgradeId.EVOLVEGROOVEDSPINES ) == 0 and self.can_afford(UpgradeId.EVOLVEGROOVEDSPINES): hydra_den.research(UpgradeId.EVOLVEGROOVEDSPINES) elif self.already_pending_upgrade(UpgradeId.EVOLVEMUSCULARAUGMENTS ) == 0 and self.can_afford(UpgradeId.EVOLVEMUSCULARAUGMENTS): hydra_den.research(UpgradeId.EVOLVEMUSCULARAUGMENTS) # If hydra den is ready, train hydra if larvae and self.can_afford(UnitTypeId.HYDRALISK) and self.structures(UnitTypeId.HYDRALISKDEN).ready: larvae.random.train(UnitTypeId.HYDRALISK) return # If all our townhalls are dead, send all our units to attack if not self.townhalls: for unit in self.units.of_type( {UnitTypeId.DRONE, UnitTypeId.QUEEN, UnitTypeId.ZERGLING, UnitTypeId.HYDRALISK} ): unit.attack(self.enemy_start_locations[0]) return hq: Unit = self.townhalls.first # Send idle queens with >=25 energy to inject for queen in self.units(UnitTypeId.QUEEN).idle: # The following checks if the inject ability is in the queen abilitys - basically it checks if we have enough energy and if the ability is off-cooldown # abilities = await self.get_available_abilities(queen) # if AbilityId.EFFECT_INJECTLARVA in abilities: if queen.energy >= 25: queen(AbilityId.EFFECT_INJECTLARVA, hq) # Build spawning pool if self.structures(UnitTypeId.SPAWNINGPOOL).amount + self.already_pending(UnitTypeId.SPAWNINGPOOL) == 0: if self.can_afford(UnitTypeId.SPAWNINGPOOL): await self.build(UnitTypeId.SPAWNINGPOOL, near=hq.position.towards(self.game_info.map_center, 5)) # Upgrade to lair if spawning pool is complete if self.structures(UnitTypeId.SPAWNINGPOOL).ready: if hq.is_idle and not self.townhalls(UnitTypeId.LAIR): if self.can_afford(UnitTypeId.LAIR): hq.build(UnitTypeId.LAIR) # If lair is ready and we have no hydra den on the way: build hydra den if self.townhalls(UnitTypeId.LAIR).ready: if self.structures(UnitTypeId.HYDRALISKDEN).amount + self.already_pending(UnitTypeId.HYDRALISKDEN) == 0: if self.can_afford(UnitTypeId.HYDRALISKDEN): await self.build(UnitTypeId.HYDRALISKDEN, near=hq.position.towards(self.game_info.map_center, 5)) # If we dont have both extractors: build them if ( self.structures(UnitTypeId.SPAWNINGPOOL) and self.gas_buildings.amount + self.already_pending(UnitTypeId.EXTRACTOR) < 2 ): if 
self.can_afford(UnitTypeId.EXTRACTOR): # May crash if we dont have any drones for vg in self.vespene_geyser.closer_than(10, hq): drone: Unit = self.workers.random drone.build_gas(vg) break # If we have less than 22 drones, build drones if self.supply_workers + self.already_pending(UnitTypeId.DRONE) < 22: if larvae and self.can_afford(UnitTypeId.DRONE): larva: Unit = larvae.random larva.train(UnitTypeId.DRONE) return # Saturate gas for a in self.gas_buildings: if a.assigned_harvesters < a.ideal_harvesters: w: Units = self.workers.closer_than(10, a) if w: w.random.gather(a) # Build queen once the pool is done if self.structures(UnitTypeId.SPAWNINGPOOL).ready: if not self.units(UnitTypeId.QUEEN) and hq.is_idle: if self.can_afford(UnitTypeId.QUEEN): hq.train(UnitTypeId.QUEEN) # Train zerglings if we have much more minerals than vespene (not enough gas for hydras) if self.units(UnitTypeId.ZERGLING).amount < 20 and self.minerals > 1000: if larvae and self.can_afford(UnitTypeId.ZERGLING): larvae.random.train(UnitTypeId.ZERGLING) def main(): run_game( maps.get("(2)CatalystLE"), [Bot(Race.Zerg, Hydralisk()), Computer(Race.Terran, Difficulty.Medium)], realtime=False, save_replay_as="ZvT.SC2Replay", ) if __name__ == "__main__": main()
    324
    from __future__ import annotations import ast from abc import ABC from typing import Dict, List, Optional, Set, Tuple from boa3.internal.model import set_internal_call from boa3.internal.model.expression import IExpression from boa3.internal.model.type.type import IType, Type from boa3.internal.model.variable import Variable from boa3.internal.neo.vm.VMCode import VMCode class Callable(IExpression, ABC): """ A class used to represent a function or a class method :ivar args: a dictionary that maps each arg with its name. Empty by default. :ivar is_public: a boolean value that specifies if the method is public. False by default. :ivar return_type: the return type of the method. None by default. """ def __init__(self, args: Dict[str, Variable] = None, vararg: Optional[Tuple[str, Variable]] = None, kwargs: Optional[Dict[str, Variable]] = None, defaults: List[ast.AST] = None, return_type: IType = Type.none, is_public: bool = False, decorators: List[Callable] = None, external_name: str = None, is_safe: bool = False, origin_node: Optional[ast.AST] = None): if args is None: args = {} self.args: Dict[str, Variable] = args.copy() if not isinstance(defaults, list): defaults = [] self.defaults: List[ast.AST] = defaults self._vararg: Optional[Tuple[str, Variable]] = None if (isinstance(vararg, tuple) and len(vararg) == 2 and isinstance(vararg[0], str) and isinstance(vararg[1], Variable)): from boa3.internal.model.type.typeutils import TypeUtils vararg_id, vararg_var = vararg if vararg_var.type is not Type.any: default_code = "{0}({1}, {2})".format(TypeUtils.cast.raw_identifier, Type.tuple.build_collection(vararg_var.type), Type.tuple.default_value) else: default_code = "{0}".format(Type.tuple.default_value) default_value = set_internal_call(ast.parse(default_code).body[0].value) self.args[vararg_id] = Variable(Type.tuple.build_collection([vararg_var.type])) self.defaults.append(default_value) self._vararg = vararg if kwargs is None: kwargs = {} self._kwargs: Dict[str, Variable] = kwargs.copy() self.return_type: IType = return_type if decorators is None: decorators = [] from boa3.internal.model.decorator import IDecorator self.decorators: List[IDecorator] = [decorator for decorator in decorators if isinstance(decorator, IDecorator)] from boa3.internal.model.builtin.decorator import PublicDecorator public_decorator = next((decorator for decorator in self.decorators if isinstance(decorator, PublicDecorator)), None) self.is_public: bool = is_public or public_decorator is not None if self.is_public: if isinstance(public_decorator, PublicDecorator): external_name = public_decorator.name elif self.defined_by_entry: external_name = None self.external_name: Optional[str] = external_name self.is_safe: bool = is_safe or (isinstance(public_decorator, PublicDecorator) and public_decorator.safe) self._self_calls: Set[ast.AST] = set() super().__init__(origin_node) self.init_address: Optional[int] = None self.init_bytecode: Optional[VMCode] = None self.init_defaults_bytecode: Optional[VMCode] = None self.end_bytecode: Optional[VMCode] = None @property def type(self) -> IType: return self.return_type @property def symbols(self) -> Dict[str, Variable]: """ Gets all the symbols in the method :return: a dictionary that maps each symbol in the module with its name """ return self.args.copy() @property def METHOD_NAME(self) -> Dict[str, Variable]: num_defaults = len(self.defaults) if num_defaults > 0: return {key: self.args[key] for key in list(self.args.keys())[:-num_defaults]} return self.args @property def 
has_cls_or_self(self) -> bool: return any(decorator.has_cls_or_self for decorator in self.decorators) @property def cls_or_self_type(self) -> Optional[IType]: if not self.has_cls_or_self or len(self.args) == 0: return None return list(self.args.values())[0].type @property def has_starred_argument(self) -> bool: return self._vararg is not None @property def start_address(self) -> Optional[int]: """ Gets the address where this method starts in the bytecode :return: the first address of the method """ if self.init_bytecode is None and self.init_defaults_bytecode is None: return self.init_address else: from boa3.internal.compiler.codegenerator.vmcodemapping import VMCodeMapping return VMCodeMapping.instance().get_start_address(self.init_bytecode) @property def start_bytecode(self) -> Optional[VMCode]: return (self.init_defaults_bytecode if len(self.defaults) > 0 else self.init_bytecode) @property def end_address(self) -> Optional[int]: """ Gets the address of this method's last operation in the bytecode :return: the last address of the method """ if self.end_bytecode is None: return self.start_address else: from boa3.internal.compiler.codegenerator.vmcodemapping import VMCodeMapping return VMCodeMapping.instance().get_end_address(self.end_bytecode) @property def is_called(self) -> bool: return len(self._self_calls) > 0 def reset_calls(self): self._self_calls.clear() @property def is_compiled(self) -> bool: return self.start_address is not None and self.end_address is not None def add_call_origin(self, origin: ast.AST) -> bool: try: self._self_calls.add(origin) return True except BaseException: return False def __str__(self) -> str: args_types: List[str] = [str(arg.type) for arg in self.args.values()] if self.return_type is not Type.none: signature = '({0}) -> {1}'.format(', '.join(args_types), self.return_type) else: signature = '({0})'.format(', '.join(args_types)) public = 'public ' if self.is_public else '' return '{0}{1}'.format(public, signature) def __repr__(self) -> str: name = self.identifier if hasattr(self, 'identifier') else self.__class__.__name__ return f'{name}{str(self)}'
    325
    """ testing import """ import pathlib from unittest.mock import patch import datetime import pytz from django.test import TestCase from bookwyrm import models from bookwyrm.importers import GoodreadsImporter from bookwyrm.models.import_job import handle_imported_book def make_date(*args): """helper function to easily generate a date obj""" return datetime.datetime(*args, tzinfo=pytz.UTC) # pylint: disable=consider-using-with @patch("bookwyrm.suggested_users.rerank_suggestions_task.delay") @patch("bookwyrm.activitystreams.populate_stream_task.delay") @patch("bookwyrm.activitystreams.add_book_statuses_task.delay") class GoodreadsImport(TestCase): """importing from goodreads csv""" # pylint: disable=invalid-name def setUp(self): """use a test csv""" self.importer = GoodreadsImporter() datafile = pathlib.Path(__file__).parent.joinpath("../data/goodreads.csv") self.csv = open(datafile, "r", encoding=self.importer.encoding) with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch( "bookwyrm.activitystreams.populate_stream_task.delay" ), patch("bookwyrm.lists_stream.populate_lists_task.delay"): self.local_user = models.User.objects.create_user( "mouse", "[email protected]", "password", local=True ) models.SiteSettings.objects.create() work = models.Work.objects.create(title="Test Work") self.book = models.Edition.objects.create( title="Example Edition", remote_id="https://example.com/book/1", parent_work=work, ) def METHOD_NAME(self, *_): """creates the import job entry and checks csv""" import_job = self.importer.create_job( self.local_user, self.csv, False, "public" ) import_items = models.ImportItem.objects.filter(job=import_job).all() self.assertEqual(len(import_items), 3) self.assertEqual(import_items[0].index, 0) self.assertEqual(import_items[0].data["Book Id"], "42036538") self.assertEqual(import_items[0].normalized_data["isbn_13"], '="9781250313195"') self.assertEqual(import_items[0].normalized_data["isbn_10"], '="1250313198"') self.assertEqual(import_items[1].index, 1) self.assertEqual(import_items[1].data["Book Id"], "52691223") self.assertEqual(import_items[2].index, 2) self.assertEqual(import_items[2].data["Book Id"], "28694510") def test_create_retry_job(self, *_): """trying again with items that didn't import""" import_job = self.importer.create_job( self.local_user, self.csv, False, "unlisted" ) import_items = models.ImportItem.objects.filter(job=import_job).all()[:2] retry = self.importer.create_retry_job( self.local_user, import_job, import_items ) self.assertNotEqual(import_job, retry) self.assertEqual(retry.user, self.local_user) self.assertEqual(retry.include_reviews, False) self.assertEqual(retry.privacy, "unlisted") retry_items = models.ImportItem.objects.filter(job=retry).all() self.assertEqual(len(retry_items), 2) self.assertEqual(retry_items[0].index, 0) self.assertEqual(retry_items[0].data["Book Id"], "42036538") self.assertEqual(retry_items[1].index, 1) self.assertEqual(retry_items[1].data["Book Id"], "52691223") def test_handle_imported_book(self, *_): """goodreads import added a book, this adds related connections""" shelf = self.local_user.shelf_set.filter( identifier=models.Shelf.READ_FINISHED ).first() self.assertIsNone(shelf.books.first()) import_job = self.importer.create_job( self.local_user, self.csv, False, "public" ) import_item = import_job.items.first() import_item.book = self.book import_item.save() with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"): handle_imported_book(import_item) shelf.refresh_from_db() 
self.assertEqual(shelf.books.first(), self.book) self.assertEqual( shelf.shelfbook_set.first().shelved_date, make_date(2020, 10, 21) ) readthrough = models.ReadThrough.objects.get(user=self.local_user) self.assertEqual(readthrough.book, self.book) self.assertEqual(readthrough.start_date, make_date(2020, 10, 21)) self.assertEqual(readthrough.finish_date, make_date(2020, 10, 25)) @patch("bookwyrm.activitystreams.add_status_task.delay") def test_handle_imported_book_review(self, *_): """goodreads review import""" import_job = self.importer.create_job( self.local_user, self.csv, True, "unlisted" ) import_item = import_job.items.get(index=2) import_item.book = self.book import_item.save() with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"): handle_imported_book(import_item) review = models.Review.objects.get(book=self.book, user=self.local_user) self.assertEqual(review.content, "mixed feelings") self.assertEqual(review.rating, 2) self.assertEqual(review.published_date, make_date(2019, 7, 8)) self.assertEqual(review.privacy, "unlisted") @patch("bookwyrm.activitystreams.add_status_task.delay") def test_handle_imported_book_rating(self, *_): """goodreads rating import""" import_job = self.importer.create_job( self.local_user, self.csv, True, "unlisted" ) import_item = import_job.items.filter(index=0).first() import_item.book = self.book import_item.save() with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"): handle_imported_book(import_item) review = models.ReviewRating.objects.get(book=self.book, user=self.local_user) self.assertIsInstance(review, models.ReviewRating) self.assertEqual(review.rating, 3) self.assertEqual(review.published_date, make_date(2020, 10, 25)) self.assertEqual(review.privacy, "unlisted")
    326
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkdomain.endpoint import endpoint_data class SaveTaskForUpdatingRegistrantInfoByIdentityCredentialRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Domain', '2018-01-29', 'SaveTaskForUpdatingRegistrantInfoByIdentityCredential') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_Country(self): # String return self.get_query_params().get('Country') def set_Country(self, Country): # String self.add_query_param('Country', Country) def get_IdentityCredentialType(self): # String return self.get_query_params().get('IdentityCredentialType') def set_IdentityCredentialType(self, IdentityCredentialType): # String self.add_query_param('IdentityCredentialType', IdentityCredentialType) def get_City(self): # String return self.get_query_params().get('City') def set_City(self, City): # String self.add_query_param('City', City) def get_IdentityCredential(self): # String return self.get_body_params().get('IdentityCredential') def set_IdentityCredential(self, IdentityCredential): # String self.add_body_params('IdentityCredential', IdentityCredential) def get_TransferOutProhibited(self): # Boolean return self.get_query_params().get('TransferOutProhibited') def set_TransferOutProhibited(self, TransferOutProhibited): # Boolean self.add_query_param('TransferOutProhibited', TransferOutProhibited) def get_ZhCity(self): # String return self.get_query_params().get('ZhCity') def set_ZhCity(self, ZhCity): # String self.add_query_param('ZhCity', ZhCity) def get_TelExt(self): # String return self.get_query_params().get('TelExt') def set_TelExt(self, TelExt): # String self.add_query_param('TelExt', TelExt) def get_Province(self): # String return self.get_query_params().get('Province') def set_Province(self, Province): # String self.add_query_param('Province', Province) def METHOD_NAME(self): # String return self.get_query_params().get('ZhRegistrantName') def set_ZhRegistrantName(self, ZhRegistrantName): # String self.add_query_param('ZhRegistrantName', ZhRegistrantName) def get_PostalCode(self): # String return self.get_query_params().get('PostalCode') def set_PostalCode(self, PostalCode): # String self.add_query_param('PostalCode', PostalCode) def get_Lang(self): # String return self.get_query_params().get('Lang') def set_Lang(self, Lang): # String self.add_query_param('Lang', Lang) def get_Email(self): # String return self.get_query_params().get('Email') def set_Email(self, Email): # String self.add_query_param('Email', Email) def get_ZhRegistrantOrganization(self): # String return 
self.get_query_params().get('ZhRegistrantOrganization') def set_ZhRegistrantOrganization(self, ZhRegistrantOrganization): # String self.add_query_param('ZhRegistrantOrganization', ZhRegistrantOrganization) def get_Address(self): # String return self.get_query_params().get('Address') def set_Address(self, Address): # String self.add_query_param('Address', Address) def get_TelArea(self): # String return self.get_query_params().get('TelArea') def set_TelArea(self, TelArea): # String self.add_query_param('TelArea', TelArea) def get_ZhAddress(self): # String return self.get_query_params().get('ZhAddress') def set_ZhAddress(self, ZhAddress): # String self.add_query_param('ZhAddress', ZhAddress) def get_RegistrantType(self): # String return self.get_query_params().get('RegistrantType') def set_RegistrantType(self, RegistrantType): # String self.add_query_param('RegistrantType', RegistrantType) def get_DomainNames(self): # RepeatList return self.get_query_params().get('DomainName') def set_DomainNames(self, DomainName): # RepeatList for depth1 in range(len(DomainName)): self.add_query_param('DomainName.' + str(depth1 + 1), DomainName[depth1]) def get_Telephone(self): # String return self.get_query_params().get('Telephone') def set_Telephone(self, Telephone): # String self.add_query_param('Telephone', Telephone) def get_ZhProvince(self): # String return self.get_query_params().get('ZhProvince') def set_ZhProvince(self, ZhProvince): # String self.add_query_param('ZhProvince', ZhProvince) def get_RegistrantOrganization(self): # String return self.get_query_params().get('RegistrantOrganization') def set_RegistrantOrganization(self, RegistrantOrganization): # String self.add_query_param('RegistrantOrganization', RegistrantOrganization) def get_UserClientIp(self): # String return self.get_query_params().get('UserClientIp') def set_UserClientIp(self, UserClientIp): # String self.add_query_param('UserClientIp', UserClientIp) def get_IdentityCredentialNo(self): # String return self.get_query_params().get('IdentityCredentialNo') def set_IdentityCredentialNo(self, IdentityCredentialNo): # String self.add_query_param('IdentityCredentialNo', IdentityCredentialNo) def get_RegistrantName(self): # String return self.get_query_params().get('RegistrantName') def set_RegistrantName(self, RegistrantName): # String self.add_query_param('RegistrantName', RegistrantName)
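A short sketch of how the RepeatList parameter above is flattened into indexed query parameters, using only the class defined here and illustrative domain names:

request = SaveTaskForUpdatingRegistrantInfoByIdentityCredentialRequest()
request.set_DomainNames(['example.com', 'example.org'])
# the query params now contain DomainName.1=example.com and DomainName.2=example.org
print(request.get_query_params())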
    null
    327
    # Copyright (c) 2022 The Regents of the University of California # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest import tempfile import os import shutil from pathlib import Path from gem5.resources.md5_utils import md5_file, md5_dir class MD5FileTestSuite(unittest.TestCase): """Test cases for gem5.resources.md5_utils.md5_file()""" def test_md5FileConsistency(self) -> None: # This test ensures the md5 algorithm we use does not change the md5 # value over time. file = tempfile.NamedTemporaryFile(mode="w", delete=False) file.write("This is a test string, to be put in a temp file") file.close() md5 = md5_file(Path(file.name)) os.remove(file.name) self.assertEquals("b113b29fce251f2023066c3fda2ec9dd", md5) def METHOD_NAME(self) -> None: # This test ensures that two files with exactly the same contents have # the same md5 value. test_str = "This is a test" file = tempfile.NamedTemporaryFile(mode="w", delete=False) file.write(test_str) file.close() first_file_md5 = md5_file(Path(file.name)) os.remove(file.name) file = tempfile.NamedTemporaryFile(mode="w", delete=False) file.write(test_str) file.close() second_file_md5 = md5_file(Path(file.name)) os.remove(file.name) self.assertEquals(first_file_md5, second_file_md5) class MD5DirTestSuite(unittest.TestCase): """Test cases for gem5.resources.md5_utils.md5_dir()""" def _create_temp_directory(self) -> Path: dir = tempfile.mkdtemp() with open(os.path.join(dir, "file1"), "w") as f: f.write("Some test data here") with open(os.path.join(dir, "file2"), "w") as f: f.write("Some more test data") os.mkdir(os.path.join(dir, "dir2")) with open(os.path.join(dir, "dir2", "file1"), "w") as f: f.write("Yet more data") return Path(dir) def test_md5DirConsistency(self) -> None: # This test ensures the md5 algorithm we use does not change the value # given for directories over time. 
dir = self._create_temp_directory() md5 = md5_dir(dir) shutil.rmtree(dir) self.assertEquals("ad5ac785de44c9fc2fe2798cab2d7b1a", md5) def test_identicalDirsIdenticalMd5(self) -> None: # This test ensures that two directories with exactly the same contents # have the same md5 value. dir1 = self._create_temp_directory() first_md5 = md5_dir(dir1) shutil.rmtree(dir1) dir2 = self._create_temp_directory() second_md5 = md5_dir(dir2) shutil.rmtree(dir2) self.assertEquals(first_md5, second_md5)
    null
    328
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkclickhouse.endpoint import endpoint_data class OperateLogHubRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'clickhouse', '2019-11-11', 'OperateLogHub') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_ResourceOwnerId(self): # Long return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self, ResourceOwnerId): # Long self.add_query_param('ResourceOwnerId', ResourceOwnerId) def get_Description(self): # String return self.get_query_params().get('Description') def set_Description(self, Description): # String self.add_query_param('Description', Description) def get_UseLorne(self): # Boolean return self.get_query_params().get('UseLorne') def set_UseLorne(self, UseLorne): # Boolean self.add_query_param('UseLorne', UseLorne) def get_DeliverName(self): # String return self.get_query_params().get('DeliverName') def set_DeliverName(self, DeliverName): # String self.add_query_param('DeliverName', DeliverName) def get_DeliverTime(self): # String return self.get_query_params().get('DeliverTime') def set_DeliverTime(self, DeliverTime): # String self.add_query_param('DeliverTime', DeliverTime) def get_DomainUrl(self): # String return self.get_query_params().get('DomainUrl') def set_DomainUrl(self, DomainUrl): # String self.add_query_param('DomainUrl', DomainUrl) def get_Password(self): # String return self.get_query_params().get('Password') def set_Password(self, Password): # String self.add_query_param('Password', Password) def get_AccessKey(self): # String return self.get_query_params().get('AccessKey') def set_AccessKey(self, AccessKey): # String self.add_query_param('AccessKey', AccessKey) def get_Create(self): # Boolean return self.get_query_params().get('Create') def set_Create(self, Create): # Boolean self.add_query_param('Create', Create) def get_TableName(self): # String return self.get_query_params().get('TableName') def set_TableName(self, TableName): # String self.add_query_param('TableName', TableName) def get_TaskId(self): # String return self.get_query_params().get('TaskId') def set_TaskId(self, TaskId): # String self.add_query_param('TaskId', TaskId) def get_ProjectName(self): # String return self.get_query_params().get('ProjectName') def set_ProjectName(self, ProjectName): # String self.add_query_param('ProjectName', ProjectName) def get_SchemaName(self): # String return self.get_query_params().get('SchemaName') def set_SchemaName(self, SchemaName): # String self.add_query_param('SchemaName', SchemaName) def get_AccessSecret(self): 
# String return self.get_query_params().get('AccessSecret') def set_AccessSecret(self, AccessSecret): # String self.add_query_param('AccessSecret', AccessSecret) def get_LogStoreName(self): # String return self.get_query_params().get('LogStoreName') def set_LogStoreName(self, LogStoreName): # String self.add_query_param('LogStoreName', LogStoreName) def get_ResourceOwnerAccount(self): # String return self.get_query_params().get('ResourceOwnerAccount') def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount) def get_DBClusterId(self): # String return self.get_query_params().get('DBClusterId') def METHOD_NAME(self, DBClusterId): # String self.add_query_param('DBClusterId', DBClusterId) def get_OwnerAccount(self): # String return self.get_query_params().get('OwnerAccount') def set_OwnerAccount(self, OwnerAccount): # String self.add_query_param('OwnerAccount', OwnerAccount) def get_OwnerId(self): # Long return self.get_query_params().get('OwnerId') def set_OwnerId(self, OwnerId): # Long self.add_query_param('OwnerId', OwnerId) def get_LogHubStoress(self): # RepeatList return self.get_query_params().get('LogHubStores') def set_LogHubStoress(self, LogHubStores): # RepeatList for depth1 in range(len(LogHubStores)): if LogHubStores[depth1].get('LogKey') is not None: self.add_query_param('LogHubStores.' + str(depth1 + 1) + '.LogKey', LogHubStores[depth1].get('LogKey')) if LogHubStores[depth1].get('FieldKey') is not None: self.add_query_param('LogHubStores.' + str(depth1 + 1) + '.FieldKey', LogHubStores[depth1].get('FieldKey')) if LogHubStores[depth1].get('Type') is not None: self.add_query_param('LogHubStores.' + str(depth1 + 1) + '.Type', LogHubStores[depth1].get('Type')) def get_FilterDirtyData(self): # Boolean return self.get_query_params().get('FilterDirtyData') def set_FilterDirtyData(self, FilterDirtyData): # Boolean self.add_query_param('FilterDirtyData', FilterDirtyData) def get_UserName(self): # String return self.get_query_params().get('UserName') def set_UserName(self, UserName): # String self.add_query_param('UserName', UserName)
    null
    329
    """script for compiling elm source and dumping it to the js folder.""" import functools import io import logging import pygments from xonsh.color_tools import rgb_to_ints from xonsh.prompt.base import PromptFormatter, default_prompt from xonsh.pyghooks import ( Token, XonshHtmlFormatter, XonshLexer, XonshStyle, xonsh_style_proxy, ) from xonsh.pygments_cache import get_all_styles from xonsh.style_tools import partial_color_tokenize from xonsh.xontribs import Xontrib, get_xontribs # $RAISE_SUBPROC_ERROR = True # $XONSH_SHOW_TRACEBACK = False # # helper funcs # @functools.lru_cache(maxsize=4) def get_rst_formatter(**kwargs): from pygments.formatters.html import HtmlFormatter from pygments.lexers.markup import RstLexer return RstLexer(), HtmlFormatter(**kwargs) def METHOD_NAME(s): return s.replace(r"\n", "<br/>") def invert_color(orig): r, g, b = rgb_to_ints(orig) inverted = [255 - r, 255 - g, 255 - b] new = [hex(n)[2:] for n in inverted] new = [n if len(n) == 2 else "0" + n for n in new] return "".join(new) def html_format(s, style="default"): buf = io.StringIO() proxy_style = xonsh_style_proxy(XonshStyle(style)) # make sure we have a foreground color fgcolor = proxy_style._styles[Token.Text][0] if not fgcolor: fgcolor = invert_color(proxy_style.background_color[1:].strip("#")) # need to generate stream before creating formatter so that all tokens actually exist if isinstance(s, str): token_stream = partial_color_tokenize(s) else: token_stream = s formatter = XonshHtmlFormatter( wrapcode=True, noclasses=True, style=proxy_style, prestyles="margin: 0em; padding: 0.5em 0.1em; color: #" + fgcolor, cssstyles="border-style: solid; border-radius: 5px", ) formatter.format(token_stream, buf) return buf.getvalue() def rst_to_html(text): try: from pygments import highlight lexer, formatter = get_rst_formatter( noclasses=True, cssstyles="background: transparent", style="monokai", # a dark bg style ) return highlight(text, lexer, formatter) except ImportError: return text # render prompts def get_named_prompts(): return [ ( "default", default_prompt(), ), ("debian chroot", "{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}{RESET}> "), ("minimalist", "{BOLD_GREEN}{cwd_base}{RESET} ) "), ( "terlar", "{env_name}{BOLD_GREEN}{user}{RESET}@{hostname}:" "{BOLD_GREEN}{cwd}{RESET}|{gitstatus}\n{BOLD_INTENSE_RED}➤{RESET} ", ), ( "default with git status", "{env_name}{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}" "{branch_color}{gitstatus: {}}{RESET} {BOLD_BLUE}" "{prompt_end}{RESET} ", ), ("robbyrussell", "{BOLD_INTENSE_RED}➜ {CYAN}{cwd_base} {gitstatus}{RESET} "), ("just a dollar", "$ "), ( "simple pythonista", "{INTENSE_RED}{user}{RESET} at {INTENSE_PURPLE}{hostname}{RESET} " "in {BOLD_GREEN}{cwd}{RESET}\n↪ ", ), ( "informative", "[{localtime}] {YELLOW}{env_name} {BOLD_BLUE}{user}@{hostname} " "{BOLD_GREEN}{cwd} {gitstatus}{RESET}\n> ", ), ( "informative Version Control", "{YELLOW}{env_name} " "{BOLD_GREEN}{cwd} {gitstatus}{RESET} {prompt_end} ", ), ("classic", "{user}@{hostname} {BOLD_GREEN}{cwd}{RESET}> "), ( "classic with git status", "{gitstatus} {RESET}{user}@{hostname} {BOLD_GREEN}{cwd}{RESET}> ", ), ("screen savvy", "{YELLOW}{user}@{PURPLE}{hostname}{BOLD_GREEN}{cwd}{RESET}> "), ( "sorin", "{CYAN}{cwd} {INTENSE_RED}❯{INTENSE_YELLOW}❯{INTENSE_GREEN}❯{RESET} ", ), ( "acidhub", "❰{INTENSE_GREEN}{user}{RESET}❙{YELLOW}{cwd}{RESET}{env_name}❱{gitstatus}≻ ", ), ( "nim", "{INTENSE_GREEN}┬─[{YELLOW}{user}{RESET}@{BLUE}{hostname}{RESET}:{cwd}" 
"{INTENSE_GREEN}]─[{localtime}]─[{RESET}G:{INTENSE_GREEN}{curr_branch}=]" "\n{INTENSE_GREEN}╰─>{INTENSE_RED}{prompt_end}{RESET} ", ), ] def get_initial(env, prompt_format, fields): template = env.get_stringified("PROMPT") return { "value": template, "display": METHOD_NAME(html_format(prompt_format(template, fields=fields))), } def render_prompts(env): prompt_format = PromptFormatter() fields = dict(env.get("PROMPT_FIELDS") or {}) fields.update( cwd="~/snail/stuff", cwd_base="stuff", user="lou", hostname="carcolh", env_name=fields["env_prefix"] + "env" + fields["env_postfix"], curr_branch="branch", gitstatus="{CYAN}branch|{BOLD_BLUE}+2{RESET}⚑7", branch_color="{BOLD_INTENSE_RED}", localtime="15:56:07", ) yield get_initial(env, prompt_format, fields) for name, template in get_named_prompts(): display = html_format(prompt_format(template, fields=fields)) yield name, { "value": template, "display": METHOD_NAME(display), } def render_colors(): source = ( "import sys\n" 'echo "Welcome $USER on" @(sys.platform)\n\n' "def func(x=42):\n" ' d = {"xonsh": True}\n' ' return d.get("xonsh") and you\n\n' "# This is a comment\n" "![env | uniq | sort | grep PATH]\n" ) lexer = XonshLexer() lexer.add_filter("tokenmerge") token_stream = list(pygments.lex(source, lexer=lexer)) token_stream = [(t, s.replace("\n", "\\n")) for t, s in token_stream] styles = sorted(get_all_styles()) styles.insert(0, styles.pop(styles.index("default"))) for style in styles: try: display = html_format(token_stream, style=style) except Exception as ex: logging.error( f"Failed to format Xonsh code {ex!r}. {style!r}", exc_info=True ) display = source yield style, METHOD_NAME(display) def format_xontrib(xontrib: Xontrib): return { "url": xontrib.url, "license": xontrib.license, "display": METHOD_NAME(rst_to_html(xontrib.get_description())), } def render_xontribs(): md = get_xontribs() for xontrib_name, xontrib in md.items(): yield xontrib_name, format_xontrib(xontrib)
    null
    330
    from datetime import UTC, datetime from django.forms.models import model_to_dict from django.urls import reverse from .base import AuthenticatedAPITestCase from pydis_site.apps.api.models import Reminder, User class UnauthedReminderAPITests(AuthenticatedAPITestCase): def setUp(self): super().setUp() self.client.force_authenticate(user=None) def test_list_returns_401(self): url = reverse('api:bot:reminder-list') response = self.client.get(url) self.assertEqual(response.status_code, 401) def METHOD_NAME(self): url = reverse('api:bot:reminder-list') response = self.client.post(url, data={'not': 'important'}) self.assertEqual(response.status_code, 401) def test_delete_returns_401(self): url = reverse('api:bot:reminder-detail', args=('1234',)) response = self.client.delete(url) self.assertEqual(response.status_code, 401) class EmptyDatabaseReminderAPITests(AuthenticatedAPITestCase): def test_list_all_returns_empty_list(self): url = reverse('api:bot:reminder-list') response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual(response.json(), []) def test_delete_returns_404(self): url = reverse('api:bot:reminder-detail', args=('1234',)) response = self.client.delete(url) self.assertEqual(response.status_code, 404) class ReminderCreationTests(AuthenticatedAPITestCase): @classmethod def setUpTestData(cls): cls.author = User.objects.create( id=1234, name='Mermaid Man', discriminator=1234, ) def test_accepts_valid_data(self): data = { 'author': self.author.id, 'content': 'Remember to...wait what was it again?', 'expiration': datetime.now(tz=UTC).isoformat(), 'jump_url': "https://www.google.com", 'channel_id': 123, 'mentions': [8888, 9999], } url = reverse('api:bot:reminder-list') response = self.client.post(url, data=data) self.assertEqual(response.status_code, 201) self.assertIsNotNone(Reminder.objects.filter(id=1).first()) def test_rejects_invalid_data(self): data = { 'author': self.author.id, # Missing multiple required fields } url = reverse('api:bot:reminder-list') response = self.client.post(url, data=data) self.assertEqual(response.status_code, 400) self.assertRaises(Reminder.DoesNotExist, Reminder.objects.get, id=1) class ReminderDeletionTests(AuthenticatedAPITestCase): @classmethod def setUpTestData(cls): cls.author = User.objects.create( id=6789, name='Barnacle Boy', discriminator=6789, ) cls.reminder = Reminder.objects.create( author=cls.author, content="Don't forget to set yourself a reminder", expiration=datetime.now(UTC), jump_url="https://www.decliningmentalfaculties.com", channel_id=123 ) def test_delete_unknown_reminder_returns_404(self): url = reverse('api:bot:reminder-detail', args=('something',)) response = self.client.delete(url) self.assertEqual(response.status_code, 404) def test_delete_known_reminder_returns_204(self): url = reverse('api:bot:reminder-detail', args=(self.reminder.id,)) response = self.client.delete(url) self.assertEqual(response.status_code, 204) self.assertRaises(Reminder.DoesNotExist, Reminder.objects.get, id=self.reminder.id) class ReminderListTests(AuthenticatedAPITestCase): @classmethod def setUpTestData(cls): cls.author = User.objects.create( id=6789, name='Patrick Star', discriminator=6789, ) cls.reminder_one = Reminder.objects.create( author=cls.author, content="We should take Bikini Bottom, and push it somewhere else!", expiration=datetime.now(UTC), jump_url="https://www.icantseemyforehead.com", channel_id=123 ) cls.reminder_two = Reminder.objects.create( author=cls.author, content="Gahhh-I love being purple!", 
expiration=datetime.now(UTC), jump_url="https://www.goofygoobersicecreampartyboat.com", channel_id=123, active=False ) drf_format = '%Y-%m-%dT%H:%M:%S.%fZ' cls.rem_dict_one = model_to_dict(cls.reminder_one) cls.rem_dict_one['expiration'] = cls.rem_dict_one['expiration'].strftime(drf_format) cls.rem_dict_two = model_to_dict(cls.reminder_two) cls.rem_dict_two['expiration'] = cls.rem_dict_two['expiration'].strftime(drf_format) def test_reminders_in_full_list(self): url = reverse('api:bot:reminder-list') response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertCountEqual(response.json(), [self.rem_dict_one, self.rem_dict_two]) def test_filter_search(self): url = reverse('api:bot:reminder-list') response = self.client.get(f'{url}?search={self.author.name}') self.assertEqual(response.status_code, 200) self.assertCountEqual(response.json(), [self.rem_dict_one, self.rem_dict_two]) def test_filter_field(self): url = reverse('api:bot:reminder-list') response = self.client.get(f'{url}?active=true') self.assertEqual(response.status_code, 200) self.assertEqual(response.json(), [self.rem_dict_one]) class ReminderRetrieveTests(AuthenticatedAPITestCase): @classmethod def setUpTestData(cls): cls.author = User.objects.create( id=6789, name='Reminder author', discriminator=6789, ) cls.reminder = Reminder.objects.create( author=cls.author, content="Reminder content", expiration=datetime.now(UTC), jump_url="http://example.com/", channel_id=123 ) def test_retrieve_unknown_returns_404(self): url = reverse('api:bot:reminder-detail', args=("not_an_id",)) response = self.client.get(url) self.assertEqual(response.status_code, 404) def test_retrieve_known_returns_200(self): url = reverse('api:bot:reminder-detail', args=(self.reminder.id,)) response = self.client.get(url) self.assertEqual(response.status_code, 200) class ReminderUpdateTests(AuthenticatedAPITestCase): @classmethod def setUpTestData(cls): cls.author = User.objects.create( id=666, name='Man Ray', discriminator=666, ) cls.reminder = Reminder.objects.create( author=cls.author, content="Squash those do-gooders", expiration=datetime.now(UTC), jump_url="https://www.decliningmentalfaculties.com", channel_id=123 ) cls.data = {'content': 'Oops I forgot'} def test_patch_updates_record(self): url = reverse('api:bot:reminder-detail', args=(self.reminder.id,)) response = self.client.patch(url, data=self.data) self.assertEqual(response.status_code, 200) self.assertEqual( Reminder.objects.filter(id=self.reminder.id).first().content, self.data['content'] )
    null
    331
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdksas.endpoint import endpoint_data class AddClientUserDefineRuleRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Sas', '2018-12-03', 'AddClientUserDefineRule') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_ActionType(self): # Integer return self.get_query_params().get('ActionType') def METHOD_NAME(self, ActionType): # Integer self.add_query_param('ActionType', ActionType) def get_NewFilePath(self): # String return self.get_query_params().get('NewFilePath') def set_NewFilePath(self, NewFilePath): # String self.add_query_param('NewFilePath', NewFilePath) def get_Type(self): # Integer return self.get_query_params().get('Type') def set_Type(self, Type): # Integer self.add_query_param('Type', Type) def get_Platform(self): # String return self.get_query_params().get('Platform') def set_Platform(self, Platform): # String self.add_query_param('Platform', Platform) def get_RegistryKey(self): # String return self.get_query_params().get('RegistryKey') def set_RegistryKey(self, RegistryKey): # String self.add_query_param('RegistryKey', RegistryKey) def get_Cmdline(self): # String return self.get_query_params().get('Cmdline') def set_Cmdline(self, Cmdline): # String self.add_query_param('Cmdline', Cmdline) def get_FilePath(self): # String return self.get_query_params().get('FilePath') def set_FilePath(self, FilePath): # String self.add_query_param('FilePath', FilePath) def get_Md5List(self): # String return self.get_query_params().get('Md5List') def set_Md5List(self, Md5List): # String self.add_query_param('Md5List', Md5List) def get_ParentProcPath(self): # String return self.get_query_params().get('ParentProcPath') def set_ParentProcPath(self, ParentProcPath): # String self.add_query_param('ParentProcPath', ParentProcPath) def get_ProcPath(self): # String return self.get_query_params().get('ProcPath') def set_ProcPath(self, ProcPath): # String self.add_query_param('ProcPath', ProcPath) def get_ParentCmdline(self): # String return self.get_query_params().get('ParentCmdline') def set_ParentCmdline(self, ParentCmdline): # String self.add_query_param('ParentCmdline', ParentCmdline) def get_IP(self): # String return self.get_query_params().get('IP') def set_IP(self, IP): # String self.add_query_param('IP', IP) def get_RegistryContent(self): # String return self.get_query_params().get('RegistryContent') def set_RegistryContent(self, RegistryContent): # String self.add_query_param('RegistryContent', RegistryContent) def get_PortStr(self): # String return 
self.get_query_params().get('PortStr') def set_PortStr(self, PortStr): # String self.add_query_param('PortStr', PortStr) def get_Port(self): # Integer return self.get_query_params().get('Port') def set_Port(self, Port): # Integer self.add_query_param('Port', Port) def get_Name(self): # String return self.get_query_params().get('Name') def set_Name(self, Name): # String self.add_query_param('Name', Name)
    null
    332
    import asyncio import base64 import hashlib import hmac import json from typing import Awaitable from unittest import TestCase from unittest.mock import MagicMock from hummingbot.connector.derivative.kucoin_perpetual import kucoin_perpetual_constants as CONSTANTS from hummingbot.connector.derivative.kucoin_perpetual.kucoin_perpetual_auth import KucoinPerpetualAuth from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, WSJSONRequest class KucoinPerpetualAuthTests(TestCase): def setUp(self) -> None: super().setUp() self.api_key = "testApiKey" self.secret_key = "testSecretKey" self.passphrase = "testPassphrase" self.subaccount_name = "test!?Subaccount" self.mock_time_provider = MagicMock() self.mock_time_provider.time.return_value = 1000000 self.auth = KucoinPerpetualAuth( api_key=self.api_key, passphrase = self.passphrase, secret_key=self.secret_key, time_provider=self.mock_time_provider, ) def async_run_with_timeout(self, coroutine: Awaitable, timeout: int = 1): ret = asyncio.get_event_loop().run_until_complete(asyncio.wait_for(coroutine, timeout)) return ret def _sign(self, passphrase: str, key: str) -> str: signed_message = base64.b64encode( hmac.new( key.encode("utf-8"), passphrase.encode("utf-8"), hashlib.sha256).digest()) return signed_message.decode("utf-8") def test_add_auth_headers_to_get_request_without_params(self): request = RESTRequest( method=RESTMethod.GET, url="https://test.url/api/endpoint", is_auth_required=True, throttler_limit_id="/api/endpoint" ) self.async_run_with_timeout(self.auth.rest_authenticate(request, use_time_provider=1)) self.assertEqual(self.api_key, request.headers["KC-API-KEY"]) self.assertEqual("1000000", request.headers["KC-API-TIMESTAMP"]) self.assertEqual("2", request.headers["KC-API-KEY-VERSION"]) expected_signature = self._sign("1000000" + "GET" + request.throttler_limit_id, key=self.secret_key) self.assertEqual(expected_signature, request.headers["KC-API-SIGN"]) expected_passphrase = self._sign(self.passphrase, key=self.secret_key) self.assertEqual(expected_passphrase, request.headers["KC-API-PASSPHRASE"]) self.assertEqual(CONSTANTS.HB_PARTNER_ID, request.headers["KC-API-PARTNER"]) expected_partner_signature = self._sign("1000000" + CONSTANTS.HB_PARTNER_ID + self.api_key, key=CONSTANTS.HB_PARTNER_KEY) self.assertEqual(expected_partner_signature, request.headers["KC-API-PARTNER-SIGN"]) def METHOD_NAME(self): request = RESTRequest( method=RESTMethod.GET, url="https://test.url/api/endpoint", params={"param1": "value1", "param2": "value2"}, is_auth_required=True, throttler_limit_id="/api/endpoint" ) self.async_run_with_timeout(self.auth.rest_authenticate(request, use_time_provider=1)) self.assertEqual(self.api_key, request.headers["KC-API-KEY"]) self.assertEqual("1000000", request.headers["KC-API-TIMESTAMP"]) self.assertEqual("2", request.headers["KC-API-KEY-VERSION"]) full_endpoint = f"{request.throttler_limit_id}?param1=value1&param2=value2" expected_signature = self._sign("1000000" + "GET" + full_endpoint, key=self.secret_key) self.assertEqual(expected_signature, request.headers["KC-API-SIGN"]) expected_passphrase = self._sign(self.passphrase, key=self.secret_key) self.assertEqual(expected_passphrase, request.headers["KC-API-PASSPHRASE"]) self.assertEqual(CONSTANTS.HB_PARTNER_ID, request.headers["KC-API-PARTNER"]) expected_partner_signature = self._sign("1000000" + CONSTANTS.HB_PARTNER_ID + self.api_key, key=CONSTANTS.HB_PARTNER_KEY) self.assertEqual(expected_partner_signature, 
request.headers["KC-API-PARTNER-SIGN"]) def test_add_auth_headers_to_post_request(self): body = {"param_z": "value_param_z", "param_a": "value_param_a"} request = RESTRequest( method=RESTMethod.POST, url="https://test.url/api/endpoint", data=json.dumps(body), is_auth_required=True, throttler_limit_id="/api/endpoint" ) self.async_run_with_timeout(self.auth.rest_authenticate(request, use_time_provider=1)) self.assertEqual(self.api_key, request.headers["KC-API-KEY"]) self.assertEqual("1000000", request.headers["KC-API-TIMESTAMP"]) self.assertEqual("2", request.headers["KC-API-KEY-VERSION"]) expected_signature = self._sign("1000000" + "POST" + request.throttler_limit_id + json.dumps(body), key=self.secret_key) self.assertEqual(expected_signature, request.headers["KC-API-SIGN"]) expected_passphrase = self._sign(self.passphrase, key=self.secret_key) self.assertEqual(expected_passphrase, request.headers["KC-API-PASSPHRASE"]) self.assertEqual(CONSTANTS.HB_PARTNER_ID, request.headers["KC-API-PARTNER"]) expected_partner_signature = self._sign("1000000" + CONSTANTS.HB_PARTNER_ID + self.api_key, key=CONSTANTS.HB_PARTNER_KEY) self.assertEqual(expected_partner_signature, request.headers["KC-API-PARTNER-SIGN"]) def test_no_auth_added_to_wsrequest(self): payload = {"param1": "value_param_1"} request = WSJSONRequest(payload=payload, is_auth_required=True) self.async_run_with_timeout(self.auth.ws_authenticate(request)) self.assertEqual(payload, request.payload) def test_ws_auth_payload(self): expires = self.auth._get_expiration_timestamp() self.mock_time_provider.return_value = expires payload = self.auth.get_ws_auth_payload() raw_signature = "GET/realtime" + expires expected_signature = hmac.new(self.secret_key.encode("utf-8"), raw_signature.encode("utf-8"), hashlib.sha256).hexdigest() self.assertEqual(3, len(payload)) self.assertEqual(self.api_key, payload[0]) self.assertEqual(expires, payload[1]) self.assertEqual(expected_signature, payload[2])
    null
    333
    #!/usr/bin/env python3 # # Copyright (c) 2015 - 2023, Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # import unittest from unittest import mock mock_libgeopm = mock.Mock() with mock.patch('cffi.FFI.dlopen', return_value=mock_libgeopm): from geopmpy.endpoint import Endpoint class TestEndpoint(unittest.TestCase): def setUp(self): mock_libgeopm.reset() mock_libgeopm.geopm_endpoint_create.return_value = 0 mock_libgeopm.geopm_endpoint_destroy.return_value = 0 mock_libgeopm.geopm_endpoint_open.return_value = 0 mock_libgeopm.geopm_endpoint_close.return_value = 0 self._endpoint = Endpoint('test_endpoint') self.test_agent_name = 'my_agent' def mock_agent(endpoint, name_max, name_cstr): for idx, char in enumerate(self.test_agent_name): name_cstr[idx] = char.encode() name_cstr[len(self.test_agent_name)] = b'\x00' return 0 mock_libgeopm.geopm_endpoint_agent.side_effect = mock_agent def test_endpoint_creation_destruction(self): self.assertEqual("Endpoint(name='test_endpoint')", repr(self._endpoint)) initial_destroy_count = mock_libgeopm.geopm_endpoint_destroy.call_count del self._endpoint self.assertEqual(initial_destroy_count + 1, mock_libgeopm.geopm_endpoint_destroy.call_count) mock_libgeopm.geopm_endpoint_create.return_value = 1 self.assertRaises(RuntimeError, Endpoint, 'test_endpoint') def METHOD_NAME(self): initial_open_count = mock_libgeopm.geopm_endpoint_open.call_count initial_close_count = mock_libgeopm.geopm_endpoint_close.call_count with self._endpoint: self.assertEqual(initial_open_count + 1, mock_libgeopm.geopm_endpoint_open.call_count) self.assertEqual(initial_close_count, mock_libgeopm.geopm_endpoint_close.call_count) self.assertEqual(initial_close_count + 1, mock_libgeopm.geopm_endpoint_close.call_count) def test_endpoint_agent_name(self): self.assertEqual(self.test_agent_name, self._endpoint.agent()) def test_wait_for_agent_attach(self): mock_libgeopm.geopm_endpoint_wait_for_agent_attach.return_value = 1 self.assertRaises(RuntimeError, self._endpoint.wait_for_agent_attach, 123.4) mock_libgeopm.geopm_endpoint_wait_for_agent_attach.return_value = 0 self._endpoint.wait_for_agent_attach(123.4) def test_stop_wait_loop(self): mock_libgeopm.geopm_endpoint_wait_for_agent_stop_wait_loop.return_value = 1 self.assertRaises(RuntimeError, self._endpoint.stop_wait_loop) mock_libgeopm.geopm_endpoint_wait_for_agent_stop_wait_loop.return_value = 0 self._endpoint.stop_wait_loop() def test_reset_wait_loop(self): mock_libgeopm.geopm_endpoint_wait_for_agent_reset_wait_loop.return_value = 1 self.assertRaises(RuntimeError, self._endpoint.reset_wait_loop) mock_libgeopm.geopm_endpoint_wait_for_agent_reset_wait_loop.return_value = 0 self._endpoint.reset_wait_loop() def test_endpoint_profile_name(self): test_profile_name = 'my agent' def mock_profile_name(endpoint, name_max, name_cstr): for idx, char in enumerate(test_profile_name): name_cstr[idx] = char.encode() name_cstr[len(test_profile_name)] = b'\x00' return 0 mock_libgeopm.geopm_endpoint_profile_name.side_effect = mock_profile_name self.assertEqual(test_profile_name, self._endpoint.profile_name()) def test_endpoint_nodes(self): test_node_names = ['node 1', 'node 2'] def mock_num_node(endpoint, num_node_p): num_node_p[0] = len(test_node_names) return 0 mock_libgeopm.geopm_endpoint_num_node.side_effect = mock_num_node def mock_node_name(endpoint, node_idx, name_max, name_cstr): for idx, char in enumerate(test_node_names[node_idx]): name_cstr[idx] = char.encode() name_cstr[len(test_node_names[node_idx])] = b'\x00' return 0 
mock_libgeopm.geopm_endpoint_node_name.side_effect = mock_node_name self.assertEqual(test_node_names, self._endpoint.nodes()) def test_write_policy(self): test_policy = {'p0': 0, 'p1': 1} mock_libgeopm.geopm_endpoint_write_policy.return_value = 0 with mock.patch('geopmpy.agent.policy_names') as policy_mock: policy_mock.return_value = list(test_policy) self._endpoint.write_policy(test_policy) args = mock_libgeopm.geopm_endpoint_write_policy.call_args[0] _, num_policy, policy_array = args self.assertEqual(num_policy, len(test_policy)) self.assertEqual(policy_array[0], 0) self.assertEqual(policy_array[1], 1) def test_read_sample(self): test_sample = {'s0': 0, 's1': 1} test_age = 1.1 def mock_read_sample(endpoint, num_sample, sample_array, sample_age_p): sample_array[0] = test_sample['s0'] sample_array[1] = test_sample['s1'] sample_age_p[0] = test_age return 0 mock_libgeopm.geopm_endpoint_read_sample.side_effect = mock_read_sample with mock.patch('geopmpy.agent.sample_names') as sample_mock: sample_mock.return_value = list(test_sample) self.assertEqual((test_age, test_sample), self._endpoint.read_sample()) if __name__ == '__main__': unittest.main()
    null
    334
    # GemRB - Infinity Engine Emulator # Copyright (C) 2003 The GemRB Project # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # #character generation, appearance (GUICG12) import GemRB import CharOverview from GUIDefines import * AppearanceWindow = 0 PortraitButton = 0 PortraitsTable = 0 LastPortrait = 0 Gender = 0 def SetPicture (): global PortraitsTable, LastPortrait PortraitName = PortraitsTable.GetRowName (LastPortrait)+"L" PortraitButton.SetPicture (PortraitName) return def OnLoad (): global AppearanceWindow, PortraitButton, PortraitsTable, LastPortrait global Gender Gender=GemRB.GetVar ("Gender") AppearanceWindow = GemRB.LoadWindow (11, "GUICG") CharOverview.PositionCharGenWin(AppearanceWindow) #Load the Portraits Table PortraitsTable = GemRB.LoadTable ("PICTURES") PortraitsStart = PortraitsTable.FindValue (0, 2) FemaleCount = PortraitsTable.GetRowCount () - PortraitsStart + 1 if Gender == 2: LastPortrait = GemRB.Roll (1, FemaleCount, PortraitsStart-1) else: LastPortrait = GemRB.Roll (1, PortraitsTable.GetRowCount()-FemaleCount, 0) PortraitButton = AppearanceWindow.GetControl (1) PortraitButton.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_NO_IMAGE,OP_SET) PortraitButton.SetState (IE_GUI_BUTTON_LOCKED) LeftButton = AppearanceWindow.GetControl (2) RightButton = AppearanceWindow.GetControl (3) BackButton = AppearanceWindow.GetControl (5) BackButton.SetText (15416) BackButton.MakeEscape() CustomButton = AppearanceWindow.GetControl (6) CustomButton.SetText (17545) DoneButton = AppearanceWindow.GetControl (0) DoneButton.SetText (36789) DoneButton.MakeDefault() RightButton.OnPress (RightPress) LeftButton.OnPress (LeftPress) BackButton.OnPress (METHOD_NAME) CustomButton.OnPress (CustomPress) DoneButton.OnPress (NextPress) while True: if PortraitsTable.GetValue (LastPortrait, 0) == Gender: SetPicture () break LastPortrait = LastPortrait + 1 AppearanceWindow.Focus() return def RightPress (): global LastPortrait while True: LastPortrait = LastPortrait + 1 if LastPortrait >= PortraitsTable.GetRowCount (): LastPortrait = 0 if PortraitsTable.GetValue (LastPortrait, 0) == Gender: SetPicture () return def LeftPress (): global LastPortrait while True: LastPortrait = LastPortrait - 1 if LastPortrait < 0: LastPortrait = PortraitsTable.GetRowCount ()-1 if PortraitsTable.GetValue (LastPortrait, 0) == Gender: SetPicture () return def METHOD_NAME (): if AppearanceWindow: AppearanceWindow.Close () GemRB.SetNextScript ("CharGen") GemRB.SetVar ("Gender",0) #scrapping the gender value return def CustomDone (): Window = CustomWindow Portrait = PortraitList1.QueryText () GemRB.SetToken ("LargePortrait", Portrait) Portrait = PortraitList2.QueryText () GemRB.SetToken ("SmallPortrait", Portrait) if Window: Window.Close () if AppearanceWindow: AppearanceWindow.Close () GemRB.SetNextScript ("CharGen2") return def CustomAbort (): if CustomWindow: CustomWindow.Close () 
return def LargeCustomPortrait (): Window = CustomWindow Portrait = PortraitList1.QueryText () #small hack if GemRB.GetVar ("Row1") == RowCount1: return Label = Window.GetControl (0x10000007) Label.SetText (Portrait) Button = Window.GetControl (6) if Portrait=="": Portrait = "NOPORTMD" Button.SetState (IE_GUI_BUTTON_DISABLED) else: if PortraitList2.QueryText ()!="": Button.SetState (IE_GUI_BUTTON_ENABLED) Button = Window.GetControl (0) Button.SetPicture (Portrait, "NOPORTMD") return def SmallCustomPortrait (): Window = CustomWindow Portrait = PortraitList2.QueryText () #small hack if GemRB.GetVar ("Row2") == RowCount2: return Label = Window.GetControl (0x10000008) Label.SetText (Portrait) Button = Window.GetControl (6) if Portrait=="": Portrait = "NOPORTSM" Button.SetState (IE_GUI_BUTTON_DISABLED) else: if PortraitList1.QueryText ()!="": Button.SetState (IE_GUI_BUTTON_ENABLED) Button = Window.GetControl (1) Button.SetPicture (Portrait, "NOPORTSM") return def CustomPress (): global PortraitList1, PortraitList2 global RowCount1, RowCount2 global CustomWindow CustomWindow = Window = GemRB.LoadWindow (18) PortraitList1 = Window.GetControl (2) RowCount1 = len(PortraitList1.ListResources (CHR_PORTRAITS, 2)) PortraitList1.OnSelect (LargeCustomPortrait) PortraitList1.SetVarAssoc ("Row1",RowCount1) PortraitList2 = Window.GetControl (4) RowCount2 = len(PortraitList2.ListResources (CHR_PORTRAITS, 0)) PortraitList2.OnSelect (SmallCustomPortrait) PortraitList2.SetVarAssoc ("Row2",RowCount2) Button = Window.GetControl (6) Button.SetText (11973) Button.MakeDefault() Button.OnPress (CustomDone) Button.SetState (IE_GUI_BUTTON_DISABLED) Button = Window.GetControl (7) Button.SetText (15416) Button.MakeEscape() Button.OnPress (CustomAbort) Button = Window.GetControl (0) PortraitName = PortraitsTable.GetRowName (LastPortrait)+"L" Button.SetPicture (PortraitName, "NOPORTMD") Button.SetState (IE_GUI_BUTTON_LOCKED) Button = Window.GetControl (1) PortraitName = PortraitsTable.GetRowName (LastPortrait)+"S" Button.SetPicture (PortraitName, "NOPORTSM") Button.SetState (IE_GUI_BUTTON_LOCKED) Window.ShowModal (MODAL_SHADOW_NONE) return def NextPress (): if AppearanceWindow: AppearanceWindow.Close () PortraitTable = GemRB.LoadTable ("pictures") PortraitName = PortraitTable.GetRowName (LastPortrait ) GemRB.SetToken ("SmallPortrait", PortraitName+"S") GemRB.SetToken ("LargePortrait", PortraitName+"L") GemRB.SetVar ("PortraitIndex", LastPortrait) GemRB.SetNextScript ("CharGen2") #Before race return
    null
    335
    # coding=utf-8 # Copyright 2023 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Replace all images in the fake directory with their compressed versions. This allows to reduce size of the images in the `fake_data/` directory. Instructions: python -m tensorflow_datasets.scripts.replace_fake_images \ --fake_dir=/path/to/tensorflow_datasets/testing/test_data/fake_examples """ import hashlib import os import tarfile import tempfile import zipfile import zlib import absl.app import absl.flags import numpy as np import PIL.Image FLAGS = absl.flags.FLAGS absl.flags.DEFINE_string( 'fake_dir', None, 'path to the directory which contains files' ) # Some dataset generation rely on the image content, so we cannot compress # those. SKIP_DATASETS = ['curated_breast_imaging_ddsm'] def rewrite_image(filepath): """Replace the image by an new one with smaller size (uniform color). Args: filepath: path of the images to get processed """ image_content = PIL.Image.open(filepath) image = np.array(image_content) # Filter unsuported images if image_content.mode == 'RGBA' or image.dtype == bool: return # The color is a deterministic function of the relative filepath. assert filepath.startswith(FLAGS.fake_dir) relative_filepath = filepath[len(FLAGS.fake_dir) :] color = int(hashlib.md5(relative_filepath.encode('utf-8')).hexdigest(), 16) color %= 255 image = np.ones_like(image) * color image = PIL.Image.fromarray(image) image.save(filepath, optimize=True) def rewrite_zip(root_dir, zip_filepath): """Rewrite the given .zip file into a new one containing compressed images. Args: root_dir: directory path which contain zip compressed file zip_filepath: path from directory to file """ # Creating a temporary file to store images with tempfile.TemporaryDirectory(dir=root_dir) as temp_dir: # Extraction of compressed .zip file with zipfile.ZipFile(zip_filepath, 'r') as zip_file: zip_file.extractall(path=temp_dir) rewrite_dir(temp_dir) # Recursively compress the archive content # Compress the .zip file again with zipfile.ZipFile( zip_filepath, 'w', compression=zipfile.ZIP_DEFLATED, compresslevel=zlib.Z_BEST_COMPRESSION, ) as zip_file: for file_dir, _, files in os.walk(temp_dir): for file in files: file_path = os.path.join(file_dir, file) zip_file.write( file_path, arcname=os.path.relpath(file_path, temp_dir) ) def rewrite_tar(root_dir, tar_filepath): """Rewrite the older .tar file into new better compressed one. 
Compression formats supported by this method (.tar.gz, .tgz, .tar.bz2) Args: root_dir: directory path which contain tar compressed file tar_filepath: path from directory to file """ # Create a tempfile to store the images contain noise with tempfile.TemporaryDirectory(dir=root_dir, suffix='fake') as temp_dir: # Checking the extension of file to be extract tar_filepath_lowercase = tar_filepath.lower() if tar_filepath_lowercase.endswith('gz'): extension = ':gz' elif tar_filepath_lowercase.endswith('bz2'): extension = ':bz2' elif tar_filepath_lowercase.endswith('xz'): extension = ':xz' else: extension = '' # Extraction of .tar file with tarfile.open(tar_filepath, 'r' + extension) as tar: tar.extractall(path=temp_dir) rewrite_dir(temp_dir) # Recursively compress the archive content # Convert back into tar file with tarfile.open(tar_filepath, 'w' + extension) as tar: tar.add(temp_dir, arcname='', recursive=True) def rewrite_dir(fake_dir): """Process the whole directory which contains the compressed files. Args: fake_dir: path of the directory which contains all compression files """ img_ext_list = ['.jpg', '.jpeg', '.png'] for root_dir, _, files in os.walk(fake_dir): if any(skip_ds in root_dir for skip_ds in SKIP_DATASETS): print(f'Skipping {root_dir}') continue print(f'Processing {root_dir}') for file in files: path = os.path.join(root_dir, file) file_ext = os.path.splitext(file)[-1].lower() if file_ext in img_ext_list: rewrite_image(path) elif file_ext == '.npz': # Filter `.npz` files continue elif zipfile.is_zipfile(path): rewrite_zip(root_dir, path) elif tarfile.is_tarfile(path): rewrite_tar(root_dir, path) def METHOD_NAME(_): """Main script.""" if FLAGS.fake_dir is None: raise ValueError('You should specify the path of the `fake_dir`') rewrite_dir(FLAGS.fake_dir) if __name__ == '__main__': absl.app.run(METHOD_NAME)
    null
    336
    # -*- coding: utf-8 -*- """ This plugin enables a kind of permalink which can be used to refer to a piece of content which is resistant to the file being moved or renamed. """ import itertools import logging import os import os.path from pelican import signals from pelican.generators import Generator from pelican.utils import clean_output_dir from pelican.utils import mkdir_p logger = logging.getLogger(__name__) def article_url(content): ''' Get the URL for an item of content ''' return '{content.settings[SITEURL]}/{content.url}'.format( content=content) REDIRECT_STRING = ''' <!DOCTYPE HTML> <html lang="en-US"> <head> <meta charset="UTF-8"> <meta http-equiv="refresh" content="0;url={url}"> <script type="text/javascript"> window.location.href = "{url}" </script> <title>Page Redirection to {title}</title> </head> <body> If you are not redirected automatically, follow the <a href='{url}'>link to {title}</a> </body> </html> ''' class PermalinkGenerator(Generator): ''' Generate a redirect page for every item of content with a permalink_id metadata ''' def generate_context(self): ''' Setup context ''' self.permalink_output_path = os.path.join( self.output_path, self.settings['PERMALINK_PATH']) self.permalink_id_metadata_key = ( self.settings['PERMALINK_ID_METADATA_KEY']) def generate_output(self, writer=None): ''' Generate redirect files ''' logger.info( 'Generating permalink files in %r', self.permalink_output_path) clean_output_dir(self.permalink_output_path, []) mkdir_p(self.permalink_output_path) for content in itertools.chain( self.context['articles'], self.context['pages']): for permalink_id in content.get_permalink_ids_iter(): permalink_path = os.path.join( self.permalink_output_path, permalink_id) + '.html' redirect_string = REDIRECT_STRING.format( url=article_url(content), title=content.title) open(permalink_path, 'w').write(redirect_string) def get_permalink_ids_iter(self): ''' Method to get permalink ids from content. To be bound to the class last thing. ''' permalink_id_key = self.settings['PERMALINK_ID_METADATA_KEY'] permalink_ids = self.metadata.get(permalink_id_key, '') for permalink_id in permalink_ids.split(','): if permalink_id: yield permalink_id.strip() def get_permalink_ids(self): ''' Method to get permalink ids from content. To be bound to the class last thing. 
''' return list(self.get_permalink_ids_iter()) def get_permalink_path(self): """Get just path component of permalink.""" try: first_permalink_id = next(self.get_permalink_ids_iter()) except StopIteration: return None return '/{settings[PERMALINK_PATH]}/{first_permalink}.html'.format( settings=self.settings, first_permalink=first_permalink_id) def get_permalink_url(self): ''' Get a permalink URL ''' return "/".join((self.settings['SITEURL'], self.get_permalink_path())) PERMALINK_METHODS = ( get_permalink_ids_iter, get_permalink_ids, get_permalink_url, get_permalink_path, ) def add_permalink_methods(content_inst): ''' Add permalink methods to object ''' for permalink_method in PERMALINK_METHODS: setattr( content_inst, permalink_method.__name__, permalink_method.__get__(content_inst, content_inst.__class__)) def add_permalink_option_defaults(pelicon_inst): ''' Add perlican defaults ''' pelicon_inst.settings.setdefault('PERMALINK_PATH', 'permalinks') pelicon_inst.settings.setdefault( 'PERMALINK_ID_METADATA_KEY', 'permalink_id') def METHOD_NAME(_pelican_object): return PermalinkGenerator def register(): signals.METHOD_NAME.connect(METHOD_NAME) signals.content_object_init.connect(add_permalink_methods) signals.initialized.connect(add_permalink_option_defaults)
    null
    337
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkrds.endpoint import endpoint_data class CreateDdrInstanceRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Rds', '2014-08-15', 'CreateDdrInstance') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_ResourceOwnerId(self): # Long return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self, ResourceOwnerId): # Long self.add_query_param('ResourceOwnerId', ResourceOwnerId) def get_DBInstanceStorage(self): # Integer return self.get_query_params().get('DBInstanceStorage') def set_DBInstanceStorage(self, DBInstanceStorage): # Integer self.add_query_param('DBInstanceStorage', DBInstanceStorage) def get_SystemDBCharset(self): # String return self.get_query_params().get('SystemDBCharset') def set_SystemDBCharset(self, SystemDBCharset): # String self.add_query_param('SystemDBCharset', SystemDBCharset) def get_EngineVersion(self): # String return self.get_query_params().get('EngineVersion') def set_EngineVersion(self, EngineVersion): # String self.add_query_param('EngineVersion', EngineVersion) def get_ResourceGroupId(self): # String return self.get_query_params().get('ResourceGroupId') def set_ResourceGroupId(self, ResourceGroupId): # String self.add_query_param('ResourceGroupId', ResourceGroupId) def get_DBInstanceDescription(self): # String return self.get_query_params().get('DBInstanceDescription') def set_DBInstanceDescription(self, DBInstanceDescription): # String self.add_query_param('DBInstanceDescription', DBInstanceDescription) def get_Period(self): # String return self.get_query_params().get('Period') def set_Period(self, Period): # String self.add_query_param('Period', Period) def get_BackupSetId(self): # String return self.get_query_params().get('BackupSetId') def set_BackupSetId(self, BackupSetId): # String self.add_query_param('BackupSetId', BackupSetId) def get_OwnerId(self): # Long return self.get_query_params().get('OwnerId') def set_OwnerId(self, OwnerId): # Long self.add_query_param('OwnerId', OwnerId) def get_DBInstanceClass(self): # String return self.get_query_params().get('DBInstanceClass') def set_DBInstanceClass(self, DBInstanceClass): # String self.add_query_param('DBInstanceClass', DBInstanceClass) def get_SecurityIPList(self): # String return self.get_query_params().get('SecurityIPList') def set_SecurityIPList(self, SecurityIPList): # String self.add_query_param('SecurityIPList', SecurityIPList) def get_VSwitchId(self): # String return self.get_query_params().get('VSwitchId') def set_VSwitchId(self, VSwitchId): # 
String self.add_query_param('VSwitchId', VSwitchId) def get_PrivateIpAddress(self): # String return self.get_query_params().get('PrivateIpAddress') def set_PrivateIpAddress(self, PrivateIpAddress): # String self.add_query_param('PrivateIpAddress', PrivateIpAddress) def get_ZoneId(self): # String return self.get_query_params().get('ZoneId') def set_ZoneId(self, ZoneId): # String self.add_query_param('ZoneId', ZoneId) def get_InstanceNetworkType(self): # String return self.get_query_params().get('InstanceNetworkType') def METHOD_NAME(self, InstanceNetworkType): # String self.add_query_param('InstanceNetworkType', InstanceNetworkType) def get_ConnectionMode(self): # String return self.get_query_params().get('ConnectionMode') def set_ConnectionMode(self, ConnectionMode): # String self.add_query_param('ConnectionMode', ConnectionMode) def get_SourceDBInstanceName(self): # String return self.get_query_params().get('SourceDBInstanceName') def set_SourceDBInstanceName(self, SourceDBInstanceName): # String self.add_query_param('SourceDBInstanceName', SourceDBInstanceName) def get_ClientToken(self): # String return self.get_query_params().get('ClientToken') def set_ClientToken(self, ClientToken): # String self.add_query_param('ClientToken', ClientToken) def get_Engine(self): # String return self.get_query_params().get('Engine') def set_Engine(self, Engine): # String self.add_query_param('Engine', Engine) def get_DBInstanceStorageType(self): # String return self.get_query_params().get('DBInstanceStorageType') def set_DBInstanceStorageType(self, DBInstanceStorageType): # String self.add_query_param('DBInstanceStorageType', DBInstanceStorageType) def get_DBInstanceNetType(self): # String return self.get_query_params().get('DBInstanceNetType') def set_DBInstanceNetType(self, DBInstanceNetType): # String self.add_query_param('DBInstanceNetType', DBInstanceNetType) def get_RestoreTime(self): # String return self.get_query_params().get('RestoreTime') def set_RestoreTime(self, RestoreTime): # String self.add_query_param('RestoreTime', RestoreTime) def get_ResourceOwnerAccount(self): # String return self.get_query_params().get('ResourceOwnerAccount') def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount) def get_OwnerAccount(self): # String return self.get_query_params().get('OwnerAccount') def set_OwnerAccount(self, OwnerAccount): # String self.add_query_param('OwnerAccount', OwnerAccount) def get_UsedTime(self): # String return self.get_query_params().get('UsedTime') def set_UsedTime(self, UsedTime): # String self.add_query_param('UsedTime', UsedTime) def get_RestoreType(self): # String return self.get_query_params().get('RestoreType') def set_RestoreType(self, RestoreType): # String self.add_query_param('RestoreType', RestoreType) def get_VPCId(self): # String return self.get_query_params().get('VPCId') def set_VPCId(self, VPCId): # String self.add_query_param('VPCId', VPCId) def get_PayType(self): # String return self.get_query_params().get('PayType') def set_PayType(self, PayType): # String self.add_query_param('PayType', PayType) def get_SourceRegion(self): # String return self.get_query_params().get('SourceRegion') def set_SourceRegion(self, SourceRegion): # String self.add_query_param('SourceRegion', SourceRegion)
    null
    338
    # MIT License # Copyright (c) 2020 Development Seed # Copyright (c) 2021 Plan4Better # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import abc from dataclasses import dataclass from typing import Any, ClassVar, Dict, List, Optional from pydantic import BaseModel, Field from pydantic.class_validators import root_validator from pydantic.networks import AnyHttpUrl from src.core.config import settings from src.resources.enums import MimeTypes # =========================VECTOR TILE SCHEMAS========================= class VectorTileLayer(BaseModel, metaclass=abc.ABCMeta): """Layer's Abstract BaseClass. Attributes: id (str): Layer's name. bounds (list): Layer's bounds (left, bottom, right, top). minzoom (int): Layer's min zoom level. maxzoom (int): Layer's max zoom level. tileurl (str, optional): Layer's tiles url. """ id: str bounds: List[float] = [-180, -90, 180, 90] minzoom: int = settings.DEFAULT_MINZOOM maxzoom: int = settings.DEFAULT_MAXZOOM tileurl: Optional[str] class VectorTileTable(VectorTileLayer): """Table Reader. Attributes: id (str): Layer's name. bounds (list): Layer's bounds (left, bottom, right, top). minzoom (int): Layer's min zoom level. maxzoom (int): Layer's max zoom level. tileurl (str, optional): Layer's tiles url. type (str): Layer's type. schema (str): Table's database schema (e.g public). geometry_type (str): Table's geometry type (e.g polygon). geometry_column (str): Name of the geomtry column in the table. properties (Dict): Properties available in the table. """ type: str = "Table" dbschema: str = Field(..., alias="schema") table: str geometry_type: str geometry_column: str properties: Dict[str, str] class VectorTileFunction(VectorTileTable): """Function Reader. Attributes: id (str): Layer's name. bounds (list): Layer's bounds (left, bottom, right, top). minzoom (int): Layer's min zoom level. maxzoom (int): Layer's max zoom level. tileurl (str, optional): Layer's tiles url. type (str): Layer's type. function_name (str): Nane of the SQL function to call. Defaults to `id`. sql (str): Valid SQL function which returns Tile data. options (list, optional): options available for the SQL function. 
""" type: str = "Function" sql: str function_name: Optional[str] options: Optional[List[Dict[str, Any]]] @root_validator def function_name_default(cls, values): """Define default function's name to be same as id.""" function_name = values.get("function_name") if function_name is None: values["function_name"] = values.get("id") return values @classmethod def from_file(cls, id: str, infile: str, **kwargs: Any): """load sql from file""" with open(infile) as f: sql = f.read() return cls(id=id, sql=sql, **kwargs) class TileMatrixSetLink(BaseModel): """ TileMatrixSetLink model. Based on http://docs.opengeospatial.org/per/19-069.html#_tilematrixsets """ href: AnyHttpUrl rel: str = "item" type: MimeTypes = MimeTypes.json class Config: """Config for model.""" use_enum_values = True class TileMatrixSetRef(BaseModel): """ TileMatrixSetRef model. Based on http://docs.opengeospatial.org/per/19-069.html#_tilematrixsets """ id: str title: str links: List[TileMatrixSetLink] class TileMatrixSetList(BaseModel): """ TileMatrixSetList model. Based on http://docs.opengeospatial.org/per/19-069.html#_tilematrixsets """ tileMatrixSets: List[TileMatrixSetRef] @dataclass class Registry: """function registry""" funcs: ClassVar[Dict[str, VectorTileFunction]] = {} @classmethod def get(cls, key: str): """lookup function by name""" return cls.funcs.get(key) @classmethod def METHOD_NAME(cls, *args: VectorTileFunction): """register function(s)""" for func in args: cls.funcs[func.id] = func registry = Registry()
    339
    import struct import base64 import json from jmbitcoin import ecdsa_sign, ecdsa_verify from jmdaemon import fidelity_bond_sanity_check import binascii def assert_is_utxo(utxo): assert len(utxo) == 2 assert isinstance(utxo[0], bytes) assert len(utxo[0]) == 32 assert isinstance(utxo[1], int) assert utxo[1] >= 0 def METHOD_NAME(cert_pub, cert_expiry): return b'fidelity-bond-cert|' + cert_pub + b'|' + str(cert_expiry).encode('ascii') def get_ascii_cert_msg(cert_pub, cert_expiry): return b'fidelity-bond-cert|' + binascii.hexlify(cert_pub) + b'|' + str(cert_expiry).encode('ascii') class FidelityBond: def __init__(self, utxo, utxo_pubkey, locktime, cert_expiry, cert_privkey, cert_pubkey, cert_signature): assert_is_utxo(utxo) assert isinstance(utxo_pubkey, bytes) assert isinstance(locktime, int) assert isinstance(cert_expiry, int) assert isinstance(cert_privkey, bytes) assert isinstance(cert_pubkey, bytes) assert isinstance(cert_signature, bytes) self.utxo = utxo self.utxo_pubkey = utxo_pubkey self.locktime = locktime self.cert_expiry = cert_expiry self.cert_privkey = cert_privkey self.cert_pubkey = cert_pubkey self.cert_signature = cert_signature def create_proof(self, maker_nick, taker_nick): return FidelityBondProof( maker_nick, taker_nick, self.cert_pubkey, self.cert_expiry, self.cert_signature, self.utxo, self.utxo_pubkey, self.locktime) def serialize(self): return json.dumps([ self.utxo, self.utxo_pubkey, self.locktime, self.cert_expiry, self.cert_privkey, self.cert_pubkey, self.cert_signature, ]) @classmethod def deserialize(cls, data): return cls(*json.loads(data)) class FidelityBondProof: # nick_sig + cert_sig + cert_pubkey + cert_expiry + utxo_pubkey + txid + vout + timelock # 72 + 72 + 33 + 2 + 33 + 32 + 4 + 4 = 252 bytes SER_STUCT_FMT = '<72s72s33sH33s32sII' def __init__(self, maker_nick, taker_nick, cert_pub, cert_expiry, cert_sig, utxo, utxo_pub, locktime): assert isinstance(maker_nick, str) assert isinstance(taker_nick, str) assert isinstance(cert_pub, bytes) assert isinstance(cert_sig, bytes) assert isinstance(utxo_pub, bytes) assert isinstance(locktime, int) assert_is_utxo(utxo) self.maker_nick = maker_nick self.taker_nick = taker_nick self.cert_pub = cert_pub self.cert_expiry = cert_expiry self.cert_sig = cert_sig self.utxo = utxo self.utxo_pub = utxo_pub self.locktime = locktime @property def nick_msg(self): return (self.taker_nick + '|' + self.maker_nick).encode('ascii') def create_proof_msg(self, cert_priv): nick_sig = ecdsa_sign(self.nick_msg, cert_priv) # FIXME: remove stupid base64 nick_sig = base64.b64decode(nick_sig) return self._serialize_proof_msg(nick_sig) def _serialize_proof_msg(self, msg_signature): msg_signature = msg_signature.rjust(72, b'\xff') cert_sig = self.cert_sig.rjust(72, b'\xff') fidelity_bond_data = struct.pack( self.SER_STUCT_FMT, msg_signature, cert_sig, self.cert_pub, self.cert_expiry, self.utxo_pub, self.utxo[0], self.utxo[1], self.locktime ) return base64.b64encode(fidelity_bond_data).decode('ascii') @staticmethod def _verify_signature(message, signature, pubkey): # FIXME: remove stupid base64 return ecdsa_verify(message, base64.b64encode(signature), pubkey) @classmethod def parse_and_verify_proof_msg(cls, maker_nick, taker_nick, data): if not fidelity_bond_sanity_check.fidelity_bond_sanity_check(data): raise ValueError("sanity check failed") decoded_data = base64.b64decode(data) unpacked_data = struct.unpack(cls.SER_STUCT_FMT, decoded_data) try: signature = unpacked_data[0][unpacked_data[0].index(b'\x30'):] cert_sig = 
unpacked_data[1][unpacked_data[1].index(b'\x30'):] except ValueError: #raised if index() doesnt find the position raise ValueError("der signature header not found") proof = cls(maker_nick, taker_nick, unpacked_data[2], unpacked_data[3], cert_sig, (unpacked_data[5], unpacked_data[6]), unpacked_data[4], unpacked_data[7]) cert_msg = METHOD_NAME(proof.cert_pub, proof.cert_expiry) ascii_cert_msg = get_ascii_cert_msg(proof.cert_pub, proof.cert_expiry) if not cls._verify_signature(proof.nick_msg, signature, proof.cert_pub): raise ValueError("nick sig does not verify") if not cls._verify_signature(cert_msg, proof.cert_sig, proof.utxo_pub) and\ not cls._verify_signature(ascii_cert_msg, proof.cert_sig, proof.utxo_pub): raise ValueError("cert sig does not verify") return proof
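As a small illustration (the values below are made up and are not real key material), the two certificate-message helpers above differ only in whether the pubkey is embedded raw or hex-encoded:

# Hypothetical placeholder values, not real keys.
cert_pub = bytes.fromhex("02" + "11" * 32)   # 33-byte compressed-pubkey placeholder
cert_expiry = 24
METHOD_NAME(cert_pub, cert_expiry)           # b'fidelity-bond-cert|\x02\x11...|24'
get_ascii_cert_msg(cert_pub, cert_expiry)    # same message with the pubkey hexlified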
    340
    # Copyright 2022 Sony Group Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Tuple import argparse import os import numpy as np import nnabla as nn from nnabla.ext_utils import get_extension_context from nnabla.utils.data_iterator import DataIterator from nnabla.logger import logger from pointnet2 import pointnet2_classification_msg, pointnet2_classification_ssg from loss import classification_loss from running_utils import categorical_accuracy # Install neu (nnabla examples utils) to import these functions. # See [NEU](https://github.com/nnabla/nnabla-examples/tree/master/utils). from neu.datasets.modelnet40_normal_resampled import data_iterator_modelnet40_normal_resampled from neu.checkpoint_util import load_checkpoint def eval_one_epoch( valid_data_iter: DataIterator, valid_vars: Dict[str, nn.Variable], valid_loss_vars: Dict[str, nn.Variable], ) -> Tuple[np.ndarray, np.ndarray]: total_steps = 0 total_accuracy = 0.0 total_loss = 0.0 num_iterations = valid_data_iter.size // valid_data_iter.batch_size for _ in range(num_iterations): point_cloud, label = valid_data_iter.next() valid_vars["point_cloud"].d = point_cloud valid_vars["label"].d = label valid_loss_vars["loss"].forward(clear_buffer=True) pred_logits = valid_loss_vars["pred"].d.copy() accuracy = categorical_accuracy(pred_logits, valid_vars["label"].d) total_steps += 1 total_accuracy += accuracy total_loss += float(valid_loss_vars["loss"].d) average_accuracy = total_accuracy / float(total_steps) average_loss = total_loss / float(total_steps) return average_accuracy, average_loss def evaluate(args): # Set context extension_module = args.context ctx = get_extension_context(extension_module, device_id=args.device_id) nn.set_default_context(ctx) # Feature dim, with normal vector or not feature_dim = 6 if args.with_normal else 3 # Create validation graph valid_batch_size = 4 # Setting 4 is for using all data of valid dataset point_cloud_valid = nn.Variable( (valid_batch_size, args.num_points, feature_dim)) label_valid = nn.Variable((valid_batch_size, 1)) if args.model_type == "ssg": pred_valid = pointnet2_classification_ssg( point_cloud_valid, train=False, num_classes=args.num_classes) elif args.model_type == "msg": pred_valid = pointnet2_classification_msg( point_cloud_valid, train=False, num_classes=args.num_classes) else: raise ValueError pred_valid.persistent = True loss_valid = classification_loss(pred_valid, label_valid) valid_vars = {"point_cloud": point_cloud_valid, "label": label_valid} valid_loss_vars = {"loss": loss_valid, "pred": pred_valid} # Load snapshot load_checkpoint(args.checkpoint_json_path, {}) # Data Iterator valid_data_iter = data_iterator_modelnet40_normal_resampled( args.data_dir, valid_batch_size, False, False, args.num_points, normalize=True, with_normal=args.with_normal, ) logger.info(f"Validation dataset size: {valid_data_iter.size}") # Evaluation logger.info(f"Evaluation starting ...") accuracy, loss = eval_one_epoch( valid_data_iter, valid_vars, valid_loss_vars) 
logger.info("accuracy: {}".format(accuracy)) logger.info("loss: {}".format(loss)) def METHOD_NAME(): parser = argparse.ArgumentParser() parser.add_argument( "--data_dir", type=str, default=os.path.join(os.path.dirname(__file__), "data", "modelnet40_normal_resampled") ) parser.add_argument("--model_type", type=str, default="ssg", choices=["msg", "ssg"]) parser.add_argument("--num_classes", type=int, default=40) parser.add_argument("--num_points", type=int, default=1024) parser.add_argument("--with_normal", action="store_true") parser.add_argument("--device_id", type=int, default=0) parser.add_argument("--context", type=str, default="cudnn") parser.add_argument( "--checkpoint_json_path", type=str, default="./pointnet2_classification_result/seed_100/checkpoint_best/checkpoint_best.json", ) args = parser.parse_args() evaluate(args) if __name__ == "__main__": METHOD_NAME()
    341
    from typing import Optional from pydantic import Field, SecretStr from hummingbot.client.config.config_data_types import BaseConnectorConfigMap, ClientFieldData from hummingbot.connector.exchange.ndax import ndax_constants as CONSTANTS from hummingbot.core.utils.tracking_nonce import get_tracking_nonce CENTRALIZED = True EXAMPLE_PAIR = "BTC-CAD" HUMMINGBOT_ID_PREFIX = 777 # NDAX fees: https://ndax.io/fees # Fees have to be expressed as percent value DEFAULT_FEES = [0.2, 0.2] # USE_ETHEREUM_WALLET not required because default value is false # FEE_TYPE not required because default value is Percentage # FEE_TOKEN not required because the fee is not flat def convert_to_exchange_trading_pair(hb_trading_pair: str) -> str: return hb_trading_pair.replace("-", "") def get_new_client_order_id(is_buy: bool, trading_pair: str) -> str: ts_micro_sec: int = get_tracking_nonce() return f"{HUMMINGBOT_ID_PREFIX}{ts_micro_sec}" def METHOD_NAME(connector_variant_label: Optional[str]) -> str: variant = connector_variant_label if connector_variant_label else "ndax_main" return CONSTANTS.REST_URLS.get(variant) def wss_url(connector_variant_label: Optional[str]) -> str: variant = connector_variant_label if connector_variant_label else "ndax_main" return CONSTANTS.WSS_URLS.get(variant) class NdaxConfigMap(BaseConnectorConfigMap): connector: str = Field(default="ndax", client_data=None) ndax_uid: SecretStr = Field( default=..., client_data=ClientFieldData( prompt=lambda cm: "Enter your NDAX user ID (uid)", is_secure=True, is_connect_key=True, prompt_on_new=True, ) ) ndax_account_name: SecretStr = Field( default=..., client_data=ClientFieldData( prompt=lambda cm: "Enter the name of the account you want to use", is_secure=True, is_connect_key=True, prompt_on_new=True, ) ) ndax_api_key: SecretStr = Field( default=..., client_data=ClientFieldData( prompt=lambda cm: "Enter your NDAX API key", is_secure=True, is_connect_key=True, prompt_on_new=True, ) ) ndax_secret_key: SecretStr = Field( default=..., client_data=ClientFieldData( prompt=lambda cm: "Enter your NDAX secret key", is_secure=True, is_connect_key=True, prompt_on_new=True, ) ) class Config: title = "ndax" KEYS = NdaxConfigMap.construct() OTHER_DOMAINS = ["ndax_testnet"] OTHER_DOMAINS_PARAMETER = {"ndax_testnet": "ndax_testnet"} OTHER_DOMAINS_EXAMPLE_PAIR = {"ndax_testnet": "BTC-CAD"} OTHER_DOMAINS_DEFAULT_FEES = {"ndax_testnet": [0.2, 0.2]} class NdaxTestnetConfigMap(BaseConnectorConfigMap): connector: str = Field(default="ndax_testnet", client_data=None) ndax_testnet_uid: SecretStr = Field( default=..., client_data=ClientFieldData( prompt=lambda cm: "Enter your NDAX Testnet user ID (uid)", is_secure=True, is_connect_key=True, prompt_on_new=True, ) ) ndax_testnet_account_name: SecretStr = Field( default=..., client_data=ClientFieldData( prompt=lambda cm: "Enter the name of the account you want to use", is_secure=True, is_connect_key=True, prompt_on_new=True, ) ) ndax_testnet_api_key: SecretStr = Field( default=..., client_data=ClientFieldData( prompt=lambda cm: "Enter your NDAX Testnet API key", is_secure=True, is_connect_key=True, prompt_on_new=True, ) ) ndax_testnet_secret_key: SecretStr = Field( default=..., client_data=ClientFieldData( prompt=lambda cm: "Enter your NDAX Testnet secret key", is_secure=True, is_connect_key=True, prompt_on_new=True, ) ) class Config: title = "ndax_testnet" OTHER_DOMAINS_KEYS = {"ndax_testnet": NdaxTestnetConfigMap.construct()}
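For illustration, the small helpers above behave as follows (return values shown as comments; the nonce portion of the order id is time-based and differs per call, and METHOD_NAME is the masked REST-URL helper):

convert_to_exchange_trading_pair("BTC-CAD")                    # -> "BTCCAD"
get_new_client_order_id(is_buy=True, trading_pair="BTC-CAD")   # -> "777<tracking-nonce>"
METHOD_NAME(None)    # -> CONSTANTS.REST_URLS["ndax_main"]
wss_url(None)        # -> CONSTANTS.WSS_URLS["ndax_main"]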
    342
    from abc import abstractmethod from typing import List, Iterator, Union from docutils import nodes from docutils.statemachine import ViewList, string2lines from docutils.parsers.rst import Directive, directives from conversion import transpile_py_to_r def setup(app): app.add_directive('pharmpy-execute', PharmpyExecute) app.add_directive('pharmpy-code', PharmpyCode) return { 'version': '0.1', 'parallel_read_safe': True, 'parallel_write_safe': True, } def csv_option(s): return [p.strip() for p in s.split(",")] if s else [] class RecursiveDirective(Directive): def _convert_lines_to_nodes(self, lines: List[str]) -> List[nodes.Node]: """Turn an RST string into a node that can be used in the document. See https://github.com/sphinx-doc/sphinx/issues/8039 """ node = nodes.Element() node.document = self.state.document self.state.nested_parse( ViewList( string2lines('\n'.join(lines)), source='[SnippetDirective]', ), self.content_offset, node, ) return node.children class PharmpyAbstractCodeDirective(RecursiveDirective): option_spec = { 'linenos': directives.flag, 'lineno-start': directives.nonnegative_int, 'emphasize-lines': directives.unchanged_required, } def run(self): return self._nodes() def _nodes(self): lines = self._lines() return self._convert_lines_to_nodes(lines) @abstractmethod def _lines(self) -> List[str]: """Return lines for this directive""" def _input(self): return [ '.. tabs::', *METHOD_NAME(3, [ '', '.. code-tab:: py', *METHOD_NAME(3, self._code_option_lines()), '', *METHOD_NAME(3, self.content), '', '.. code-tab:: r R', *METHOD_NAME(3, self._code_option_lines()), '', *METHOD_NAME(3, transpile_py_to_r(self.content)), ]), ] def _code_option_lines(self): if 'emphasize-lines' in self.options: yield f':emphasize-lines:{self.options.get("emphasize-lines")}' if 'linenos' in self.options: yield ':linenos:' if 'lineno-start' in self.options: yield f':lineno-start:{self.options.get("lineno-start")}' class PharmpyExecute(PharmpyAbstractCodeDirective): required_arguments = 0 optional_arguments = 0 final_argument_whitespace = True has_content = True option_spec = { **PharmpyAbstractCodeDirective.option_spec, 'hide-code': directives.flag, 'hide-output': directives.flag, 'code-below': directives.flag, 'raises': csv_option, 'stderr': directives.flag, } def _lines(self) -> List[str]: return [ f'.. container:: pharmpy-snippet{"" if "hide-output" in self.options else " with-output"}', '', *METHOD_NAME(3, self._input_output_lines()) ] def _input_output_lines(self): # NOTE self._output should always be returned here, even when # `hide-output` is set, otherwise the code will not be executed. if 'hide-code' in self.options: return self._output() if 'code-below' in self.options: return [ *self._output(), '', *self._input(), ] return [ *self._input(), '', *self._output(), ] def _output(self): return [ '.. jupyter-execute::', *METHOD_NAME(3, [ *self._jupyter_option_lines(), '', *self.content ]), ] def _jupyter_option_lines(self): yield ':hide-code:' if 'hide-output' in self.options: yield ':hide-output:' if 'raise' in self.options: yield f':raises:{",".join(self.options.get("raises"))}' if 'stderr' in self.options: yield ':stderr:' class PharmpyCode(PharmpyAbstractCodeDirective): required_arguments = 0 optional_arguments = 0 final_argument_whitespace = True has_content = True option_spec = PharmpyAbstractCodeDirective.option_spec def _lines(self) -> List[str]: return [ '.. 
container:: pharmpy-snippet', '', *METHOD_NAME(3, self._input()) ] def METHOD_NAME(n: int, lines: Union[List[str],Iterator[str]]): return map(lambda line: (' '*n + line) if line else line, lines)
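A quick check of the indentation helper METHOD_NAME defined at the end of this module: non-empty lines receive n leading spaces while empty lines pass through unchanged, which keeps blank lines out of the generated reST indentation:

list(METHOD_NAME(3, [".. code-tab:: py", "", "print(1)"]))
# -> ['   .. code-tab:: py', '', '   print(1)']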
    343
    import logging import httpx from django.core.handlers.wsgi import WSGIRequest from django.http import HttpResponse from django.shortcuts import render from django.utils import timezone from django.views import View from pydis_site import settings from pydis_site.apps.home.models import RepositoryMetadata log = logging.getLogger(__name__) class HomeView(View): """The main landing page for the website.""" github_api = "https://api.github.com/users/python-discord/repos?per_page=100" repository_cache_ttl = 3600 # Which of our GitHub repos should be displayed on the front page, and in which order? repos = [ "python-discord/site", "python-discord/bot", "python-discord/snekbox", "python-discord/sir-lancebot", "python-discord/metricity", "python-discord/king-arthur", ] def __init__(self): """Clean up stale RepositoryMetadata.""" if not settings.STATIC_BUILD: RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete() # If no token is defined (for example in local development), then # it does not make sense to pass the Authorization header. More # specifically, GitHub will reject any requests from us due to the # invalid header. We can make a limited number of anonymous requests # though, which is useful for testing. if settings.GITHUB_TOKEN: self.headers = {"Authorization": f"token {settings.GITHUB_TOKEN}"} else: self.headers = {} def _get_api_data(self) -> dict[str, dict[str, str]]: """ Call the GitHub API and get information about our repos. If we're unable to get that info for any reason, return an empty dict. """ repo_dict = {} try: # Fetch the data from the GitHub API api_data: list[dict] = httpx.get( self.github_api, headers=self.headers, timeout=settings.TIMEOUT_PERIOD ).json() except httpx.TimeoutException: log.error("Request to fetch GitHub repository metadata for timed out!") return repo_dict # Process the API data into our dict for repo in api_data: try: full_name = repo["full_name"] if full_name in self.repos: repo_dict[full_name] = { "full_name": repo["full_name"], "description": repo["description"], "language": repo["language"], "forks_count": repo["forks_count"], "stargazers_count": repo["stargazers_count"], } # Something is not right about the API data we got back from GitHub. except (TypeError, ConnectionError, KeyError) as e: log.error( "Unable to parse the GitHub repository metadata from response!", extra={ 'api_data': api_data, 'error': e } ) continue return repo_dict def METHOD_NAME(self) -> list[RepositoryMetadata]: """Build a list of RepositoryMetadata objects that we can use to populate the front page.""" # First off, load the timestamp of the least recently updated entry. if settings.STATIC_BUILD: last_update = None else: last_update = ( RepositoryMetadata.objects.values_list("last_updated", flat=True) .order_by("last_updated").first() ) # If we did not retrieve any results here, we should import them! if last_update is None: # Try to get new data from the API. If it fails, we'll return an empty list. # In this case, we simply don't display our projects on the site. api_repositories = self._get_api_data() # Create all the repodata records in the database. data = [ RepositoryMetadata( repo_name=api_data["full_name"], description=api_data["description"], forks=api_data["forks_count"], stargazers=api_data["stargazers_count"], language=api_data["language"], ) for api_data in api_repositories.values() ] if settings.STATIC_BUILD: return data return RepositoryMetadata.objects.bulk_create(data) # If the data is stale, we should refresh it. 
if (timezone.now() - last_update).seconds > self.repository_cache_ttl: # Try to get new data from the API. If it fails, return the cached data. api_repositories = self._get_api_data() if not api_repositories: return RepositoryMetadata.objects.all() # Update or create all RepoData objects in self.repos database_repositories = [] for api_data in api_repositories.values(): repo_data, _created = RepositoryMetadata.objects.update_or_create( repo_name=api_data["full_name"], defaults={ 'repo_name': api_data["full_name"], 'description': api_data["description"], 'forks': api_data["forks_count"], 'stargazers': api_data["stargazers_count"], 'language': api_data["language"], } ) database_repositories.append(repo_data) return database_repositories # Otherwise, if the data is fresher than 2 minutes old, we should just return it. return RepositoryMetadata.objects.all() def get(self, request: WSGIRequest) -> HttpResponse: """Collect repo data and render the homepage view.""" repo_data = self.METHOD_NAME() return render(request, "home/index.html", {"repo_data": repo_data}) def timeline(request: WSGIRequest) -> HttpResponse: """Render timeline view.""" return render(request, 'home/timeline.html')
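A minimal sketch of how the views above are typically wired up in a Django urls.py; the module path and route names are assumptions rather than values taken from the source project:

from django.urls import path
from .views import HomeView, timeline   # assumed module layout

urlpatterns = [
    path("", HomeView.as_view(), name="home"),          # assumed route name
    path("timeline/", timeline, name="timeline"),       # assumed route name
]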
    344
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdksas.endpoint import endpoint_data class DescribeSuspEventsRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeSuspEvents') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_Source(self): # String return self.get_query_params().get('Source') def set_Source(self, Source): # String self.add_query_param('Source', Source) def get_ContainerFieldName(self): # String return self.get_query_params().get('ContainerFieldName') def set_ContainerFieldName(self, ContainerFieldName): # String self.add_query_param('ContainerFieldName', ContainerFieldName) def get_SourceIp(self): # String return self.get_query_params().get('SourceIp') def set_SourceIp(self, SourceIp): # String self.add_query_param('SourceIp', SourceIp) def get_EventNames(self): # String return self.get_query_params().get('EventNames') def set_EventNames(self, EventNames): # String self.add_query_param('EventNames', EventNames) def get_From(self): # String return self.get_query_params().get('From') def set_From(self, _From): # String self.add_query_param('From', _From) def get_Id(self): # Long return self.get_query_params().get('Id') def set_Id(self, Id): # Long self.add_query_param('Id', Id) def get_TacticId(self): # String return self.get_body_params().get('TacticId') def set_TacticId(self, TacticId): # String self.add_body_params('TacticId', TacticId) def get_AlarmUniqueInfo(self): # String return self.get_query_params().get('AlarmUniqueInfo') def set_AlarmUniqueInfo(self, AlarmUniqueInfo): # String self.add_query_param('AlarmUniqueInfo', AlarmUniqueInfo) def get_UniqueInfo(self): # String return self.get_query_params().get('UniqueInfo') def set_UniqueInfo(self, UniqueInfo): # String self.add_query_param('UniqueInfo', UniqueInfo) def get_GroupId(self): # Long return self.get_query_params().get('GroupId') def set_GroupId(self, GroupId): # Long self.add_query_param('GroupId', GroupId) def get_OperateTimeEnd(self): # String return self.get_query_params().get('OperateTimeEnd') def set_OperateTimeEnd(self, OperateTimeEnd): # String self.add_query_param('OperateTimeEnd', OperateTimeEnd) def get_Name(self): # String return self.get_query_params().get('Name') def set_Name(self, Name): # String self.add_query_param('Name', Name) def get_Status(self): # String return self.get_query_params().get('Status') def set_Status(self, Status): # String self.add_query_param('Status', Status) def get_Uuids(self): # String return self.get_query_params().get('Uuids') def set_Uuids(self, Uuids): # String 
self.add_query_param('Uuids', Uuids) def get_TimeEnd(self): # String return self.get_query_params().get('TimeEnd') def set_TimeEnd(self, TimeEnd): # String self.add_query_param('TimeEnd', TimeEnd) def get_TargetType(self): # String return self.get_query_params().get('TargetType') def set_TargetType(self, TargetType): # String self.add_query_param('TargetType', TargetType) def METHOD_NAME(self): # String return self.get_query_params().get('SortType') def set_SortType(self, SortType): # String self.add_query_param('SortType', SortType) def get_Remark(self): # String return self.get_query_params().get('Remark') def set_Remark(self, Remark): # String self.add_query_param('Remark', Remark) def get_ContainerFieldValue(self): # String return self.get_query_params().get('ContainerFieldValue') def set_ContainerFieldValue(self, ContainerFieldValue): # String self.add_query_param('ContainerFieldValue', ContainerFieldValue) def get_PageSize(self): # String return self.get_query_params().get('PageSize') def set_PageSize(self, PageSize): # String self.add_query_param('PageSize', PageSize) def get_Lang(self): # String return self.get_query_params().get('Lang') def set_Lang(self, Lang): # String self.add_query_param('Lang', Lang) def get_Dealed(self): # String return self.get_query_params().get('Dealed') def set_Dealed(self, Dealed): # String self.add_query_param('Dealed', Dealed) def get_CurrentPage(self): # String return self.get_query_params().get('CurrentPage') def set_CurrentPage(self, CurrentPage): # String self.add_query_param('CurrentPage', CurrentPage) def get_ClusterId(self): # String return self.get_query_params().get('ClusterId') def set_ClusterId(self, ClusterId): # String self.add_query_param('ClusterId', ClusterId) def get_OperateErrorCodeLists(self): # RepeatList return self.get_query_params().get('OperateErrorCodeList') def set_OperateErrorCodeLists(self, OperateErrorCodeList): # RepeatList for depth1 in range(len(OperateErrorCodeList)): self.add_query_param('OperateErrorCodeList.' + str(depth1 + 1), OperateErrorCodeList[depth1]) def get_SortColumn(self): # String return self.get_query_params().get('SortColumn') def set_SortColumn(self, SortColumn): # String self.add_query_param('SortColumn', SortColumn) def get_AssetsTypeLists(self): # RepeatList return self.get_query_params().get('AssetsTypeList') def set_AssetsTypeLists(self, AssetsTypeList): # RepeatList for depth1 in range(len(AssetsTypeList)): self.add_query_param('AssetsTypeList.' + str(depth1 + 1), AssetsTypeList[depth1]) def get_OperateTimeStart(self): # String return self.get_query_params().get('OperateTimeStart') def set_OperateTimeStart(self, OperateTimeStart): # String self.add_query_param('OperateTimeStart', OperateTimeStart) def get_TimeStart(self): # String return self.get_query_params().get('TimeStart') def set_TimeStart(self, TimeStart): # String self.add_query_param('TimeStart', TimeStart) def get_Levels(self): # String return self.get_query_params().get('Levels') def set_Levels(self, Levels): # String self.add_query_param('Levels', Levels) def get_ParentEventTypes(self): # String return self.get_query_params().get('ParentEventTypes') def set_ParentEventTypes(self, ParentEventTypes): # String self.add_query_param('ParentEventTypes', ParentEventTypes)
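A minimal usage sketch, assuming valid Alibaba Cloud credentials, of sending the request defined above through the core SDK client; the region and paging values are placeholders:

from aliyunsdkcore.client import AcsClient

client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
request = DescribeSuspEventsRequest()
request.set_PageSize("20")
request.set_CurrentPage("1")
response = client.do_action_with_exception(request)  # raw JSON bytes from the API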
    345
    """ Datatypes for Anvi'o https://github.com/merenlab/anvio """ import glob import logging import os from typing import Optional from galaxy.datatypes.metadata import MetadataElement from galaxy.datatypes.protocols import ( DatasetProtocol, HasExtraFilesAndMetadata, ) from galaxy.datatypes.text import Html log = logging.getLogger(__name__) class AnvioComposite(Html): """ Base class to use for Anvi'o composite datatypes. Generally consist of a sqlite database, plus optional additional files """ file_ext = "anvio_composite" composite_type = "auto_primary_file" def METHOD_NAME(self, dataset: HasExtraFilesAndMetadata) -> str: """ This is called only at upload to write the html file cannot rename the datasets here - they come with the default unfortunately """ defined_files = self.get_composite_files(dataset=dataset).items() rval = [f"<html><head><title>Files for Anvi'o Composite Dataset ({self.file_ext})</title></head>"] if defined_files: rval.append("<p/>This composite dataset is composed of the following defined files:<p/><ul>") for composite_name, composite_file in defined_files: opt_text = "" if composite_file.optional: opt_text = " (optional)" missing_text = "" if not os.path.exists(os.path.join(dataset.extra_files_path, composite_name)): missing_text = " (missing)" rval.append(f'<li><a href="{composite_name}">{composite_name}</a>{opt_text}{missing_text}</li>') rval.append("</ul>") defined_files = map(lambda x: x[0], defined_files) extra_files = [] for dirpath, _dirnames, filenames in os.walk(dataset.extra_files_path, followlinks=True): for filename in filenames: rel_path = os.path.relpath(os.path.join(dirpath, filename), dataset.extra_files_path) if rel_path not in defined_files: extra_files.append(rel_path) if extra_files: rval.append("<p/>This composite dataset contains these undefined files:<p/><ul>") for rel_path in extra_files: rval.append(f'<li><a href="{rel_path}">{rel_path}</a></li>') rval.append("</ul>") if not (defined_files or extra_files): rval.append("<p/>This composite dataset does not contain any files!<p/><ul>") rval.append("</html>") return "\n".join(rval) def get_mime(self) -> str: """Returns the mime type of the datatype""" return "text/html" def set_peek(self, dataset: DatasetProtocol, **kwd) -> None: """Set the peek and blurb text""" if not dataset.dataset.purged: dataset.peek = "Anvio database (multiple files)" dataset.blurb = "Anvio database (multiple files)" else: dataset.peek = "file does not exist" dataset.blurb = "file purged from disk" def display_peek(self, dataset: DatasetProtocol) -> str: """Create HTML content, used for displaying peek.""" try: return dataset.peek except Exception: return "Anvio database (multiple files)" class AnvioDB(AnvioComposite): """Class for AnvioDB database files.""" _anvio_basename: Optional[str] = None MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True) file_ext = "anvio_db" def __init__(self, *args, **kwd): super().__init__(*args, **kwd) if self._anvio_basename is not None: self.add_composite_file(self._anvio_basename, is_binary=True, optional=False) def set_meta(self, dataset: DatasetProtocol, overwrite: bool = True, **kwd) -> None: """ Set the anvio_basename based upon actual extra_files_path contents. 
""" super().set_meta(dataset, overwrite=overwrite, **kwd) if dataset.metadata.anvio_basename is not None and os.path.exists( os.path.join(dataset.extra_files_path, dataset.metadata.anvio_basename) ): return found = False for basename in [dataset.metadata.anvio_basename, self._anvio_basename]: if found: break if basename is not None and not os.path.exists(os.path.join(dataset.extra_files_path, basename)): for name in glob.glob(os.path.join(dataset.extra_files_path, f"*{basename}")): dataset.metadata.anvio_basename = os.path.basename(name) found = True break class AnvioStructureDB(AnvioDB): """Class for Anvio Structure DB database files.""" _anvio_basename = "STRUCTURE.db" MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True) file_ext = "anvio_structure_db" class AnvioGenomesDB(AnvioDB): """Class for Anvio Genomes DB database files.""" _anvio_basename = "-GENOMES.db" MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True) file_ext = "anvio_genomes_db" class AnvioContigsDB(AnvioDB): """Class for Anvio Contigs DB database files.""" _anvio_basename = "CONTIGS.db" MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True) file_ext = "anvio_contigs_db" def __init__(self, *args, **kwd): super().__init__(*args, **kwd) self.add_composite_file("CONTIGS.h5", is_binary=True, optional=True) class AnvioProfileDB(AnvioDB): """Class for Anvio Profile DB database files.""" _anvio_basename = "PROFILE.db" MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True) file_ext = "anvio_profile_db" def __init__(self, *args, **kwd): super().__init__(*args, **kwd) self.add_composite_file("RUNINFO.cp", is_binary=True, optional=True) self.add_composite_file("RUNINFO.mcp", is_binary=True, optional=True) self.add_composite_file("AUXILIARY_DATA.db", is_binary=True, optional=True) self.add_composite_file("RUNLOG.txt", is_binary=False, optional=True) class AnvioPanDB(AnvioDB): """Class for Anvio Pan DB database files.""" _anvio_basename = "PAN.db" MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True) file_ext = "anvio_pan_db" class AnvioSamplesDB(AnvioDB): """Class for Anvio Samples DB database files.""" _anvio_basename = "SAMPLES.db" MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True) file_ext = "anvio_samples_db"
    346
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkrds.endpoint import endpoint_data class UpgradeDBInstanceMajorVersionRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Rds', '2014-08-15', 'UpgradeDBInstanceMajorVersion') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_ResourceOwnerId(self): # Long return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self, ResourceOwnerId): # Long self.add_query_param('ResourceOwnerId', ResourceOwnerId) def get_DBInstanceStorage(self): # Integer return self.get_query_params().get('DBInstanceStorage') def set_DBInstanceStorage(self, DBInstanceStorage): # Integer self.add_query_param('DBInstanceStorage', DBInstanceStorage) def get_ZoneIdSlave1(self): # String return self.get_query_params().get('ZoneIdSlave1') def set_ZoneIdSlave1(self, ZoneIdSlave1): # String self.add_query_param('ZoneIdSlave1', ZoneIdSlave1) def get_ZoneIdSlave2(self): # String return self.get_query_params().get('ZoneIdSlave2') def set_ZoneIdSlave2(self, ZoneIdSlave2): # String self.add_query_param('ZoneIdSlave2', ZoneIdSlave2) def get_SwitchTimeMode(self): # String return self.get_query_params().get('SwitchTimeMode') def set_SwitchTimeMode(self, SwitchTimeMode): # String self.add_query_param('SwitchTimeMode', SwitchTimeMode) def get_SwitchOver(self): # String return self.get_query_params().get('SwitchOver') def set_SwitchOver(self, SwitchOver): # String self.add_query_param('SwitchOver', SwitchOver) def get_CollectStatMode(self): # String return self.get_query_params().get('CollectStatMode') def set_CollectStatMode(self, CollectStatMode): # String self.add_query_param('CollectStatMode', CollectStatMode) def get_SwitchTime(self): # String return self.get_query_params().get('SwitchTime') def set_SwitchTime(self, SwitchTime): # String self.add_query_param('SwitchTime', SwitchTime) def get_DBInstanceId(self): # String return self.get_query_params().get('DBInstanceId') def set_DBInstanceId(self, DBInstanceId): # String self.add_query_param('DBInstanceId', DBInstanceId) def METHOD_NAME(self): # String return self.get_query_params().get('DBInstanceStorageType') def set_DBInstanceStorageType(self, DBInstanceStorageType): # String self.add_query_param('DBInstanceStorageType', DBInstanceStorageType) def get_Period(self): # String return self.get_query_params().get('Period') def set_Period(self, Period): # String self.add_query_param('Period', Period) def get_UsedTime(self): # String return self.get_query_params().get('UsedTime') def set_UsedTime(self, UsedTime): # String 
self.add_query_param('UsedTime', UsedTime) def get_DBInstanceClass(self): # String return self.get_query_params().get('DBInstanceClass') def set_DBInstanceClass(self, DBInstanceClass): # String self.add_query_param('DBInstanceClass', DBInstanceClass) def get_VSwitchId(self): # String return self.get_query_params().get('VSwitchId') def set_VSwitchId(self, VSwitchId): # String self.add_query_param('VSwitchId', VSwitchId) def get_PrivateIpAddress(self): # String return self.get_query_params().get('PrivateIpAddress') def set_PrivateIpAddress(self, PrivateIpAddress): # String self.add_query_param('PrivateIpAddress', PrivateIpAddress) def get_VPCId(self): # String return self.get_query_params().get('VPCId') def set_VPCId(self, VPCId): # String self.add_query_param('VPCId', VPCId) def get_ZoneId(self): # String return self.get_query_params().get('ZoneId') def set_ZoneId(self, ZoneId): # String self.add_query_param('ZoneId', ZoneId) def get_PayType(self): # String return self.get_query_params().get('PayType') def set_PayType(self, PayType): # String self.add_query_param('PayType', PayType) def get_InstanceNetworkType(self): # String return self.get_query_params().get('InstanceNetworkType') def set_InstanceNetworkType(self, InstanceNetworkType): # String self.add_query_param('InstanceNetworkType', InstanceNetworkType) def get_TargetMajorVersion(self): # String return self.get_query_params().get('TargetMajorVersion') def set_TargetMajorVersion(self, TargetMajorVersion): # String self.add_query_param('TargetMajorVersion', TargetMajorVersion)
    347
    from __future__ import annotations from typing import Any, Dict, List, Optional, Tuple from boa3.internal import constants from boa3.internal.model.builtin.method.builtinmethod import IBuiltinMethod from boa3.internal.model.expression import IExpression from boa3.internal.model.method import Method from boa3.internal.model.property import Property from boa3.internal.model.type.classes.classarraytype import ClassArrayType from boa3.internal.model.variable import Variable from boa3.internal.neo.vm.opcode.Opcode import Opcode class BlockType(ClassArrayType): """ A class used to represent Neo Block class """ def __init__(self): super().__init__('Block') from boa3.internal.model.type.type import Type from boa3.internal.model.type.collection.sequence.uint160type import UInt160Type from boa3.internal.model.type.collection.sequence.uint256type import UInt256Type uint256 = UInt256Type.build() self._variables: Dict[str, Variable] = { 'hash': Variable(uint256), 'version': Variable(Type.int), 'previous_hash': Variable(uint256), 'merkle_root': Variable(uint256), 'timestamp': Variable(Type.int), 'nonce': Variable(Type.int), 'index': Variable(Type.int), 'primary_index': Variable(Type.int), 'next_consensus': Variable(UInt160Type.build()), 'transaction_count': Variable(Type.int) } self._constructor: Method = None @property def class_variables(self) -> Dict[str, Variable]: return {} @property def instance_variables(self) -> Dict[str, Variable]: return self._variables.copy() @property def properties(self) -> Dict[str, Property]: return {} @property def static_methods(self) -> Dict[str, Method]: return {} @property def class_methods(self) -> Dict[str, Method]: return {} @property def instance_methods(self) -> Dict[str, Method]: return {} def constructor_method(self) -> Optional[Method]: # was having a problem with recursive import if self._constructor is None: self._constructor: Method = BlockMethod(self) return self._constructor @classmethod def build(cls, value: Any = None) -> BlockType: if value is None or cls._is_type_of(value): return _Block @classmethod def _is_type_of(cls, value: Any): return isinstance(value, BlockType) _Block = BlockType() class BlockMethod(IBuiltinMethod): def __init__(self, return_type: BlockType): identifier = '-Block__init__' args: Dict[str, Variable] = {} super().__init__(identifier, args, return_type=return_type) def validate_parameters(self, *params: IExpression) -> bool: return len(params) == 0 @property def METHOD_NAME(self) -> List[Tuple[Opcode, bytes]]: from boa3.internal.neo.vm.type.Integer import Integer uint160_default = Integer(constants.SIZE_OF_INT160).to_byte_array() + bytes(constants.SIZE_OF_INT160) uint256_default = Integer(constants.SIZE_OF_INT256).to_byte_array() + bytes(constants.SIZE_OF_INT256) return [ (Opcode.PUSH0, b''), # transaction_count (Opcode.PUSHDATA1, uint160_default), # next_consensus (Opcode.PUSH0, b''), # primary_index (Opcode.PUSH0, b''), # index (Opcode.PUSH0, b''), # nonce (Opcode.PUSH0, b''), # timestamp (Opcode.PUSHDATA1, uint256_default), # merkle_root (Opcode.PUSHDATA1, uint256_default), # previous_hash (Opcode.PUSH0, b''), # version (Opcode.PUSHDATA1, uint256_default), # hash (Opcode.PUSH10, b''), (Opcode.PACK, b'') ] @property def _args_on_stack(self) -> int: return len(self.args) @property def _body(self) -> Optional[str]: return
    348
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest class CreateInstanceRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'PetaData', '2016-01-01', 'CreateInstance','petadata') self.set_method('POST') def get_ResourceOwnerId(self): return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self,ResourceOwnerId): self.add_query_param('ResourceOwnerId',ResourceOwnerId) def get_NodeSpec(self): return self.get_query_params().get('NodeSpec') def set_NodeSpec(self,NodeSpec): self.add_query_param('NodeSpec',NodeSpec) def get_ClientToken(self): return self.get_query_params().get('ClientToken') def set_ClientToken(self,ClientToken): self.add_query_param('ClientToken',ClientToken) def get_NetworkType(self): return self.get_query_params().get('NetworkType') def set_NetworkType(self,NetworkType): self.add_query_param('NetworkType',NetworkType) def get_AccountName(self): return self.get_query_params().get('AccountName') def set_AccountName(self,AccountName): self.add_query_param('AccountName',AccountName) def get_SecurityToken(self): return self.get_query_params().get('SecurityToken') def set_SecurityToken(self,SecurityToken): self.add_query_param('SecurityToken',SecurityToken) def get_NodeNumber(self): return self.get_query_params().get('NodeNumber') def set_NodeNumber(self,NodeNumber): self.add_query_param('NodeNumber',NodeNumber) def get_ResourceOwnerAccount(self): return self.get_query_params().get('ResourceOwnerAccount') def set_ResourceOwnerAccount(self,ResourceOwnerAccount): self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount) def get_OwnerAccount(self): return self.get_query_params().get('OwnerAccount') def set_OwnerAccount(self,OwnerAccount): self.add_query_param('OwnerAccount',OwnerAccount) def get_OwnerId(self): return self.get_query_params().get('OwnerId') def set_OwnerId(self,OwnerId): self.add_query_param('OwnerId',OwnerId) def get_SecurityIPList(self): return self.get_query_params().get('SecurityIPList') def set_SecurityIPList(self,SecurityIPList): self.add_query_param('SecurityIPList',SecurityIPList) def get_VSwitchId(self): return self.get_query_params().get('VSwitchId') def set_VSwitchId(self,VSwitchId): self.add_query_param('VSwitchId',VSwitchId) def get_AccountPassword(self): return self.get_query_params().get('AccountPassword') def set_AccountPassword(self,AccountPassword): self.add_query_param('AccountPassword',AccountPassword) def get_InstanceName(self): return self.get_query_params().get('InstanceName') def set_InstanceName(self,InstanceName): self.add_query_param('InstanceName',InstanceName) def get_DBName(self): return self.get_query_params().get('DBName') def set_DBName(self,DBName): self.add_query_param('DBName',DBName) def get_VpcId(self): return 
self.get_query_params().get('VpcId') def set_VpcId(self,VpcId): self.add_query_param('VpcId',VpcId) def get_ZoneId(self): return self.get_query_params().get('ZoneId') def set_ZoneId(self,ZoneId): self.add_query_param('ZoneId',ZoneId) def get_ChargeType(self): return self.get_query_params().get('ChargeType') def METHOD_NAME(self,ChargeType): self.add_query_param('ChargeType',ChargeType)
    349
    #!/usr/bin/env python3 # Copyright (c) 2014-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Script to generate list of seed nodes for chainparams.cpp. This script expects two text files in the directory that is passed as an argument: nodes_main.txt nodes_test.txt These files must consist of lines in the format <ip>:<port> [<ipv6>]:<port> <onion>.onion:<port> <i2p>.b32.i2p:<port> The output will be two data structures with the peers in binary format: static const uint8_t chainparams_seed_{main,test}[]={ ... } These should be pasted into `src/chainparamsseeds.h`. ''' from base64 import b32decode from enum import Enum import struct import sys import os import re class BIP155Network(Enum): IPV4 = 1 IPV6 = 2 TORV2 = 3 # no longer supported TORV3 = 4 I2P = 5 CJDNS = 6 def name_to_bip155(addr): '''Convert address string to BIP155 (networkID, addr) tuple.''' if addr.endswith('.onion'): vchAddr = b32decode(addr[0:-6], True) if len(vchAddr) == 35: assert vchAddr[34] == 3 return (BIP155Network.TORV3, vchAddr[:32]) elif len(vchAddr) == 10: return (BIP155Network.TORV2, vchAddr) else: raise ValueError('Invalid onion %s' % vchAddr) elif addr.endswith('.b32.i2p'): vchAddr = b32decode(addr[0:-8] + '====', True) if len(vchAddr) == 32: return (BIP155Network.I2P, vchAddr) else: raise ValueError(f'Invalid I2P {vchAddr}') elif '.' in addr: # IPv4 return (BIP155Network.IPV4, bytes((int(x) for x in addr.split('.')))) elif ':' in addr: # IPv6 sub = [[], []] # prefix, suffix x = 0 addr = addr.split(':') for i,comp in enumerate(addr): if comp == '': if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end continue x += 1 # :: skips to suffix assert(x < 2) else: # two bytes per component val = int(comp, 16) sub[x].append(val >> 8) sub[x].append(val & 0xff) nullbytes = 16 - len(sub[0]) - len(sub[1]) assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)) return (BIP155Network.IPV6, bytes(sub[0] + ([0] * nullbytes) + sub[1])) else: raise ValueError('Could not parse address %s' % addr) def parse_spec(s): '''Convert endpoint string to BIP155 (networkID, addr, port) tuple.''' match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s) if match: # ipv6 host = match.group(1) port = match.group(2) elif s.count(':') > 1: # ipv6, no port host = s port = '' else: (host,_,port) = s.partition(':') if not port: port = 0 else: port = int(port) host = name_to_bip155(host) if host[0] == BIP155Network.TORV2: return None # TORV2 is no longer supported, so we ignore it else: return host + (port, ) def ser_compact_size(l): r = b"" if l < 253: r = struct.pack("B", l) elif l < 0x10000: r = struct.pack("<BH", 253, l) elif l < 0x100000000: r = struct.pack("<BI", 254, l) else: r = struct.pack("<BQ", 255, l) return r def bip155_serialize(spec): ''' Serialize (networkID, addr, port) tuple to BIP155 binary format. ''' r = b"" r += struct.pack('B', spec[0].value) r += ser_compact_size(len(spec[1])) r += spec[1] r += struct.pack('>H', spec[2]) return r def METHOD_NAME(g, f, structname): g.write('static const uint8_t %s[] = {\n' % structname) for line in f: comment = line.find('#') if comment != -1: line = line[0:comment] line = line.strip() if not line: continue spec = parse_spec(line) if spec is None: # ignore this entry (e.g. 
no longer supported addresses like TORV2) continue blob = bip155_serialize(spec) hoststr = ','.join(('0x%02x' % b) for b in blob) g.write(f' {hoststr},\n') g.write('};\n') def main(): if len(sys.argv)<2: print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr) sys.exit(1) g = sys.stdout indir = sys.argv[1] g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n') g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n') g.write('/**\n') g.write(' * List of fixed seed nodes for the bitcoin network\n') g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n') g.write(' *\n') g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\n') g.write(' */\n') with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f: METHOD_NAME(g, f, 'chainparams_seed_main') g.write('\n') with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f: METHOD_NAME(g, f, 'chainparams_seed_test') g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n') if __name__ == '__main__': main()
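As a small worked example (not part of the script), an IPv4 endpoint parses into a (network, address, port) tuple and serializes to the BIP155 byte layout that ends up in chainparamsseeds.h:

spec = parse_spec("1.2.3.4:8333")
# spec == (BIP155Network.IPV4, b'\x01\x02\x03\x04', 8333)
bip155_serialize(spec).hex()
# -> '010401020304208d'  (network id, compact length, address bytes, big-endian port)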
    350
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest class CreateInstanceRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'PetaData', '2016-01-01', 'CreateInstance','petadata') self.set_method('POST') def get_ResourceOwnerId(self): return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self,ResourceOwnerId): self.add_query_param('ResourceOwnerId',ResourceOwnerId) def get_NodeSpec(self): return self.get_query_params().get('NodeSpec') def set_NodeSpec(self,NodeSpec): self.add_query_param('NodeSpec',NodeSpec) def METHOD_NAME(self): return self.get_query_params().get('ClientToken') def set_ClientToken(self,ClientToken): self.add_query_param('ClientToken',ClientToken) def get_NetworkType(self): return self.get_query_params().get('NetworkType') def set_NetworkType(self,NetworkType): self.add_query_param('NetworkType',NetworkType) def get_AccountName(self): return self.get_query_params().get('AccountName') def set_AccountName(self,AccountName): self.add_query_param('AccountName',AccountName) def get_SecurityToken(self): return self.get_query_params().get('SecurityToken') def set_SecurityToken(self,SecurityToken): self.add_query_param('SecurityToken',SecurityToken) def get_NodeNumber(self): return self.get_query_params().get('NodeNumber') def set_NodeNumber(self,NodeNumber): self.add_query_param('NodeNumber',NodeNumber) def get_ResourceOwnerAccount(self): return self.get_query_params().get('ResourceOwnerAccount') def set_ResourceOwnerAccount(self,ResourceOwnerAccount): self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount) def get_OwnerAccount(self): return self.get_query_params().get('OwnerAccount') def set_OwnerAccount(self,OwnerAccount): self.add_query_param('OwnerAccount',OwnerAccount) def get_OwnerId(self): return self.get_query_params().get('OwnerId') def set_OwnerId(self,OwnerId): self.add_query_param('OwnerId',OwnerId) def get_SecurityIPList(self): return self.get_query_params().get('SecurityIPList') def set_SecurityIPList(self,SecurityIPList): self.add_query_param('SecurityIPList',SecurityIPList) def get_VSwitchId(self): return self.get_query_params().get('VSwitchId') def set_VSwitchId(self,VSwitchId): self.add_query_param('VSwitchId',VSwitchId) def get_AccountPassword(self): return self.get_query_params().get('AccountPassword') def set_AccountPassword(self,AccountPassword): self.add_query_param('AccountPassword',AccountPassword) def get_InstanceName(self): return self.get_query_params().get('InstanceName') def set_InstanceName(self,InstanceName): self.add_query_param('InstanceName',InstanceName) def get_DBName(self): return self.get_query_params().get('DBName') def set_DBName(self,DBName): self.add_query_param('DBName',DBName) def get_VpcId(self): return 
self.get_query_params().get('VpcId') def set_VpcId(self,VpcId): self.add_query_param('VpcId',VpcId) def get_ZoneId(self): return self.get_query_params().get('ZoneId') def set_ZoneId(self,ZoneId): self.add_query_param('ZoneId',ZoneId) def get_ChargeType(self): return self.get_query_params().get('ChargeType') def set_ChargeType(self,ChargeType): self.add_query_param('ChargeType',ChargeType)
    351
    import shutil import os import stat import bpy import arm.utils from arm import log if arm.is_reload(__name__): log = arm.reload_module(log) arm.utils = arm.reload_module(arm.utils) else: arm.enable_reload(__name__) assets = [] reserved_names = ['return.'] khafile_params = [] khafile_defs = [] khafile_defs_last = [] embedded_data = [] shaders = [] shaders_last = [] shaders_external = [] shader_datas = [] shader_passes = [] shader_passes_assets = {} shader_cons = {} def reset(): global assets global khafile_params global khafile_defs global khafile_defs_last global embedded_data global shaders global shaders_last global shaders_external global shader_datas global shader_passes global shader_cons assets = [] khafile_params = [] khafile_defs_last = khafile_defs khafile_defs = [] embedded_data = [] shaders_last = shaders shaders = [] shaders_external = [] shader_datas = [] shader_passes = [] shader_cons = {} shader_cons['mesh_vert'] = [] shader_cons['depth_vert'] = [] shader_cons['depth_frag'] = [] shader_cons['voxel_vert'] = [] shader_cons['voxel_frag'] = [] shader_cons['voxel_geom'] = [] def add(asset_file): global assets # Asset already exists, do nothing if asset_file in assets: return asset_file_base = os.path.basename(asset_file) for f in assets: f_file_base = os.path.basename(f) if f_file_base == asset_file_base: return assets.append(asset_file) # Reserved file name for f in reserved_names: if f in asset_file: log.warn(f'File "{asset_file}" contains reserved keyword, this will break C++ builds!') def add_khafile_def(d): global khafile_defs if d not in khafile_defs: khafile_defs.append(d) def add_khafile_param(p): global khafile_params if p not in khafile_params: khafile_params.append(p) def add_embedded_data(file): global embedded_data if file not in embedded_data: embedded_data.append(file) def add_shader(file): global shaders global shaders_last if file not in shaders: shaders.append(file) def add_shader_data(file): global shader_datas if file not in shader_datas: shader_datas.append(file) def add_shader_pass(data_name): global shader_passes # Shader data for passes are written into single shader_datas.arm file add_shader_data(arm.utils.get_fp_build() + '/compiled/Shaders/shader_datas.arm') if data_name not in shader_passes: shader_passes.append(data_name) def METHOD_NAME(file): global shaders_external shaders_external.append(file) name = file.split('/')[-1].split('\\')[-1] add_shader(arm.utils.get_fp_build() + '/compiled/Shaders/' + name) invalidate_enabled = True # Disable invalidating during build process def remove_readonly(func, path, excinfo): os.chmod(path, stat.S_IWRITE) func(path) def invalidate_shader_cache(self, context): # compiled.inc changed, recompile all shaders next time global invalidate_enabled if invalidate_enabled is False: return fp = arm.utils.get_fp_build() if os.path.isdir(fp + '/compiled/Shaders'): shutil.rmtree(fp + '/compiled/Shaders', onerror=remove_readonly) if os.path.isdir(fp + '/debug/html5-resources'): shutil.rmtree(fp + '/debug/html5-resources', onerror=remove_readonly) if os.path.isdir(fp + '/krom-resources'): shutil.rmtree(fp + '/krom-resources', onerror=remove_readonly) if os.path.isdir(fp + '/debug/krom-resources'): shutil.rmtree(fp + '/debug/krom-resources', onerror=remove_readonly) if os.path.isdir(fp + '/windows-resources'): shutil.rmtree(fp + '/windows-resources', onerror=remove_readonly) if os.path.isdir(fp + '/linux-resources'): shutil.rmtree(fp + '/linux-resources', onerror=remove_readonly) if os.path.isdir(fp + '/osx-resources'): 
shutil.rmtree(fp + '/osx-resources', onerror=remove_readonly) def invalidate_compiled_data(self, context): global invalidate_enabled if invalidate_enabled is False: return fp = arm.utils.get_fp_build() if os.path.isdir(fp + '/compiled'): shutil.rmtree(fp + '/compiled', onerror=remove_readonly) def invalidate_mesh_data(self, context): fp = arm.utils.get_fp_build() if os.path.isdir(fp + '/compiled/Assets/meshes'): shutil.rmtree(fp + '/compiled/Assets/meshes', onerror=remove_readonly) def invalidate_envmap_data(self, context): fp = arm.utils.get_fp_build() if os.path.isdir(fp + '/compiled/Assets/envmaps'): shutil.rmtree(fp + '/compiled/Assets/envmaps', onerror=remove_readonly) def invalidate_unpacked_data(self, context): fp = arm.utils.get_fp_build() if os.path.isdir(fp + '/compiled/Assets/unpacked'): shutil.rmtree(fp + '/compiled/Assets/unpacked', onerror=remove_readonly) def invalidate_mesh_cache(self, context): if context.object is None or context.object.data is None: return context.object.data.arm_cached = False def invalidate_instance_cache(self, context): if context.object is None or context.object.data is None: return invalidate_mesh_cache(self, context) for slot in context.object.material_slots: slot.material.arm_cached = False def invalidate_compiler_cache(self, context): bpy.data.worlds['Arm'].arm_recompile = True def shader_equal(sh, ar, shtype): # Merge equal shaders for e in ar: if sh.is_equal(e): sh.context.data[shtype] = e.context.data[shtype] sh.is_linked = True return ar.append(sh) def vs_equal(c, ar): shader_equal(c.vert, ar, 'vertex_shader') def fs_equal(c, ar): shader_equal(c.frag, ar, 'fragment_shader') def gs_equal(c, ar): shader_equal(c.geom, ar, 'geometry_shader') def tcs_equal(c, ar): shader_equal(c.tesc, ar, 'tesscontrol_shader') def tes_equal(c, ar): shader_equal(c.tese, ar, 'tesseval_shader')
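The `add()` helper above registers an asset only once, comparing by basename so two files with the same name but different paths do not both get exported. A minimal standalone sketch of that add-once pattern (names and the warning text are illustrative; the real module also tracks khafile params, shaders, and embedded data):

```python
import os

assets = []                   # illustrative stand-in for the module-level list
reserved_names = ['return.']

def add(asset_file: str) -> None:
    """Register an asset once, comparing by basename."""
    if asset_file in assets:
        return
    base = os.path.basename(asset_file)
    if any(os.path.basename(f) == base for f in assets):
        return
    assets.append(asset_file)
    if any(r in asset_file for r in reserved_names):
        print(f'warning: "{asset_file}" contains a reserved keyword')

add('textures/wood.png')
add('other/textures/wood.png')   # skipped: same basename already registered
print(assets)                    # ['textures/wood.png']
```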
    352
    from unittest import TestCase from pcs.common.pacemaker.resource.operations import CibResourceOperationDto from pcs.lib.cib.resource import agent from pcs.lib.resource_agent import ( ResourceAgentAction, ResourceAgentMetadata, ResourceAgentName, ) from pcs.lib.resource_agent.const import OCF_1_0 class GetDefaultOperationInterval(TestCase): def test_return_0s_on_name_different_from_monitor(self): self.assertEqual("0s", agent.get_default_operation_interval("start")) def METHOD_NAME(self): self.assertEqual("60s", agent.get_default_operation_interval("monitor")) class CompleteOperationsOptions(TestCase): def test_add_intervals_everywhere_is_missing(self): self.assertEqual( agent.complete_operations_options( [ {"name": "monitor", "interval": "20s"}, {"name": "start"}, ] ), [ {"name": "monitor", "interval": "20s"}, {"name": "start", "interval": "0s"}, ], ) class GetDefaultOperations(TestCase): fixture_actions = [ ResourceAgentAction( "custom1", "40s", None, None, None, None, False, False ), ResourceAgentAction( "custom2", "60s", "25s", None, None, None, False, False ), ResourceAgentAction( "meta-data", None, None, None, None, None, False, False ), ResourceAgentAction( "monitor", "30s", "10s", None, None, None, False, False ), ResourceAgentAction( "start", None, "40s", None, None, None, False, False ), ResourceAgentAction( "status", "20s", "15s", None, None, None, False, False ), ResourceAgentAction( "validate-all", None, None, None, None, None, False, False ), ] fixture_actions_meta_only = [ ResourceAgentAction( "meta-data", None, None, None, None, None, False, False ) ] maxDiff = None @staticmethod def fixture_agent(actions): return ResourceAgentMetadata( ResourceAgentName("ocf", "pacemaker", "Dummy"), agent_exists=True, ocf_version=OCF_1_0, shortdesc="", longdesc="", parameters=[], actions=actions, ) @staticmethod def fixture_stonith_agent(actions): return ResourceAgentMetadata( ResourceAgentName("stonith", None, "fence_test"), agent_exists=True, ocf_version=OCF_1_0, shortdesc="", longdesc="", parameters=[], actions=actions, ) @staticmethod def op_fixture(name, interval, timeout): return CibResourceOperationDto( id="", name=name, interval=interval, description=None, start_delay=None, interval_origin=None, timeout=timeout, enabled=None, record_pending=None, role=None, on_fail=None, meta_attributes=[], instance_attributes=[], ) def test_select_only_actions_for_cib(self): self.assertEqual( agent.get_default_operations( self.fixture_agent(self.fixture_actions) ), [ self.op_fixture("custom1", "0s", "40s"), self.op_fixture("custom2", "25s", "60s"), self.op_fixture("monitor", "10s", "30s"), self.op_fixture("start", "40s", None), ], ) def test_select_only_actions_for_cib_stonith(self): self.assertEqual( agent.get_default_operations( self.fixture_stonith_agent(self.fixture_actions) ), [self.op_fixture("monitor", "10s", "30s")], ) def test_select_only_necessary_actions_for_cib(self): self.assertEqual( agent.get_default_operations( self.fixture_agent(self.fixture_actions), necessary_only=True ), [self.op_fixture("monitor", "10s", "30s")], ) def test_select_only_necessary_actions_for_cib_stonith(self): self.assertEqual( agent.get_default_operations( self.fixture_stonith_agent(self.fixture_actions), necessary_only=True, ), [self.op_fixture("monitor", "10s", "30s")], ) def test_complete_monitor(self): self.assertEqual( agent.get_default_operations( self.fixture_agent(self.fixture_actions_meta_only), necessary_only=True, ), [self.op_fixture("monitor", "60s", None)], ) def 
test_complete_monitor_stonith(self): self.assertEqual( agent.get_default_operations( self.fixture_stonith_agent(self.fixture_actions_meta_only), necessary_only=True, ), [self.op_fixture("monitor", "60s", None)], )
    353
    from __future__ import print_function import IMP.test import IMP.algebra displayit = False if displayit: import IMP.display from IMP.algebra import * import pickle class Tests(IMP.test.TestCase): def test_magnitude(self): """Check dense log grid of ints""" print("construct") bb = BoundingBox3D(Vector3D(1, 1, 1), Vector3D(15, 15, 15)) sz = [5, 5, 5] le = LogEmbedding3D(bb, Vector3D(2.0, 2.0, 2.0), sz) g = DenseIntLogGrid3D(sz, le) bbo = g.get_bounding_box() print(bb, bbo) if displayit: w = IMP.display.PymolWriter(self.get_tmp_file_name("log.pym")) bbg = IMP.display.BoundingBoxGeometry(bb) bbg.set_color(IMP.display.get_display_color(0)) bbg.set_name("in") w.add_geometry(bbg) bbog = IMP.display.BoundingBoxGeometry(bbo) bbog.set_color(IMP.display.get_display_color(1)) bbog.set_name("out") w.add_geometry(bbog) for i in range(0, sz[0]): for j in range(0, sz[0]): for k in range(0, sz[0]): ei = ExtendedGridIndex3D(i, j, k) gi = g.get_index(ei) bbi = g.get_bounding_box(ei) bbog = IMP.display.BoundingBoxGeometry(bbi) bbog.set_name(str(ei)) w.add_geometry(bbog) cg = IMP.display.PointGeometry(g.get_center(ei)) cg.set_name("center") w.add_geometry(cg) self.assertAlmostEqual(bbo.get_corner(1)[0], 15, delta=.1) def METHOD_NAME(self): """Test mixed log embedding""" eb = IMP.algebra.LogEmbedding3D(IMP.algebra.Vector3D(0, 0, 0), IMP.algebra.Vector3D(1, 1, 1), IMP.algebra.Vector3D(1, 2, 1)) for i in range(0, 10): gi = IMP.algebra.ExtendedGridIndex3D([i, i, i]) center = eb.get_center(gi) print(center) def test_default_embedding_pickle(self): """Test (un-)pickle of DefaultEmbedding3D""" e1 = IMP.algebra.DefaultEmbedding3D(IMP.algebra.Vector3D(1, 2, 3), IMP.algebra.Vector3D(2, 4, 5)) e2 = IMP.algebra.DefaultEmbedding3D(IMP.algebra.Vector3D(4, 5, 6), IMP.algebra.Vector3D(7, 8, 9)) e2.foo = 'bar' dump = pickle.dumps((e1, e2)) newe1, newe2 = pickle.loads(dump) self.assertLess(IMP.algebra.get_distance( e1.get_origin(), newe1.get_origin()), 1e-4) self.assertLess(IMP.algebra.get_distance( e1.get_unit_cell(), newe1.get_unit_cell()), 1e-4) self.assertLess(IMP.algebra.get_distance( e2.get_origin(), newe2.get_origin()), 1e-4) self.assertLess(IMP.algebra.get_distance( e2.get_unit_cell(), newe2.get_unit_cell()), 1e-4) self.assertEqual(newe2.foo, 'bar') self.assertRaises(TypeError, e1._set_from_binary, 42) def test_log_embedding_pickle(self): """Test (un-)pickle of LogEmbedding3D""" e1 = IMP.algebra.LogEmbedding3D(IMP.algebra.Vector3D(1, 2, 3), IMP.algebra.Vector3D(2, 4, 5), IMP.algebra.Vector3D(7, 8, 9)) e2 = IMP.algebra.LogEmbedding3D(IMP.algebra.Vector3D(4, 5, 6), IMP.algebra.Vector3D(7, 8, 9), IMP.algebra.Vector3D(17, 18, 19)) e2.foo = 'bar' dump = pickle.dumps((e1, e2)) newe1, newe2 = pickle.loads(dump) self.assertLess(IMP.algebra.get_distance( e1.get_origin(), newe1.get_origin()), 1e-4) self.assertLess(IMP.algebra.get_distance( e1.get_unit_cell(), newe1.get_unit_cell()), 1e-4) self.assertLess(IMP.algebra.get_distance( e2.get_origin(), newe2.get_origin()), 1e-4) self.assertLess(IMP.algebra.get_distance( e2.get_unit_cell(), newe2.get_unit_cell()), 1e-4) self.assertEqual(newe2.foo, 'bar') self.assertRaises(TypeError, e1._set_from_binary, 42) def test_grid_index_pickle(self): """Test (un-)pickle of GridIndex3D""" g1 = IMP.algebra.GridIndex3D(1, 2, 3) g2 = IMP.algebra.GridIndex3D(4, 5, 6) g2.foo = 'bar' dump = pickle.dumps((g1, g2)) newg1, newg2 = pickle.loads(dump) self.assertEqual(g1[0], newg1[0]) self.assertEqual(g1[1], newg1[1]) self.assertEqual(g1[2], newg1[2]) self.assertEqual(g2[0], newg2[0]) 
self.assertEqual(g2[1], newg2[1]) self.assertEqual(g2[2], newg2[2]) self.assertEqual(newg2.foo, 'bar') self.assertRaises(TypeError, g1._set_from_binary, 42) def test_extended_grid_index_pickle(self): """Test (un-)pickle of ExtendedGridIndex3D""" g1 = IMP.algebra.ExtendedGridIndex3D(1, 2, 3) g2 = IMP.algebra.ExtendedGridIndex3D(4, 5, 6) g2.foo = 'bar' dump = pickle.dumps((g1, g2)) newg1, newg2 = pickle.loads(dump) self.assertEqual(g1[0], newg1[0]) self.assertEqual(g1[1], newg1[1]) self.assertEqual(g1[2], newg1[2]) self.assertEqual(g2[0], newg2[0]) self.assertEqual(g2[1], newg2[1]) self.assertEqual(g2[2], newg2[2]) self.assertEqual(newg2.foo, 'bar') self.assertRaises(TypeError, g1._set_from_binary, 42) def test_unbounded_grid_range_pickle(self): """Test (un-)pickle of UnboundedGridRange3D""" g1 = IMP.algebra.UnboundedGridRange3D() g2 = IMP.algebra.UnboundedGridRange3D() g2.foo = 'bar' dump = pickle.dumps((g1, g2)) newg1, newg2 = pickle.loads(dump) self.assertEqual(newg2.foo, 'bar') self.assertRaises(TypeError, g1._set_from_binary, 42) def test_bounded_grid_range_pickle(self): """Test (un-)pickle of BoundedGridRange3D""" g1 = IMP.algebra.BoundedGridRange3D([1, 2, 3]) g2 = IMP.algebra.BoundedGridRange3D([4, 5, 6]) g2.foo = 'bar' dump = pickle.dumps((g1, g2)) newg1, newg2 = pickle.loads(dump) self.assertEqual(g1.get_end_index(), newg1.get_end_index()) self.assertEqual(g2.get_end_index(), newg2.get_end_index()) self.assertEqual(newg2.foo, 'bar') self.assertRaises(TypeError, g1._set_from_binary, 42) if __name__ == '__main__': IMP.test.main()
    354
    # Copyright (c) ZenML GmbH 2023. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. from typing import Any, Optional from unittest.mock import MagicMock, patch from uuid import UUID, uuid4 import pytest from zenml.steps.external_artifact import ExternalArtifact class MockClient: class MockArtifactResponse: def __init__(self, name): self.artifact_store_id = 42 self.name = name self.id = 123 class MockPipelineResponse: def __init__(self): self.last_successful_run = MagicMock() self.last_successful_run.artifacts = [ MockClient.MockArtifactResponse("foo"), MockClient.MockArtifactResponse("bar"), ] def __init__(self, artifact_store_id=42): self.active_stack = MagicMock() self.active_stack.artifact_store.id = artifact_store_id self.active_stack.artifact_store.path = "foo" def get_artifact(self, *args, **kwargs): return MockClient.MockArtifactResponse("foo") def get_pipeline(self, *args, **kwargs): return MockClient.MockPipelineResponse() @pytest.mark.parametrize( argnames="value,id,pipeline_name,artifact_name,exception_start", argvalues=[ [1, None, None, None, ""], [None, uuid4(), None, None, ""], [None, None, "foo", "bar", ""], [None, None, None, None, "Either a value,"], [1, uuid4(), None, None, "Only a value,"], [None, uuid4(), "foo", "bar", "Only a value,"], [1, None, "foo", "bar", "Only a value,"], [None, None, "foo", None, "`pipeline_name` and `artifact_name`"], [None, None, None, "bar", "`pipeline_name` and `artifact_name`"], ], ids=[ "good_by_value", "good_by_id", "good_by_pipeline_artifact", "bad_all_none", "bad_id_and_value", "bad_id_and_pipeline_artifact", "bad_value_and_pipeline_artifact", "bad_only_pipeline", "bad_only_artifact", ], ) def test_external_artifact_init( value: Optional[Any], id: Optional[UUID], pipeline_name: Optional[str], artifact_name: Optional[str], exception_start: str, ): """Tests that initialization logic of `ExternalArtifact` works expectedly.""" if exception_start: with pytest.raises(ValueError, match=exception_start): ExternalArtifact( value=value, id=id, pipeline_name=pipeline_name, artifact_name=artifact_name, ) else: ExternalArtifact( value=value, id=id, pipeline_name=pipeline_name, artifact_name=artifact_name, ) @patch("zenml.steps.external_artifact.Client") @patch("zenml.steps.external_artifact.fileio") @patch("zenml.steps.external_artifact.artifact_utils") def test_upload_if_necessary_by_value( mocked_zenml_client, mocked_fileio, mocked_artifact_utils, ): mocked_fileio.exists.return_value = False ea = ExternalArtifact(value=1) assert ea._id is None ea.upload_if_necessary() assert ea._id is not None assert ea._value is not None assert ea._pipeline_name is None assert ea._artifact_name is None @pytest.mark.skip @patch("zenml.steps.external_artifact.Client") def test_upload_if_necessary_by_id(mocked_zenml_client): mocked_zenml_client.return_value = MockClient() ea = ExternalArtifact(id=123) assert ea._value is None assert ea._pipeline_name is None assert ea._artifact_name is None assert ea._id is not None assert ea.upload_if_necessary() == 123 
@patch("zenml.steps.external_artifact.Client") def test_upload_if_necessary_by_pipeline_and_artifact( mocked_zenml_client, ): mocked_zenml_client.return_value = MockClient() ea = ExternalArtifact(pipeline_name="foo", artifact_name="bar") assert ea._value is None assert ea._pipeline_name is not None assert ea._artifact_name is not None assert ea._id is None assert ea.upload_if_necessary() == 123 assert ea._id == 123 @patch("zenml.steps.external_artifact.Client") def test_upload_if_necessary_by_pipeline_and_artifact_other_artifact_store( mocked_zenml_client, ): mocked_zenml_client.return_value = MockClient(artifact_store_id=45) with pytest.raises(RuntimeError, match=r"The artifact bar \(ID: 123\)"): ExternalArtifact( pipeline_name="foo", artifact_name="bar" ).upload_if_necessary() @patch("zenml.steps.external_artifact.Client") def METHOD_NAME( mocked_zenml_client, ): mocked_zenml_client.return_value = MockClient() with pytest.raises(RuntimeError, match="Artifact with name `foobar`"): ExternalArtifact( pipeline_name="foo", artifact_name="foobar" ).upload_if_necessary()
    355
    import numpy as np import pytest import torch from lhotse import AudioSource, CutSet, MultiCut, Recording, SupervisionSegment from lhotse.audio import RecordingSet from lhotse.cut import PaddingCut from lhotse.utils import fastcopy @pytest.fixture def recording(): return Recording.from_file("test/fixtures/libri/libri-1088-134315-0000_8ch.wav") @pytest.fixture def mono_rir(): return Recording.from_file("test/fixtures/rir/sim_1ch.wav") @pytest.fixture def METHOD_NAME(): return Recording.from_file("test/fixtures/rir/real_8ch.wav") @pytest.fixture def cut_with_supervision(recording, cut_channels=None, sup_channels=None): if cut_channels is None: cut_channels = [0, 1, 2, 3, 4, 5, 6, 7] if sup_channels is None: sup_channels = [0, 1, 2, 3, 4, 5, 6, 7] return MultiCut( id="cut", start=0.0, duration=1.0, channel=cut_channels, supervisions=[ SupervisionSegment( id="sup", recording_id="rec", start=0.0, duration=1.0, channel=sup_channels, ) ], recording=recording, ) def test_cut_perturb_speed11(cut_with_supervision): cut_sp = cut_with_supervision.perturb_speed(1.1) assert cut_sp.start == 0.0 assert cut_sp.duration == 0.9090625 assert cut_sp.end == 0.9090625 assert cut_sp.num_samples == 14545 assert cut_sp.recording.duration == 14.5818125 assert cut_sp.recording.num_samples == 233309 assert cut_sp.supervisions[0].start == 0.0 assert cut_sp.supervisions[0].duration == 0.9090625 assert cut_sp.supervisions[0].end == 0.9090625 cut_samples = cut_sp.load_audio() assert cut_samples.shape[0] == 8 assert cut_samples.shape[1] == 14545 recording_samples = cut_sp.recording.load_audio() assert recording_samples.shape[0] == 8 assert recording_samples.shape[1] == 233309 def test_cut_perturb_speed09(cut_with_supervision): cut_sp = cut_with_supervision.perturb_speed(0.9) assert cut_sp.start == 0.0 assert cut_sp.duration == 1.111125 assert cut_sp.end == 1.111125 assert cut_sp.num_samples == 17778 assert cut_sp.recording.duration == 17.82225 assert cut_sp.recording.num_samples == 285156 assert cut_sp.supervisions[0].start == 0.0 assert cut_sp.supervisions[0].duration == 1.111125 assert cut_sp.supervisions[0].end == 1.111125 cut_samples = cut_sp.load_audio() assert cut_samples.shape[0] == 8 assert cut_samples.shape[1] == 17778 recording_samples = cut_sp.recording.load_audio() assert recording_samples.shape[0] == 8 assert recording_samples.shape[1] == 285156 def test_cut_perturb_tempo09(cut_with_supervision): cut_tp = cut_with_supervision.perturb_tempo(0.9) assert cut_tp.start == 0.0 assert cut_tp.duration == 1.111125 assert cut_tp.end == 1.111125 assert cut_tp.num_samples == 17778 assert cut_tp.recording.duration == 17.82225 assert cut_tp.recording.num_samples == 285156 assert cut_tp.supervisions[0].start == 0.0 assert cut_tp.supervisions[0].duration == 1.111125 assert cut_tp.supervisions[0].end == 1.111125 cut_samples = cut_tp.load_audio() assert cut_samples.shape[0] == 8 assert cut_samples.shape[1] == 17778 recording_samples = cut_tp.recording.load_audio() assert recording_samples.shape[0] == 8 assert recording_samples.shape[1] == 285156 def test_cut_perturb_tempo11(cut_with_supervision): cut_tp = cut_with_supervision.perturb_tempo(1.1) assert cut_tp.start == 0.0 assert cut_tp.duration == 0.9090625 assert cut_tp.end == 0.9090625 assert cut_tp.num_samples == 14545 assert cut_tp.recording.duration == 14.5818125 assert cut_tp.recording.num_samples == 233309 assert cut_tp.supervisions[0].start == 0.0 assert cut_tp.supervisions[0].duration == 0.9090625 assert cut_tp.supervisions[0].end == 0.9090625 cut_samples = 
cut_tp.load_audio() assert cut_samples.shape[0] == 8 assert cut_samples.shape[1] == 14545 recording_samples = cut_tp.recording.load_audio() assert recording_samples.shape[0] == 8 assert recording_samples.shape[1] == 233309 def test_resample_cut(cut_with_supervision): resampled = cut_with_supervision.resample(8000) assert cut_with_supervision.sampling_rate == 16000 assert resampled.sampling_rate == 8000 assert cut_with_supervision.num_samples == 2 * resampled.num_samples samples = resampled.load_audio() assert samples.shape[1] == resampled.num_samples @pytest.mark.parametrize("scale", [0.125, 2.0]) def test_cut_perturb_volume(cut_with_supervision, scale): cut_vp = cut_with_supervision.perturb_volume(scale) assert cut_vp.start == cut_with_supervision.start assert cut_vp.duration == cut_with_supervision.duration assert cut_vp.end == cut_with_supervision.end assert cut_vp.num_samples == cut_with_supervision.num_samples assert cut_vp.recording.duration == cut_with_supervision.recording.duration assert cut_vp.recording.num_samples == cut_with_supervision.recording.num_samples assert cut_vp.supervisions[0].start == cut_with_supervision.supervisions[0].start assert ( cut_vp.supervisions[0].duration == cut_with_supervision.supervisions[0].duration ) assert cut_vp.supervisions[0].end == cut_with_supervision.supervisions[0].end assert cut_vp.load_audio().shape == cut_with_supervision.load_audio().shape assert ( cut_vp.recording.load_audio().shape == cut_with_supervision.recording.load_audio().shape ) np.testing.assert_array_almost_equal( cut_vp.load_audio(), cut_with_supervision.load_audio() * scale ) np.testing.assert_array_almost_equal( cut_vp.recording.load_audio(), cut_with_supervision.recording.load_audio() * scale, ) @pytest.mark.parametrize( "rir, rir_channels, expected_channels", [ ("mono_rir", [0], [0, 1, 2, 3, 4, 5, 6, 7]), pytest.param("mono_rir", [1], None, marks=pytest.mark.xfail), ("multi_channel_rir", [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]), ("multi_channel_rir", [0], [0, 1, 2, 3, 4, 5, 6, 7]), ("multi_channel_rir", [1], [0, 1, 2, 3, 4, 5, 6, 7]), pytest.param("multi_channel_rir", [0, 1], None, marks=pytest.mark.xfail), ], ) def test_cut_reverb_rir( cut_with_supervision, rir, rir_channels, expected_channels, request ): rir = request.getfixturevalue(rir) cut = cut_with_supervision cut_rvb = cut.reverb_rir(rir, rir_channels=rir_channels) print(cut_rvb.channel) assert cut_rvb.start == cut.start assert cut_rvb.duration == cut.duration assert cut_rvb.end == cut.end assert cut_rvb.num_samples == cut.num_samples assert cut_rvb.recording.duration == cut.recording.duration assert cut_rvb.recording.num_samples == cut.recording.num_samples assert cut_rvb.supervisions[0].start == cut.supervisions[0].start assert cut_rvb.supervisions[0].duration == cut.supervisions[0].duration assert cut_rvb.supervisions[0].end == cut.supervisions[0].end assert cut_rvb.load_audio().shape == cut.load_audio().shape assert cut_rvb.recording.load_audio().shape == cut.recording.load_audio().shape assert cut_rvb.channel == expected_channels def test_cut_reverb_fast_rir(cut_with_supervision): cut = cut_with_supervision with pytest.raises(AssertionError): cut_rvb = cut.reverb_rir(rir_recording=None)
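The expected values in the speed/tempo perturbation tests above follow from simple resampling arithmetic: a 1.0 s cut at 16 kHz has 16,000 samples, and perturbing by factor f divides the duration (and sample count) by f. Lhotse's exact rounding is an implementation detail; the snippet below just reproduces the asserted cut-level numbers:

```python
sampling_rate = 16_000
cut_num_samples = 16_000                     # the 1.0 s fixture cut

for factor, expected in [(1.1, 14545), (0.9, 17778)]:
    perturbed = round(cut_num_samples / factor)
    assert perturbed == expected
    # 1.1 -> 14545 samples (0.9090625 s); 0.9 -> 17778 samples (1.111125 s)
    print(factor, perturbed, perturbed / sampling_rate)
```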
    356
    ################################################################################ # Creme is a free/open-source Customer Relationship Management software # Copyright (C) 2016-2020 Hybird # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ################################################################################ import logging from django.contrib.contenttypes.models import ContentType from django.http import QueryDict from django.utils.translation import gettext from django.utils.translation import gettext_lazy as _ from django.utils.translation import ngettext from creme.documents import get_document_model from ..forms.mass_import import form_factory, get_header from ..models import MassImportJobResult from ..utils.translation import get_model_verbose_name from .base import JobProgress, JobType logger = logging.getLogger(__name__) class _MassImportType(JobType): id = JobType.generate_id('creme_core', 'mass_import') verbose_name = _('Mass import') def _build_POST(self, job_data): return QueryDict(job_data['POST'].encode('utf8')) def _get_document(self, POST): return get_document_model().objects.get(id=POST['document']) def _get_ctype(self, job_data): return ContentType.objects.get_for_id(job_data['ctype']) def _execute(self, job): job_data = job.data POST = self._build_POST(job_data) doc = self._get_document(POST) header = get_header(doc.filedata, has_header='has_header' in POST) form_class = form_factory(self._get_ctype(job_data), header) form = form_class(user=job.user, data=POST) if not form.is_valid(): # TODO: unit test raise self.Error( gettext('Invalid data [{}]').format(form.errors.as_text()) ) form.process(job) def progress(self, job): count = MassImportJobResult.objects.filter(job=job).count() return JobProgress( percentage=None, label=ngettext( '{count} line has been processed.', '{count} lines have been processed.', count ).format(count=count) ) @property def results_bricks(self): from ..bricks import MassImportJobErrorsBrick return [MassImportJobErrorsBrick()] def METHOD_NAME(self, job): try: job_data = job.data desc = [ gettext('Import «{model}» from {doc}').format( model=self._get_ctype(job_data).model_class()._meta.verbose_name, doc=self._get_document(self._build_POST(job_data)), ), ] except Exception: # TODO: unit test logger.exception('Error in _MassImportType.get_description') desc = ['?'] return desc def get_stats(self, job): stats = [] result_qs = MassImportJobResult.objects.filter(job=job) lines_count = result_qs.count() entity_result_qs = result_qs.filter(entity__isnull=False) created_count = entity_result_qs.filter(updated=False).count() updated_count = entity_result_qs.filter(updated=True).count() model = self._get_ctype(job.data).model_class() if created_count: stats.append( ngettext( '{count} «{model}» has been created.', '{count} «{model}» have been created.', created_count ).format( count=created_count, model=get_model_verbose_name(model, created_count), ) ) 
elif updated_count != lines_count: stats.append( gettext('No «{model}» has been created.').format( model=model._meta.verbose_name, ) ) if updated_count: stats.append( ngettext( '{count} «{model}» has been updated.', '{count} «{model}» have been updated.', updated_count ).format( count=updated_count, model=get_model_verbose_name(model, updated_count), ) ) elif created_count != lines_count: stats.append( gettext('No «{model}» has been updated.').format( model=model._meta.verbose_name, ) ) stats.append( ngettext( '{count} line in the file.', '{count} lines in the file.', lines_count, ).format(count=lines_count) ) return stats mass_import_type = _MassImportType()
    357
    from __future__ import absolute_import from six.moves import xrange from argparse import ArgumentParser import os import logging import random from toil.common import Toil from toil.job import Job def setup(job, input_file_id, n, down_checkpoints): """Sets up the sort. Returns the FileID of the sorted file """ # Write the input file to the file store job.fileStore.logToMaster("Starting the merge sort") return job.addChildJobFn(down, input_file_id, n, down_checkpoints=down_checkpoints, memory='600M').rv() def down(job, input_file_id, n, down_checkpoints): """Input is a file and a range into that file to sort and an output location in which to write the sorted file. If the range is larger than a threshold N the range is divided recursively and a follow on job is then created which merges back the results. Otherwise, the file is sorted and placed in the output. """ # Read the file input_file = job.fileStore.readGlobalFile(input_file_id, cache=False) length = os.path.getsize(input_file) if length > n: # We will subdivide the file job.fileStore.logToMaster("Splitting file: %s of size: %s" % (input_file_id, length), level=logging.CRITICAL) # Split the file into two copies mid_point = get_midpoint(input_file, 0, length) t1 = job.fileStore.getLocalTempFile() with open(t1, 'w') as fH: copy_subrange_of_file(input_file, 0, mid_point + 1, fH) t2 = job.fileStore.getLocalTempFile() with open(t2, 'w') as fH: copy_subrange_of_file(input_file, mid_point + 1, length, fH) # Call the down function recursively return job.addFollowOnJobFn(up, job.addChildJobFn(down, job.fileStore.writeGlobalFile(t1), n, down_checkpoints=down_checkpoints, memory='600M').rv(), job.addChildJobFn(down, job.fileStore.writeGlobalFile(t2), n, down_checkpoints=down_checkpoints, memory='600M').rv()).rv() else: # We can sort this bit of the file job.fileStore.logToMaster("Sorting file: %s of size: %s" % (input_file_id, length), level=logging.CRITICAL) # Sort the copy and write back to the fileStore output_file = job.fileStore.getLocalTempFile() sort(input_file, output_file) return job.fileStore.writeGlobalFile(output_file) def up(job, input_file_id_1, input_file_id_2): """Merges the two files and places them in the output. """ with job.fileStore.writeGlobalFileStream() as (fileHandle, output_id): with job.fileStore.readGlobalFileStream(input_file_id_1) as inputFileHandle1: with job.fileStore.readGlobalFileStream(input_file_id_2) as inputFileHandle2: job.fileStore.logToMaster("Merging %s and %s to %s" % (input_file_id_1, input_file_id_2, output_id)) METHOD_NAME(inputFileHandle1, inputFileHandle2, fileHandle) # Cleanup up the input files - these deletes will occur after the completion is successful. job.fileStore.deleteGlobalFile(input_file_id_1) job.fileStore.deleteGlobalFile(input_file_id_2) return output_id # convenience functions def sort(in_file, out_file): """Sorts the given file. """ filehandle = open(in_file, 'r') lines = filehandle.readlines() filehandle.close() lines.sort() filehandle = open(out_file, 'w') for line in lines: filehandle.write(line) filehandle.close() def METHOD_NAME(filehandle_1, filehandle_2, output_filehandle): """Merges together two files maintaining sorted order. 
""" line2 = filehandle_2.readline() for line1 in filehandle_1.readlines(): while line2 != '' and line2 <= line1: output_filehandle.write(line2) line2 = filehandle_2.readline() output_filehandle.write(line1) while line2 != '': output_filehandle.write(line2) line2 = filehandle_2.readline() def copy_subrange_of_file(input_file, file_start, file_end, output_filehandle): """Copies the range (in bytes) between fileStart and fileEnd to the given output file handle. """ with open(input_file, 'r') as fileHandle: fileHandle.seek(file_start) data = fileHandle.read(file_end - file_start) assert len(data) == file_end - file_start output_filehandle.write(data) def get_midpoint(file, file_start, file_end): """Finds the point in the file to split. Returns an int i such that fileStart <= i < fileEnd """ filehandle = open(file, 'r') mid_point = (file_start + file_end) / 2 assert mid_point >= file_start filehandle.seek(mid_point) line = filehandle.readline() assert len(line) >= 1 if len(line) + mid_point < file_end: return mid_point + len(line) - 1 filehandle.seek(file_start) line = filehandle.readline() assert len(line) >= 1 assert len(line) + file_start <= file_end return len(line) + file_start - 1 def make_file_to_sort(file_name, lines, line_length): with open(file_name, 'w') as fileHandle: for _ in xrange(lines): line = "".join(random.choice('actgACTGNXYZ') for _ in xrange(line_length - 1)) + '\n' fileHandle.write(line) def main(): parser = ArgumentParser() Job.Runner.addToilOptions(parser) parser.add_argument('--num-lines', default=1000, help='Number of lines in file to sort.', type=int) parser.add_argument('--line-length', default=50, help='Length of lines in file to sort.', type=int) parser.add_argument("--N", help="The threshold below which a serial sort function is used to sort file. " "All lines must of length less than or equal to N or program will fail", default=10000) options = parser.parse_args() if int(options.N) <= 0: raise RuntimeError("Invalid value of N: %s" % options.N) file_name = 'file_to_sort.txt' make_file_to_sort(file_name=file_name, lines=options.num_lines, line_length=options.line_length) with Toil(options) as toil: sort_file_url = 'file://' + os.path.abspath('file_to_sort.txt') if not toil.options.restart: sort_file_id = toil.importFile(sort_file_url) sorted_file_id = toil.start(Job.wrapJobFn(setup, sort_file_id, int(options.N), False, memory='600M')) else: sorted_file_id = toil.restart() toil.exportFile(sorted_file_id, sort_file_url) if __name__ == '__main__': main()
    358
    from __future__ import unicode_literals from rest_framework import generics from rest_framework import permissions from rest_framework.exceptions import NotFound from api.actions.serializers import PreprintRequestActionSerializer from api.base.views import JSONAPIBaseView from api.base import permissions as base_permissions from api.base.filters import ListFilterMixin from api.base.utils import get_object_or_error from api.requests.permissions import NodeRequestPermission, PreprintRequestPermission from api.requests.serializers import NodeRequestSerializer, PreprintRequestSerializer from framework.auth.oauth_scopes import CoreScopes from osf.models import Node, NodeRequest, PreprintRequest, Preprint class RequestMixin(object): serializer_class = None request_class = None request_display_name = None target_class = None target_display_name = None target_lookup_url_kwarg = None request_lookup_url_kwarg = None def __get_object(self, object_class, lookup_arg, display_name, check_object_permissions=True): obj = get_object_or_error( object_class, self.kwargs[lookup_arg], self.request, display_name=display_name, ) # May raise a permission denied if check_object_permissions: self.check_object_permissions(self.request, obj) return obj def get_request(self, check_object_permissions=True): return self.__get_object(self.request_class, self.request_lookup_url_kwarg, self.request_display_name, check_object_permissions=check_object_permissions) def get_target(self, check_object_permissions=True): return self.__get_object(self.target_class, self.target_lookup_url_kwarg, self.target_display_name, check_object_permissions=check_object_permissions) class NodeRequestMixin(RequestMixin): serializer_class = NodeRequestSerializer request_class = NodeRequest request_display_name = 'node request' target_class = Node target_display_name = 'node' target_lookup_url_kwarg = 'node_id' request_lookup_url_kwarg = 'request_id' class PreprintRequestMixin(RequestMixin): serializer_class = PreprintRequestSerializer request_class = PreprintRequest request_display_name = 'preprint request' target_class = Preprint target_display_name = 'preprint' target_lookup_url_kwarg = 'preprint_id' request_lookup_url_kwarg = 'request_id' class RequestDetail(JSONAPIBaseView, generics.RetrieveAPIView): permission_classes = ( permissions.IsAuthenticatedOrReadOnly, base_permissions.TokenHasScope, ) required_read_scopes = [CoreScopes.ALWAYS_PUBLIC] # Actual scope checks are done on subview.as_view required_write_scopes = [CoreScopes.NULL] view_category = 'requests' view_name = 'request-detail' def get(self, request, *args, **kwargs): request_id = self.kwargs['request_id'] if NodeRequest.objects.filter(_id=request_id).exists(): return NodeRequestDetail.as_view()(request._request, *args, **kwargs) elif PreprintRequest.objects.filter(_id=request_id).exists(): return PreprintRequestDetail.as_view()(request._request, *args, **kwargs) else: raise NotFound class NodeRequestDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeRequestMixin): permission_classes = ( permissions.IsAuthenticatedOrReadOnly, base_permissions.TokenHasScope, NodeRequestPermission, ) required_read_scopes = [CoreScopes.NODE_REQUESTS_READ] required_write_scopes = [CoreScopes.NULL] serializer_class = NodeRequestSerializer view_category = 'requests' view_name = 'node-request-detail' def get_object(self): return self.get_request() class PreprintRequestDetail(JSONAPIBaseView, generics.RetrieveAPIView, PreprintRequestMixin): permission_classes = ( 
permissions.IsAuthenticatedOrReadOnly, base_permissions.TokenHasScope, PreprintRequestPermission, ) required_read_scopes = [CoreScopes.PREPRINT_REQUESTS_READ] required_write_scopes = [CoreScopes.NULL] serializer_class = PreprintRequestSerializer view_category = 'requests' view_name = 'preprint-request-detail' def get_object(self): return self.get_request() class RequestActionList(JSONAPIBaseView, generics.ListAPIView): permission_classes = ( permissions.IsAuthenticatedOrReadOnly, base_permissions.TokenHasScope, ) required_read_scopes = [CoreScopes.ACTIONS_READ] required_write_scopes = [CoreScopes.NULL] view_category = 'requests' view_name = 'request-action-list' def get(self, request, *args, **kwargs): request_id = self.kwargs['request_id'] if PreprintRequest.objects.filter(_id=request_id).exists(): return PreprintRequestActionList.as_view()(request._request, *args, **kwargs) else: raise NotFound class PreprintRequestActionList(JSONAPIBaseView, generics.ListAPIView, PreprintRequestMixin, ListFilterMixin): permission_classes = ( permissions.IsAuthenticatedOrReadOnly, base_permissions.TokenHasScope, PreprintRequestPermission, ) required_read_scopes = [CoreScopes.ACTIONS_READ] required_write_scopes = [CoreScopes.NULL] serializer_class = PreprintRequestActionSerializer view_category = 'requests' view_name = 'preprint-request-action-list' # supports MustBeModerator def METHOD_NAME(self): request_id = self.kwargs['request_id'] preprint_request = PreprintRequest.load(request_id) if preprint_request: return preprint_request.target.provider raise NotFound # overrides ListFilterMixin def get_default_queryset(self): return self.get_request().actions.order_by('-created').all() # overrides ListAPIView def get_queryset(self): return self.get_queryset_from_request()
    359
    import hashlib import math import operator import re from collections import OrderedDict from typing import Dict, Iterable, List, Optional, Set from click import UsageError from pygitguardian.models import Match, PolicyBreak, ScanResult from ggshield.core.types import IgnoredMatch REGEX_MATCH_HIDE = re.compile(r"[^+\-\s]") REGEX_SPECIAL_CHARS = set(".^$+*?{}()[]\\|") INVALID_PATTERNS_REGEX = re.compile( r"(\*\*\*)" # the "***" sequence is not valid r"|(\*\*[^/])" # a "**" sequence must be immediately followed by a "/" r"|([^/]\*\*)" # a "**" sequence must be either at the start of the string or # immediately preceded by a "/" ) MAXIMUM_CENSOR_LENGTH = 60 def is_ignored( policy_break: PolicyBreak, matches_ignore: Iterable[IgnoredMatch], ) -> bool: """ is_ignored checks if a occurrence is ignored. There are 2 ways of ignoring a occurrence: - matching the occurrence sha - matching one of the match.match values :param policy_break: Policy Break occurrence to judge :param matches_ignore: Iterable of match ignores :return: True if ignored """ matches = [match.match for match in matches_ignore] if policy_break.policy.lower() != "secrets detection": return True if get_ignore_sha(policy_break) in matches or any( match.match in matches for match in policy_break.matches ): return True return False def remove_ignored_from_result( scan_result: ScanResult, matches_ignore: Iterable[IgnoredMatch] ) -> None: """ remove_ignored removes occurrences from a Scan Result based on a sha made from its matches. :param scan_result: ScanResult to filter :param matches_ignore: match SHAs or plaintext matches to filter out """ scan_result.policy_breaks = [ policy_break for policy_break in scan_result.policy_breaks if not is_ignored(policy_break, matches_ignore) ] scan_result.policy_break_count = len(scan_result.policy_breaks) def remove_results_from_ignore_detectors( scan_result: ScanResult, ignored_detectors: Optional[Set[str]] = None, ) -> None: if not ignored_detectors: return scan_result.policy_breaks = [ policy_break for policy_break in scan_result.policy_breaks if policy_break.break_type not in ignored_detectors ] scan_result.policy_break_count = len(scan_result.policy_breaks) def get_ignore_sha(policy_break: PolicyBreak) -> str: hashable = "".join( [ f"{match.match},{match.match_type}" for match in sorted( policy_break.matches, key=operator.attrgetter("match_type") ) ] ) return hashlib.sha256(hashable.encode("UTF-8")).hexdigest() def leak_dictionary_by_ignore_sha( policy_breaks: List[PolicyBreak], ) -> Dict[str, List[PolicyBreak]]: """ leak_dictionary_by_ignore_sha sorts matches and incidents by first appearance in file. sort incidents by first appearance on file, file wide matches have no index so give it -1 so they get bumped to the top :return: Dictionary with line number as index and a list of matches that start on said line. """ policy_breaks.sort( key=lambda x: min( # type: ignore match.index_start if match.index_start else -1 for match in x.matches ) ) sha_dict: Dict[str, List[PolicyBreak]] = OrderedDict() for policy_break in policy_breaks: policy_break.matches.sort(key=lambda x: x.index_start if x.index_start else -1) ignore_sha = get_ignore_sha(policy_break) sha_dict.setdefault(ignore_sha, []).append(policy_break) return sha_dict def translate_user_pattern(pattern: str) -> str: """ Translate the user pattern into a regex. This function assumes that the given pattern is valid and has been normalized beforehand. 
""" # Escape each special character pattern = "".join( f"\\{char}" if char in REGEX_SPECIAL_CHARS else char for char in pattern ) # Handle start/end of pattern if pattern[-1] != "/": pattern += "$" if pattern[0] == "/": pattern = "^" + pattern[1:] else: pattern = "(^|/)" + pattern # Replace * and ** sequences pattern = re.sub(r"\\\*\\\*/", "([^/]+/)*", pattern) pattern = re.sub(r"\\\*", "([^/]+)", pattern) return pattern def METHOD_NAME(pattern: str) -> bool: return bool(pattern) and not INVALID_PATTERNS_REGEX.search(pattern) def init_exclusion_regexes(paths_ignore: Iterable[str]) -> Set[re.Pattern]: """ filter_set creates a set of paths of the ignored entries from 3 sources: .gitguardian.yaml files in .git files ignore in .gitignore """ res = set() for path in paths_ignore: if not METHOD_NAME(path): raise UsageError(f"{path} is not a valid exclude pattern.") res.add(re.compile(translate_user_pattern(path))) return res def censor_string(text: str) -> str: """ Censor a string (usually a secret), revealing only the first and last 1/6th of the match up to a maximum of MAXIMUM_CENSOR_LENGTH. :return: the text censored """ len_match = len(text) start_privy_len = min(math.ceil(len_match / 6), MAXIMUM_CENSOR_LENGTH) end_privy_len = len_match - min(math.ceil(len_match / 6), MAXIMUM_CENSOR_LENGTH) censored = REGEX_MATCH_HIDE.sub("*", text) return str( text[:start_privy_len] + censored[start_privy_len:end_privy_len] + text[end_privy_len:] ) def censor_match(match: Match) -> str: return censor_string(match.match) def censor_content(content: str, policy_breaks: List[PolicyBreak]) -> str: for policy_break in policy_breaks: for match in policy_break.matches: if match.index_start is None: continue match.match = censor_match(match) content = "".join( ( content[: match.index_start], match.match, content[len(match.match) + match.index_start :], ) ) return content
    360
    from datetime import datetime from django.db import models from django.utils import timezone from elasticsearch.exceptions import NotFoundError import pytz class MetricMixin(object): @classmethod def _get_all_indices(cls): all_aliases = cls._index.get_alias() indices = set() for index, aliases in all_aliases.items(): indices.add(index) if aliases['aliases']: for alias in aliases['aliases'].keys(): indices.add(alias) return indices @classmethod def _get_relevant_indices(cls, after, before): # NOTE: This will only work for yearly indices. This logic # will need to be updated if we change to monthly or daily indices if before and after: year_range = range(after.year, before.year + 1) elif after: year_range = range(after.year, timezone.now().year + 1) else: # No metric data from before 2013 year_range = range(2013, before.year + 1) all_indices = cls._get_all_indices() relevant_indices = [ # get_index_name takes a datetime, so get Jan 1 for each relevant year cls.get_index_name(datetime(year, 1, 1, tzinfo=pytz.utc)) for year in year_range ] return [index for index in relevant_indices if index in all_indices] @classmethod def _get_id_to_count(cls, size, metric_field, count_field, after=None, before=None): """Performs the elasticsearch aggregation for get_top_by_count. Return a dict mapping ids to summed counts. If there's no data in the ES index, return None. """ search = cls.search(after=after, before=before) timestamp = {} if after: timestamp['gte'] = after if before: timestamp['lt'] = before if timestamp: search = search.filter('range', timestamp=timestamp) search.aggs.\ bucket('by_id', 'terms', field=metric_field, size=size, order={'sum_count': 'desc'}).\ metric('sum_count', 'sum', field=count_field) # Optimization: set size to 0 so that hits aren't returned (we only care about the aggregation) search = search.extra(size=0) try: response = search.execute() except NotFoundError: # _get_relevant_indices returned 1 or more indices # that doesn't exist. Fall back to unoptimized query search = search.index().index(cls._default_index()) response = search.execute() # No indexed data if not hasattr(response.aggregations, 'by_id'): return None buckets = response.aggregations.by_id.buckets # Map _id => count return { bucket.key: int(bucket.sum_count.value) for bucket in buckets } # Overrides Document.search to only search relevant # indices, determined from `after` @classmethod def search(cls, using=None, index=None, after=None, before=None, *args, **kwargs): if not index and (before or after): indices = cls._get_relevant_indices(after, before) index = ','.join(indices) return super(MetricMixin, cls).search(using=using, index=index, *args, **kwargs) @classmethod def METHOD_NAME(cls, qs, model_field, metric_field, size, order_by=None, count_field='count', annotation='metric_count', after=None, before=None): """Return a queryset annotated with the metric counts for each item. Example: :: # Get the top 10 PreprintProviders by download count top_providers = PreprintDownload.get_top_by_count( qs=PreprintProvider.objects.all(), model_field='_id', metric_field='provider_id', annotation='download_count', size=10 ) for each in top_providers: print('{}: {}'.format(each._id, each.download_count)) ``size`` determines the number of buckets returned by the aggregation. If ``size=None``, the size of the queryset is used. WARNING: Be careful when using size=None when using a large queryset. 
:param QuerySet qs: The initial queryset to annotate :param str model_field: Model field that corresponds to ``metric_field``. :param str metric_field: Metric field that corresponds to ``model_field``. :param int size: Size of the aggregation. Also determines the size of the final queryset. :param str order_by: Field to order queryset by. If `None`, orders by the metric, descending. :param datetime after: Minimum datetime to narrow the search (inclusive). :param datetime before: Maximum datetime to narrow the search (exclusive). :param str count_field: Name of the field where count values are stored. :param str annotation: Name of the annotation. """ id_to_count = cls._get_id_to_count( size=size or qs.count(), metric_field=metric_field, count_field=count_field, after=after, before=before ) if id_to_count is None: return qs.annotate(**{annotation: models.Value(0, models.IntegerField())}) # Annotate the queryset with the counts for each id # https://stackoverflow.com/a/48187723/1157536 whens = [ models.When(**{ model_field: k, 'then': v, }) for k, v in id_to_count.items() ] # By default order by annotation, desc order_by = order_by or '-{}'.format(annotation) return qs.annotate(**{ annotation: models.Case(*whens, default=0, output_field=models.IntegerField()) }).order_by(order_by)
    361
from methods.regular.regular_api import *

from methods.task.task_template.task_template_launch_handler import TaskTemplateLauncherThread
from methods.sync_events.sync_actions_handler import SyncActionsHandlerThread
from methods.action.action_flow_trigger_queue import ActionFlowTriggerQueueProcess
from shared.ingest.packet import enqueue_packet


@routes.route('/api/walrus/v1/interservice/receive', methods = ['POST'])
def METHOD_NAME():
    """
    Inter-service route to notify of a new job launch.
    For now relies on inter_service_security_token for permissions...

    This is just a starting point for more generic inter-service notification.
    Pros/cons to having the DB as an intermediary point there; for now this is fairly lightweight.

    Once we have a good pattern here, e.g. retry/overflow handling, we can probably
    remove the polling / thread.
    """
    spec_list = [
        {"inter_service_security_token": {
            'kind': str,
            'required': True,
            'security_token': settings.INTER_SERVICE_SECRET
        }},
        {"message": {
            'kind': str,
            'required': True
        }},
        {"id": {  # or "base_class_id"?
            'kind': int,
            'required': False,
            'default': None
        }},
        {"extra_params": {
            'kind': dict,
            'required': False,
            'default': None
        }},
        {"base_class_string": {
            'kind': str,
            'required': False,
            'default': None
        }},
        {"project_string_id": {
            'kind': str,
            'required': False,
            'default': None
        }}
        # Serialized object maybe?
    ]

    log, input_from_request, untrusted_input = regular_input.master(
        request = request,
        spec_list = spec_list)
    if len(log["error"].keys()) >= 1:
        return jsonify(log = log), 400

    logger.info("Received valid inter service request")

    with sessionMaker.session_scope() as session:

        # CAUTIONS
        #   Generally assumes any calls here are non-blocking so as to return reasonably fast.
        #   eg 1) Condition on message then some_launcher(event_id = input['id'])
        #   Or 2) if we want the object here for some reason, something like:
        #       if input['base_class_string']:
        #           base_object = getattr(sys.modules[__name__], input['base_class_string']).get_by_id(
        #               id = input['id'],
        #               session = session)

        if input_from_request['message'] == 'new_job_launch_queue_item':
            job_launcher_thread = TaskTemplateLauncherThread(run_once = True)
            log['info']['job_launcher_thread'] = True

        if input_from_request['message'] == 'new_sync_action_item':
            sync_action_thread = SyncActionsHandlerThread(run_once = True)
            log['info']['job_launcher_thread'] = True

        if input_from_request['message'] == 'new_action_flow_queue_item':
            num_flows = ActionFlowTriggerQueueProcess.try_to_enqueue_new_action_flows(
                session = session,
                event_id = input_from_request['id'],
                commit_per_element = True)
            for i in range(0, num_flows):
                action_flow_thread = ActionFlowTriggerQueueProcess(run_once = True)

        if input_from_request['message'] == 'file_copy':
            enqueue_packet(project_string_id = input_from_request.get('project_string_id'),
                           session = session,
                           media_url = None,
                           media_type = input_from_request['extra_params'].get('type'),
                           directory_id = input_from_request['extra_params'].get('destination_working_dir_id'),
                           source_directory_id = input_from_request['extra_params'].get('source_working_dir_id'),
                           remove_link = input_from_request['extra_params'].get('remove_link'),
                           add_link = input_from_request['extra_params'].get('add_link'),
                           copy_instance_list = input_from_request['extra_params'].get('copy_instance_list'),
                           job_id = None,
                           batch_id = input_from_request['extra_params'].get('batch_id'),
                           file_id = input_from_request['id'],
                           instance_list = [],
                           video_parent_length = input_from_request['extra_params'].get('frame_count'),
                           task_id = None,
                           mode = 'copy_file',
                           commit_input = True)

    log['success'] = True
    return jsonify(log = log), 200
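Since the route only checks `inter_service_security_token` against `settings.INTER_SERVICE_SECRET`, another internal service would trigger it with a plain POST. A hedged sketch of such a caller, assuming the endpoint accepts a JSON body; the host, port, and secret value are placeholders, and the payload keys mirror the `spec_list` above:

```python
import requests

payload = {
    "inter_service_security_token": "<INTER_SERVICE_SECRET>",  # placeholder, must match settings
    "message": "new_job_launch_queue_item",
    "id": None,
    "extra_params": None,
    "base_class_string": None,
    "project_string_id": None,
}
resp = requests.post(
    "http://walrus:8080/api/walrus/v1/interservice/receive",  # host/port are assumptions
    json=payload,
    timeout=10,
)
print(resp.status_code, resp.json())
```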
    362
    # coding=utf-8 # Copyright 2018-2023 EvaDB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import shutil from pathlib import Path import pandas as pd from evadb.catalog.models.table_catalog import TableCatalogEntry from evadb.database import EvaDBDatabase from evadb.models.storage.batch import Batch from evadb.parser.table_ref import TableInfo from evadb.storage.abstract_storage_engine import AbstractStorageEngine from evadb.storage.sqlite_storage_engine import SQLStorageEngine from evadb.utils.logging_manager import logger class AbstractMediaStorageEngine(AbstractStorageEngine): def __init__(self, db: EvaDBDatabase): super().__init__(db) self._rdb_handler: SQLStorageEngine = SQLStorageEngine(db) def METHOD_NAME(self, table: TableCatalogEntry): return self.db.catalog().get_multimedia_metadata_table_catalog_entry(table) def _create_metadata_table(self, table: TableCatalogEntry): return ( self.db.catalog().create_and_insert_multimedia_metadata_table_catalog_entry( table ) ) def _xform_file_url_to_file_name(self, file_url: Path) -> str: # Convert media_path to file name. This is done to support duplicate media_names with # different complete paths. Without conversion, we cannot copy files with same name but # different paths. Eg., a/b/my.mp4 and a/b/c/my.mp4. # xformed_file_name = zlib.crc32(str(file_url).encode("utf-8")) & 0xFFFFFFFF # return str(xformed_file_name) # Previous approach with hashing is commented out above. Since we now use symbolic link, the only # thing we need to worry about is the same file name under different directory. This motivates us # to just breakdown directory also as part of file name. Additionally, it does not use hashing, # which avoids computation overhead. file_path_str = str(file_url) file_path = re.sub(r"[^a-zA-Z0-9 \.\n]", "_", file_path_str) return file_path def create(self, table: TableCatalogEntry, if_not_exists=True): """ Create the directory to store the images. 
Create a sqlite table to persist the file urls """ dir_path = Path(table.file_url) try: dir_path.mkdir(parents=True) except FileExistsError: if if_not_exists: return True error = "Failed to load the image as directory \ already exists: {}".format( dir_path ) logger.error(error) raise FileExistsError(error) self._rdb_handler.create(self._create_metadata_table(table)) return True def drop(self, table: TableCatalogEntry): try: dir_path = Path(table.file_url) shutil.rmtree(str(dir_path)) metadata_table = self.METHOD_NAME(table) self._rdb_handler.drop(metadata_table) # remove the metadata table from the catalog self.db.catalog().delete_table_catalog_entry(metadata_table) except Exception as e: err_msg = f"Failed to drop the image table {e}" logger.exception(err_msg) raise Exception(err_msg) def delete(self, table: TableCatalogEntry, rows: Batch): try: media_metadata_table = self.METHOD_NAME(table) for media_file_path in rows.file_paths(): dst_file_name = self._xform_file_url_to_file_name(Path(media_file_path)) image_file = Path(table.file_url) / dst_file_name self._rdb_handler.delete( media_metadata_table, where_clause={ media_metadata_table.identifier_column: str(media_file_path) }, ) image_file.unlink() except Exception as e: error = f"Deleting file path {media_file_path} failed with exception {e}" logger.exception(error) raise RuntimeError(error) return True def write(self, table: TableCatalogEntry, rows: Batch): try: dir_path = Path(table.file_url) copied_files = [] for media_file_path in rows.file_paths(): media_file = Path(media_file_path) dst_file_name = self._xform_file_url_to_file_name(media_file) dst_path = dir_path / dst_file_name if dst_path.exists(): raise FileExistsError( f"Duplicate File: {media_file} already exists in the table {table.name}" ) src_path = Path.cwd() / media_file os.symlink(src_path, dst_path) copied_files.append(dst_path) # assuming sql write is an atomic operation self._rdb_handler.write( self.METHOD_NAME(table), Batch(pd.DataFrame({"file_url": list(rows.file_paths())})), ) except Exception as e: # delete the copied_files for file in copied_files: logger.info(f"Rollback file {file}") file.unlink() logger.exception(str(e)) raise RuntimeError(str(e)) else: return True def rename(self, old_table: TableCatalogEntry, new_name: TableInfo): try: self.db.catalog().rename_table_catalog_entry(old_table, new_name) except Exception as e: raise Exception(f"Failed to rename table {new_name} with exception {e}")
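`_xform_file_url_to_file_name` above flattens a path into a single file name by replacing every character outside `[a-zA-Z0-9 .\n]` with `_`, so files that share a basename but live in different directories do not collide inside the table directory. A quick standalone check of that substitution:

```python
import re

def xform(file_path: str) -> str:
    # Same substitution as _xform_file_url_to_file_name above.
    return re.sub(r"[^a-zA-Z0-9 \.\n]", "_", file_path)

print(xform("a/b/my.mp4"))    # a_b_my.mp4
print(xform("a/b/c/my.mp4"))  # a_b_c_my.mp4 -- no collision with the previous one
```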
    363
from typing import Iterable
from typing import Optional
from typing import Union

from .request import Request
from .response import Response


class CORSMiddleware(object):
    """CORS Middleware.

    This middleware provides a simple out-of-the box CORS policy, including handling
    of preflighted requests from the browser.

    See also:

    * https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
    * https://www.w3.org/TR/cors/#resource-processing-model

    Keyword Arguments:
        allow_origins (Union[str, Iterable[str]]): List of origins to allow (case
            sensitive). The string ``'*'`` acts as a wildcard, matching every origin.
            (default ``'*'``).
        expose_headers (Optional[Union[str, Iterable[str]]]): List of additional
            response headers to expose via the ``Access-Control-Expose-Headers``
            header. These headers are in addition to the CORS-safelisted ones:
            ``Cache-Control``, ``Content-Language``, ``Content-Length``,
            ``Content-Type``, ``Expires``, ``Last-Modified``, ``Pragma``.
            (default ``None``).

            See also:
            https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers

        allow_credentials (Optional[Union[str, Iterable[str]]]): List of origins
            (case sensitive) for which to allow credentials via the
            ``Access-Control-Allow-Credentials`` header. The string ``'*'`` acts as
            a wildcard, matching every allowed origin, while ``None`` disallows all
            origins. This parameter takes effect only if the origin is allowed by
            the ``allow_origins`` argument. (Default ``None``).
    """

    def __init__(
        self,
        allow_origins: Union[str, Iterable[str]] = '*',
        expose_headers: Optional[Union[str, Iterable[str]]] = None,
        allow_credentials: Optional[Union[str, Iterable[str]]] = None,
    ):
        if allow_origins == '*':
            self.allow_origins = allow_origins
        else:
            if isinstance(allow_origins, str):
                allow_origins = [allow_origins]
            self.allow_origins = frozenset(allow_origins)
            if '*' in self.allow_origins:
                raise ValueError(
                    'The wildcard string "*" may only be passed to allow_origins as a '
                    'string literal, not inside an iterable.'
                )

        if expose_headers is not None and not isinstance(expose_headers, str):
            expose_headers = ', '.join(expose_headers)
        self.expose_headers = expose_headers

        if allow_credentials is None:
            allow_credentials = frozenset()
        elif allow_credentials != '*':
            if isinstance(allow_credentials, str):
                allow_credentials = [allow_credentials]
            allow_credentials = frozenset(allow_credentials)
            if '*' in allow_credentials:
                raise ValueError(
                    'The wildcard string "*" may only be passed to allow_credentials '
                    'as a string literal, not inside an iterable.'
                )
        self.allow_credentials = allow_credentials

    def process_response(self, req: Request, resp: Response, resource, req_succeeded):
        """Implement the CORS policy for all routes.

        This middleware provides a simple out-of-the box CORS policy, including
        handling of preflighted requests from the browser.

        See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
        See also: https://www.w3.org/TR/cors/#resource-processing-model
        """
        origin = req.get_header('Origin')
        if origin is None:
            return

        if self.allow_origins != '*' and origin not in self.allow_origins:
            return

        if resp.get_header('Access-Control-Allow-Origin') is None:
            set_origin = '*' if self.allow_origins == '*' else origin
            if self.allow_credentials == '*' or origin in self.allow_credentials:
                set_origin = origin
                resp.set_header('Access-Control-Allow-Credentials', 'true')
            resp.set_header('Access-Control-Allow-Origin', set_origin)

        if self.expose_headers:
            resp.set_header('Access-Control-Expose-Headers', self.expose_headers)

        if (
            req_succeeded
            and req.method == 'OPTIONS'
            and req.get_header('Access-Control-Request-Method')
        ):
            # NOTE(kgriffs): This is a CORS preflight request. Patch the
            #   response accordingly.
            allow = resp.get_header('Allow')
            resp.delete_header('Allow')

            allow_headers = req.get_header('Access-Control-Request-Headers', default='*')

            resp.set_header('Access-Control-Allow-Methods', allow)
            resp.set_header('Access-Control-Allow-Headers', allow_headers)
            resp.set_header('Access-Control-Max-Age', '86400')  # 24 hours

    async def METHOD_NAME(self, *args):
        self.process_response(*args)
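A brief usage sketch, assuming Falcon 3's falcon.App and the class defined above; the origin, exposed header, and route are placeholders.

import falcon


class PingResource:
    def on_get(self, req, resp):
        resp.media = {"status": "ok"}


# Allow cross-origin requests from one hypothetical frontend origin only.
app = falcon.App(middleware=[
    CORSMiddleware(
        allow_origins="https://app.example.com",
        expose_headers=["X-Request-Id"],
    )
])
app.add_route("/ping", PingResource())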
    364
    # Copyright (c) ZenML GmbH 2022. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """Local ZenML server deployment.""" import logging import sys from typing import TYPE_CHECKING, Optional from tests.harness.deployment.base import ( LOCAL_ZENML_SERVER_DEFAULT_PORT, BaseTestDeployment, ) from tests.harness.deployment.local_default import LocalDefaultTestDeployment from tests.harness.model import ( DeploymentConfig, DeploymentSetup, DeploymentStoreConfig, DeploymentType, ) if TYPE_CHECKING: from zenml.zen_server.deploy.deployment import ServerDeployment class ServerLocalTestDeployment(BaseTestDeployment): """A deployment that runs a ZenML server as a background process.""" def __init__(self, config: DeploymentConfig) -> None: """Initializes a local ZenML server deployment. Args: config: The configuration for the deployment. """ super().__init__(config) # The server local deployment is built on top of a local default # deployment because the server is provisioned through the client self.default_deployment = LocalDefaultTestDeployment(config) @property def server(self) -> Optional["ServerDeployment"]: """Returns the ZenML server corresponding to this configuration. Returns: The server for the deployment if it exists, None otherwise. """ from zenml.enums import ServerProviderType from zenml.zen_server.deploy.deployer import ServerDeployer # Managing the local server deployment is done through a default # local deployment with the same config. with self.default_deployment.connect(): deployer = ServerDeployer() servers = deployer.list_servers( provider_type=ServerProviderType.LOCAL ) if not servers: return None return servers[0] @property def is_running(self) -> bool: """Returns whether the ZenML server is running. Returns: True if the server is running, False otherwise. """ server = self.server if server is not None and server.is_running: return True return False def up(self) -> None: """Starts the ZenML deployment. Raises: RuntimeError: If the deployment is not supported on the host OS. """ from zenml.enums import ServerProviderType from zenml.utils.networking_utils import scan_for_available_port from zenml.zen_server.deploy.deployer import ServerDeployer from zenml.zen_server.deploy.deployment import ServerDeploymentConfig if sys.platform == "win32": raise RuntimeError( "Running the ZenML server locally as a background process is " "not supported on Windows." ) else: pass if self.is_running: logging.info( f"Deployment '{self.config.name}' is already running. " f"Skipping provisioning." ) return self.default_deployment.up() # Managing the local server deployment is done through the default # deployment with the same config. with self.default_deployment.connect(): port = scan_for_available_port(LOCAL_ZENML_SERVER_DEFAULT_PORT) if port is None: raise RuntimeError( "Could not find an available port for the ZenML server." 
) deployer = ServerDeployer() server_config = ServerDeploymentConfig( name=self.config.name, provider=ServerProviderType.LOCAL, port=port, ) deployer.deploy_server(server_config) logging.info( f"Started ZenML server for deployment '{self.config.name}'." ) def down(self) -> None: """Stops the ZenML deployment.""" from zenml.zen_server.deploy.deployer import ServerDeployer server = self.server if server is None: logging.info( f"Deployment '{self.config.name}' is no longer running. " ) return # Managing the local server deployment is done through the default # deployment with the same config. with self.default_deployment.connect(): deployer = ServerDeployer() deployer.remove_server(server.config.name) self.default_deployment.down() def METHOD_NAME(self) -> Optional[DeploymentStoreConfig]: """Returns the store config for the deployment. Returns: The store config for the deployment if it is running, None otherwise. Raises: RuntimeError: If the deployment is not running. """ from zenml.zen_stores.base_zen_store import ( DEFAULT_PASSWORD, DEFAULT_USERNAME, ) if not self.is_running: raise RuntimeError( f"The '{self.config.name}' deployment is not running." ) server = self.server if ( server is None or server.status is None or server.status.url is None ): raise RuntimeError( f"The '{self.config.name}' deployment is not running." ) return DeploymentStoreConfig( url=server.status.url, username=DEFAULT_USERNAME, password=DEFAULT_PASSWORD, ) ServerLocalTestDeployment.register_deployment_class( type=DeploymentType.SERVER, setup=DeploymentSetup.DEFAULT )
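Going only by the methods defined above, a hedged sketch of how a test might drive this deployment; config is assumed to be a DeploymentConfig built elsewhere in the harness, and its fields are not shown here.

# `config` is a DeploymentConfig constructed elsewhere in the test harness.
deployment = ServerLocalTestDeployment(config)
deployment.up()
try:
    store = deployment.METHOD_NAME()   # DeploymentStoreConfig with url, username, password
    print(store.url)
finally:
    deployment.down()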
    365
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkretailcloud.endpoint import endpoint_data class CreateClusterRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'retailcloud', '2018-03-13', 'CreateCluster') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_BusinessCode(self): # String return self.get_query_params().get('BusinessCode') def set_BusinessCode(self, BusinessCode): # String self.add_query_param('BusinessCode', BusinessCode) def get_CreateWithLogIntegration(self): # Boolean return self.get_query_params().get('CreateWithLogIntegration') def set_CreateWithLogIntegration(self, CreateWithLogIntegration): # Boolean self.add_query_param('CreateWithLogIntegration', CreateWithLogIntegration) def get_Vswitchidss(self): # RepeatList return self.get_query_params().get('Vswitchids') def set_Vswitchidss(self, Vswitchids): # RepeatList for depth1 in range(len(Vswitchids)): self.add_query_param('Vswitchids.' 
+ str(depth1 + 1), Vswitchids[depth1]) def get_CloudMonitorFlags(self): # Integer return self.get_query_params().get('CloudMonitorFlags') def set_CloudMonitorFlags(self, CloudMonitorFlags): # Integer self.add_query_param('CloudMonitorFlags', CloudMonitorFlags) def get_ClusterEnvType(self): # String return self.get_query_params().get('ClusterEnvType') def set_ClusterEnvType(self, ClusterEnvType): # String self.add_query_param('ClusterEnvType', ClusterEnvType) def METHOD_NAME(self): # Boolean return self.get_query_params().get('CreateWithArmsIntegration') def set_CreateWithArmsIntegration(self, CreateWithArmsIntegration): # Boolean self.add_query_param('CreateWithArmsIntegration', CreateWithArmsIntegration) def get_KeyPair(self): # String return self.get_query_params().get('KeyPair') def set_KeyPair(self, KeyPair): # String self.add_query_param('KeyPair', KeyPair) def get_ClusterTitle(self): # String return self.get_query_params().get('ClusterTitle') def set_ClusterTitle(self, ClusterTitle): # String self.add_query_param('ClusterTitle', ClusterTitle) def get_PodCIDR(self): # String return self.get_query_params().get('PodCIDR') def set_PodCIDR(self, PodCIDR): # String self.add_query_param('PodCIDR', PodCIDR) def get_ClusterId(self): # Long return self.get_query_params().get('ClusterId') def set_ClusterId(self, ClusterId): # Long self.add_query_param('ClusterId', ClusterId) def get_ClusterType(self): # String return self.get_query_params().get('ClusterType') def set_ClusterType(self, ClusterType): # String self.add_query_param('ClusterType', ClusterType) def get_Password(self): # String return self.get_query_params().get('Password') def set_Password(self, Password): # String self.add_query_param('Password', Password) def get_SnatEntry(self): # Integer return self.get_query_params().get('SnatEntry') def set_SnatEntry(self, SnatEntry): # Integer self.add_query_param('SnatEntry', SnatEntry) def get_NetPlug(self): # String return self.get_query_params().get('NetPlug') def set_NetPlug(self, NetPlug): # String self.add_query_param('NetPlug', NetPlug) def get_VpcId(self): # String return self.get_query_params().get('VpcId') def set_VpcId(self, VpcId): # String self.add_query_param('VpcId', VpcId) def get_RegionName(self): # String return self.get_query_params().get('RegionName') def set_RegionName(self, RegionName): # String self.add_query_param('RegionName', RegionName) def get_PrivateZone(self): # Boolean return self.get_query_params().get('PrivateZone') def set_PrivateZone(self, PrivateZone): # Boolean self.add_query_param('PrivateZone', PrivateZone) def get_ServiceCIDR(self): # String return self.get_query_params().get('ServiceCIDR') def set_ServiceCIDR(self, ServiceCIDR): # String self.add_query_param('ServiceCIDR', ServiceCIDR) def get_PublicSlb(self): # Integer return self.get_query_params().get('PublicSlb') def set_PublicSlb(self, PublicSlb): # Integer self.add_query_param('PublicSlb', PublicSlb)
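A short usage sketch for request classes like the one above, assuming the usual aliyunsdkcore client flow; the credentials, region, and resource IDs are placeholders.

from aliyunsdkcore.client import AcsClient

# Placeholder credentials and region.
client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")

request = CreateClusterRequest()
request.set_ClusterTitle("demo-cluster")
request.set_BusinessCode("demo")
request.set_VpcId("vpc-xxxxxxxx")
request.set_Vswitchidss(["vsw-xxxxxxxx"])

response = client.do_action_with_exception(request)
print(response)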
    366
    from pyrokinetics.gk_code import GKInputGENE from pyrokinetics import template_dir from pyrokinetics.local_geometry import LocalGeometryMiller from pyrokinetics.local_species import LocalSpecies from pyrokinetics.numerics import Numerics from pathlib import Path import numpy as np import pytest import sys docs_dir = Path(__file__).parent.parent.parent / "docs" sys.path.append(str(docs_dir)) from examples import example_JETTO # noqa template_file = template_dir / "input.gene" @pytest.fixture def default_gene(): return GKInputGENE() @pytest.fixture def gene(): return GKInputGENE(template_file) def test_read(gene): """Ensure a gene file can be read, and that the 'data' attribute is set""" params = ["general", "box", "geometry"] assert np.all(np.isin(params, list(gene.data))) def test_read_str(): """Ensure a gene file can be read as a string, and that the 'data' attribute is set""" params = ["general", "box", "geometry"] with open(template_file, "r") as f: gene = GKInputGENE.from_str(f.read()) assert np.all(np.isin(params, list(gene.data))) def test_verify_file_type(gene): """Ensure that 'verify_file_type' does not raise exception on GENE file""" gene.verify_file_type(template_file) @pytest.mark.parametrize( "filename", ["input.gs2", "input.cgyro", "transp.cdf", "helloworld"] ) def test_verify_file_type_bad_inputs(gene, filename): """Ensure that 'verify_file_type' raises exception on non-GENE file""" with pytest.raises(Exception): gene.verify_file_type(template_dir / filename) def test_is_nonlinear(gene): """Expect template file to be linear. Modify it so that it is nonlinear.""" gene.data["general"]["nonlinear"] = 0 assert gene.is_linear() assert not gene.is_nonlinear() gene.data["general"]["nonlinear"] = 1 assert not gene.is_linear() assert gene.is_nonlinear() def test_add_flags(gene): gene.add_flags({"foo": {"bar": "baz"}}) assert gene.data["foo"]["bar"] == "baz" def METHOD_NAME(gene): # TODO test it has the correct values local_geometry = gene.get_local_geometry() assert isinstance(local_geometry, LocalGeometryMiller) def test_get_local_species(gene): local_species = gene.get_local_species() assert isinstance(local_species, LocalSpecies) assert local_species.nspec == 2 assert len(gene.data["species"]) == 2 # Ensure you can index gene.data["species"] (doesn't work on some f90nml versions) assert gene.data["species"][0] assert gene.data["species"][1] assert local_species["electron"] assert local_species["ion1"] # TODO test it has the correct values def test_get_numerics(gene): # TODO test it has the correct values numerics = gene.get_numerics() assert isinstance(numerics, Numerics) def test_write(tmp_path, gene): """Ensure a gene file can be written, and that no info is lost in the process""" # Get template data local_geometry = gene.get_local_geometry() local_species = gene.get_local_species() numerics = gene.get_numerics() # Set output path filename = tmp_path / "input.in" # Write out a new input file gene_writer = GKInputGENE() gene_writer.set(local_geometry, local_species, numerics) # Ensure you can index gene.data["species"] (doesn't work on some f90nml versions) assert len(gene_writer.data["species"]) == 2 assert gene_writer.data["species"][0] assert gene_writer.data["species"][1] # Write to disk gene_writer.write(filename) # Ensure a new file exists assert Path(filename).exists() # Ensure it is a valid file GKInputGENE().verify_file_type(filename) gene_reader = GKInputGENE(filename) new_local_geometry = gene_reader.get_local_geometry() assert local_geometry.shat == 
new_local_geometry.shat new_local_species = gene_reader.get_local_species() assert local_species.nspec == new_local_species.nspec new_numerics = gene_reader.get_numerics() assert numerics.delta_time == new_numerics.delta_time def test_species_order(tmp_path): pyro = example_JETTO.main(tmp_path) # Reverse species order so electron is last pyro.local_species.names = pyro.local_species.names[::-1] pyro.gk_code = "GENE" pyro.write_gk_file(file_name=tmp_path / "input.in") assert Path(tmp_path / "input.in").exists()
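Based only on the calls exercised by these tests, a hedged round-trip sketch; the output filename is a placeholder.

from pyrokinetics.gk_code import GKInputGENE
from pyrokinetics import template_dir

# Read the bundled GENE template and pull out the physics objects.
gene = GKInputGENE(template_dir / "input.gene")
local_geometry = gene.get_local_geometry()
local_species = gene.get_local_species()
numerics = gene.get_numerics()

# Write an equivalent input file back out.
writer = GKInputGENE()
writer.set(local_geometry, local_species, numerics)
writer.write("input_copy.in")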
    367
    import re import warnings from contextlib import contextmanager import pymssql # pylint: disable=import-error from Orange.data import StringVariable, TimeVariable, ContinuousVariable, DiscreteVariable from Orange.data.sql.backend import Backend from Orange.data.sql.backend.base import ToSql, BackendError def METHOD_NAME(ex: Exception) -> str: try: return ex.args[0][1].decode().splitlines()[-1] except: # pylint: disable=bare-except return str(ex) class PymssqlBackend(Backend): display_name = "SQL Server" def __init__(self, connection_params): connection_params["server"] = connection_params.pop("host", None) for key in list(connection_params): if connection_params[key] is None: del connection_params[key] super().__init__(connection_params) try: self.connection = pymssql.connect(login_timeout=5, **connection_params) except pymssql.Error as ex: raise BackendError(METHOD_NAME(ex)) from ex except ValueError: # ValueError is raised when 'server' contains "\\" raise BackendError("Incorrect format of connection details") def list_tables_query(self, schema=None): return """ SELECT [TABLE_SCHEMA], [TABLE_NAME] FROM information_schema.tables WHERE TABLE_TYPE in ('VIEW' ,'BASE TABLE') ORDER BY [TABLE_NAME] """ def quote_identifier(self, name): return "[{}]".format(name) def unquote_identifier(self, quoted_name): return quoted_name[1:-1] def create_sql_query(self, table_name, fields, filters=(), group_by=None, order_by=None, offset=None, limit=None, use_time_sample=None): sql = ["SELECT"] if limit and not offset: sql.extend(["TOP", str(limit)]) sql.append(', '.join(fields)) sql.extend(["FROM", table_name]) if use_time_sample: sql.append("TABLESAMPLE system_time(%i)" % use_time_sample) if filters: sql.extend(["WHERE", " AND ".join(filters)]) if group_by: sql.extend(["GROUP BY", ", ".join(group_by)]) if offset and not order_by: order_by = fields[0].split("AS")[1:] if order_by: sql.extend(["ORDER BY", ",".join(order_by)]) if offset: sql.extend(["OFFSET", str(offset), "ROWS"]) if limit: sql.extend(["FETCH FIRST", str(limit), "ROWS ONLY"]) return " ".join(sql) @contextmanager def execute_sql_query(self, query, params=()): try: with self.connection.cursor() as cur: cur.execute(query, *params) yield cur except pymssql.Error as ex: raise BackendError(METHOD_NAME(ex)) from ex def create_variable(self, field_name, field_metadata, type_hints, inspect_table=None): if field_name in type_hints: var = type_hints[field_name] else: var = self._guess_variable(field_name, field_metadata, inspect_table) field_name_q = self.quote_identifier(field_name) if var.is_continuous: if isinstance(var, TimeVariable): var.to_sql = ToSql("DATEDIFF(s, '1970-01-01 00:00:00', {})".format(field_name_q)) else: var.to_sql = ToSql(field_name_q) else: # discrete or string var.to_sql = ToSql(field_name_q) return var def _guess_variable(self, field_name, field_metadata, inspect_table): # pylint: disable=import-error from pymssql import STRING, NUMBER, DATETIME, DECIMAL type_code, *_ = field_metadata if type_code in (NUMBER, DECIMAL): return ContinuousVariable(field_name) if type_code == DATETIME: tv = TimeVariable(field_name) tv.have_date = True tv.have_time = True return tv if type_code == STRING: if inspect_table: values = self.get_distinct_values(field_name, inspect_table) if values: return DiscreteVariable(field_name, values) return StringVariable(field_name) EST_ROWS_RE = re.compile(r'StatementEstRows="(\d+)"') def count_approx(self, query): with self.connection.cursor() as cur: try: cur.execute("SET SHOWPLAN_XML ON") try: 
cur.execute(query) result = cur.fetchone() match = self.EST_ROWS_RE.search(result[0]) if not match: # Either StatementEstRows was not found or # a float is received. # If it is a float then it is most probable # that the server's statistics are out of date # and the result is false. In that case # it is preferable to return None so # an exact count be used. return None return int(match.group(1)) finally: cur.execute("SET SHOWPLAN_XML OFF") except pymssql.Error as ex: if "SHOWPLAN permission denied" in str(ex): warnings.warn("SHOWPLAN permission denied, count approximates will not be used") return None raise BackendError(METHOD_NAME(ex)) from ex def distinct_values_query(self, field_name: str, table_name: str) -> str: field = self.quote_identifier(field_name) return self.create_sql_query( table_name, [field], # Cast - workaround for collations that are not case-sensitive and # UTF characters sensitive # DATALENGTH - workaround for string comparison that ignore trailing # spaces, two strings that differ only in space in the end would # group together if DATALENGTH wouldn't be used group_by=[f"{field}, Cast({field} as binary), DATALENGTH({field})"], order_by=[field], limit=21, )
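An illustration of how the backend above is intended to be used to build and run a query; the connection details and table name are placeholders, and the generated SQL follows create_sql_query exactly as coded.

# Placeholder connection details for a hypothetical SQL Server instance.
backend = PymssqlBackend({
    "host": "localhost",
    "user": "orange",
    "password": "secret",
    "database": "demo",
})

table = backend.quote_identifier("iris")
fields = [backend.quote_identifier(c) for c in ("sepal_length", "petal_width")]

query = backend.create_sql_query(table, fields, limit=100)  # SELECT TOP 100 ...
with backend.execute_sql_query(query) as cur:
    rows = cur.fetchall()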
    368
    import pytest @pytest.fixture def user_data_model(): from alfasim_sdk._internal.models import data_model from alfasim_sdk._internal.types import BaseField class ValidType(BaseField): pass @data_model(icon="model.png", caption="PLUGIN DEV MODEL") class Model: valid_attribute = ValidType(caption="valid") return Model @pytest.fixture def user_data_container(user_data_model): from alfasim_sdk._internal.models import container_model from alfasim_sdk._internal.types import BaseField class ValidType(BaseField): pass @container_model( model=user_data_model, icon="container.png", caption="PLUGIN DEV CONTAINER" ) class Container: container_valid_attribute = ValidType(caption="valid") return Container def test_data_model(user_data_model): import attr # Attributes from the class, should be accessed by _alfasim_metadata assert user_data_model._alfasim_metadata["caption"] == "PLUGIN DEV MODEL" assert user_data_model._alfasim_metadata["icon"] == "model.png" # "data_model" should not have references to others model assert user_data_model._alfasim_metadata["model"] is None # Attributes defined from the user should be accessed by attr fields assert attr.fields(user_data_model).valid_attribute is not None def METHOD_NAME(user_data_container): import attr assert user_data_container._alfasim_metadata["model"] is not None assert "Model" in str(user_data_container._alfasim_metadata["model"]) assert user_data_container._alfasim_metadata["caption"] == "PLUGIN DEV CONTAINER" assert user_data_container._alfasim_metadata["icon"] == "container.png" assert attr.fields(user_data_container).container_valid_attribute is not None def test_invalid_attribute(): from alfasim_sdk._internal.models import data_model from alfasim_sdk._internal.types import BaseField class ValidType(BaseField): pass error_msg = "Error defining _invalid_attribute, attributes starting with '_' are not allowed" with pytest.raises(TypeError, match=error_msg): @data_model(icon="model.png", caption="PLUGIN DEV MODEL") class ModelPrivateAttribute: # pylint: disable=unused-variable _invalid_attribute = ValidType(caption="invalid") class Invalid(object): pass error_msg = ( "Error defining invalid, attributes must be a valid type defined by alfasim_sdk" ) with pytest.raises(TypeError, match=error_msg): @data_model(icon="model.png", caption="PLUGIN DEV MODEL") class Model: # pylint: disable=unused-variable invalid = Invalid() def test_attribute_order(): from alfasim_sdk._internal.models import data_model from alfasim_sdk._internal.types import ( Boolean, Reference, TracerType, Enum, String, Quantity, ) @data_model(icon="", caption="caption") class Model: boolean = Boolean(value=True, caption="caption") data_reference = Reference(ref_type=TracerType, caption="caption") enum = Enum(values=["value_1", "value_2"], caption="caption") string = String(value="value", caption="caption") quantity = Quantity(value=1, unit="m", caption="caption") expected_order = ["boolean", "data_reference", "enum", "string", "quantity"] assert [attr.name for attr in Model.__attrs_attrs__] == expected_order def test_check_model_in_container_model(): from alfasim_sdk._internal.models import container_model, data_model from alfasim_sdk._internal.types import String @data_model(caption="The child") class Child: name = String(value="A child", caption="Name") @container_model(caption="The parent", model=Child) class Parent: name = String(value="A parent", caption="Name") with pytest.raises(TypeError): @container_model(caption="The grand parent", model=Parent) class GrandParent: # 
pragma: no cover (`container_model` is expected to raise) name = String(value="A grand parent", caption="Name")
    369
    import asyncio from typing import ( TYPE_CHECKING, Any, Callable, Collection, Optional, Type, ) import aiohttp from requests.exceptions import ( ConnectionError, HTTPError, Timeout, TooManyRedirects, ) from web3.types import ( AsyncMiddlewareCoroutine, RPCEndpoint, RPCResponse, ) if TYPE_CHECKING: from web3 import ( # noqa: F401 AsyncWeb3, Web3, ) whitelist = [ "admin", "miner", "net", "txpool", "testing", "evm", "eth_protocolVersion", "eth_syncing", "eth_coinbase", "eth_mining", "eth_hashrate", "eth_chainId", "eth_gasPrice", "eth_accounts", "eth_blockNumber", "eth_getBalance", "eth_getStorageAt", "eth_getProof", "eth_getCode", "eth_getBlockByNumber", "eth_getBlockByHash", "eth_getBlockTransactionCountByNumber", "eth_getBlockTransactionCountByHash", "eth_getUncleCountByBlockNumber", "eth_getUncleCountByBlockHash", "eth_getTransactionByHash", "eth_getTransactionByBlockHashAndIndex", "eth_getTransactionByBlockNumberAndIndex", "eth_getTransactionReceipt", "eth_getTransactionCount", "eth_getRawTransactionByHash", "eth_call", "eth_estimateGas", "eth_newBlockFilter", "eth_newPendingTransactionFilter", "eth_newFilter", "eth_getFilterChanges", "eth_getFilterLogs", "eth_getLogs", "eth_uninstallFilter", "eth_getCompilers", "eth_getWork", "eth_sign", "eth_signTypedData", "eth_sendRawTransaction", "personal_importRawKey", "personal_newAccount", "personal_listAccounts", "personal_listWallets", "personal_lockAccount", "personal_unlockAccount", "personal_ecRecover", "personal_sign", "personal_signTypedData", ] def check_if_retry_on_failure(method: RPCEndpoint) -> bool: root = method.split("_")[0] if root in whitelist: return True elif method in whitelist: return True else: return False def exception_retry_middleware( make_request: Callable[[RPCEndpoint, Any], RPCResponse], _w3: "Web3", errors: Collection[Type[BaseException]], retries: int = 5, ) -> Callable[[RPCEndpoint, Any], RPCResponse]: """ Creates middleware that retries failed HTTP requests. Is a default middleware for HTTPProvider. """ def middleware(method: RPCEndpoint, params: Any) -> Optional[RPCResponse]: if check_if_retry_on_failure(method): for i in range(retries): try: return make_request(method, params) except tuple(errors): if i < retries - 1: continue else: raise return None else: return make_request(method, params) return middleware def http_retry_request_middleware( make_request: Callable[[RPCEndpoint, Any], Any], w3: "Web3" ) -> Callable[[RPCEndpoint, Any], Any]: return exception_retry_middleware( make_request, w3, (ConnectionError, HTTPError, Timeout, TooManyRedirects) ) async def METHOD_NAME( make_request: Callable[[RPCEndpoint, Any], Any], _async_w3: "AsyncWeb3", errors: Collection[Type[BaseException]], retries: int = 5, backoff_factor: float = 0.3, ) -> AsyncMiddlewareCoroutine: """ Creates middleware that retries failed HTTP requests. Is a default middleware for AsyncHTTPProvider. """ async def middleware(method: RPCEndpoint, params: Any) -> Optional[RPCResponse]: if check_if_retry_on_failure(method): for i in range(retries): try: return await make_request(method, params) except tuple(errors): if i < retries - 1: await asyncio.sleep(backoff_factor) continue else: raise return None else: return await make_request(method, params) return middleware async def async_http_retry_request_middleware( make_request: Callable[[RPCEndpoint, Any], Any], async_w3: "AsyncWeb3" ) -> Callable[[RPCEndpoint, Any], Any]: return await METHOD_NAME( make_request, async_w3, (TimeoutError, aiohttp.ClientError), )
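A hedged sketch of attaching the synchronous retry middleware above to a provider, assuming web3.py's middleware_onion API; the RPC URL is a placeholder.

from web3 import Web3

# Placeholder RPC endpoint; http_retry_request_middleware is defined above.
w3 = Web3(Web3.HTTPProvider("https://rpc.example.org"))
w3.middleware_onion.add(http_retry_request_middleware)

# Whitelisted read-only calls such as eth_blockNumber are now retried up to
# five times on connection errors, HTTP errors, timeouts and redirects.
print(w3.eth.block_number)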
    370
    import logging import operator from functools import reduce from time import sleep from typing import Any, Dict, List import boto3 import botocore.loaders as boto_loader import botocore.regions as boto_regions from botocore.config import Config as BotoConfig from botocore.exceptions import ClientError, NoCredentialsError, ProfileNotFound from taskcat.exceptions import TaskCatException LOG = logging.getLogger(__name__) REGIONAL_ENDPOINT_SERVICES = ["sts"] class Boto3Cache: RETRIES = 10 BACKOFF = 2 DELAY = 0.1 CLIENT_THROTTLE_RETRIES = 20 def __init__(self, _boto3=boto3): self._boto3 = _boto3 self._session_cache: Dict[str, Dict[str, boto3.Session]] = {} self._client_cache: Dict[str, Dict[str, Dict[str, boto3.client]]] = {} self._resource_cache: Dict[str, Dict[str, Dict[str, boto3.resource]]] = {} self._account_info: Dict[str, Dict[str, str]] = {} self._lock_cache_update = False def session(self, profile: str = "default", region: str = None) -> boto3.Session: region = self._get_region(region, profile) try: session = self._cache_lookup( self._session_cache, [profile, region], self._boto3.Session, [], {"region_name": region, "profile_name": profile}, ) except ProfileNotFound: if profile != "default": raise session = self._boto3.Session(region_name=region) self._cache_set(self._session_cache, [profile, region], session) return session def client( self, service: str, profile: str = "default", region: str = None ) -> boto3.client: region = self._get_region(region, profile) session = self.session(profile, region) kwargs = {"config": BotoConfig(retries={"max_attempts": 20})} if service in REGIONAL_ENDPOINT_SERVICES: kwargs.update({"endpoint_url": self._get_endpoint_url(service, region)}) return self._cache_lookup( self._client_cache, [profile, region, service], session.client, [service], kwargs, ) def resource( self, service: str, profile: str = "default", region: str = None ) -> boto3.resource: region = self._get_region(region, profile) session = self.session(profile, region) return self._cache_lookup( self._resource_cache, [profile, region, service], session.resource, [service], ) def partition(self, profile: str = "default") -> str: return self._cache_lookup( self._account_info, [profile], self._get_account_info, [profile] )["partition"] def account_id(self, profile: str = "default") -> str: return self._cache_lookup( self._account_info, [profile], self._get_account_info, [profile] )["account_id"] def _get_account_info(self, profile): partition, region = self._get_partition(profile) session = self.session(profile, region) sts_client = session.client("sts", region_name=region) try: account_id = sts_client.get_caller_identity()["Account"] except ClientError as e: if e.response["Error"]["Code"] == "AccessDenied": # pylint: disable=raise-missing-from raise TaskCatException( f"Not able to fetch account number from {region} using profile " f"{profile}. {str(e)}" ) raise except NoCredentialsError as e: # pylint: disable=raise-missing-from raise TaskCatException( f"Not able to fetch account number from {region} using profile " f"{profile}. {str(e)}" ) except ProfileNotFound as e: # pylint: disable=raise-missing-from raise TaskCatException( f"Not able to fetch account number from {region} using profile " f"{profile}. 
{str(e)}" ) return {"partition": partition, "account_id": account_id} def _make_parent_keys(self, cache: dict, keys: list): if keys: if not cache.get(keys[0]): cache[keys[0]] = {} self._make_parent_keys(cache[keys[0]], keys[1:]) def _cache_lookup(self, cache, key_list, create_func, args=None, kwargs=None): try: value = self._cache_get(cache, key_list) except KeyError: args = [] if not args else args kwargs = {} if not kwargs else kwargs value = self.METHOD_NAME(create_func, args, kwargs) self._cache_set(cache, key_list, value) return value def METHOD_NAME(self, create_func, args, kwargs): retries = self.RETRIES delay = self.DELAY while retries: try: return create_func(*args, **kwargs) except KeyError as e: if str(e) not in ["'credential_provider'", "'endpoint_resolver'"]: raise backoff = (self.RETRIES - retries + delay) * self.BACKOFF sleep(backoff) @staticmethod def _get_endpoint_url(service, region): data = boto_loader.create_loader().load_data("endpoints") endpoint_data = boto_regions.EndpointResolver(data).construct_endpoint( service, region ) if not endpoint_data: raise TaskCatException( f"unable to resolve endpoint for {service} in {region}" ) return f"https://{service}.{region}.{endpoint_data['dnsSuffix']}" @staticmethod def _cache_get(cache: dict, key_list: List[str]): return reduce(operator.getitem, key_list, cache) def _cache_set(self, cache: dict, key_list: list, value: Any): self._make_parent_keys(cache, key_list[:-1]) self._cache_get(cache, key_list[:-1])[key_list[-1]] = value def _get_region(self, region, profile): if not region: region = self.get_default_region(profile) return region def _get_partition(self, profile): partition_regions = [ ("aws", "us-east-1"), ("aws-cn", "cn-north-1"), ("aws-us-gov", "us-gov-west-1"), ] for partition, region in partition_regions: try: self.session(profile, region).client( "sts", region_name=region ).get_caller_identity() return (partition, region) except ClientError as e: if "InvalidClientTokenId" in str(e): continue raise raise ValueError("cannot find suitable AWS partition") def get_default_region(self, profile_name="default") -> str: try: if profile_name != "default": region = self._boto3.session.Session( profile_name=profile_name ).region_name else: region = self._boto3.session.Session().region_name except ProfileNotFound: if profile_name != "default": raise region = self._boto3.session.Session().region_name if not region: _, region = self._get_partition(profile_name) LOG.warning( "Region not set in credential chain, defaulting to {}".format(region) ) return region
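A short usage sketch based on the class above; the profile and region values are placeholders, and valid AWS credentials are assumed.

cache = Boto3Cache()

# Sessions, clients and resources are created once per (profile, region, service)
# and then served from the in-memory caches.
s3 = cache.client("s3", profile="default", region="us-east-1")
buckets = s3.list_buckets()["Buckets"]

print(cache.account_id())  # resolved via STS and cached per profile
print(cache.partition())   # "aws", "aws-cn" or "aws-us-gov"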
    371
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest class ListGroupAuthorizationRulesRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'IoTCC', '2021-05-13', 'ListGroupAuthorizationRules','IoTCC') self.set_method('POST') def get_DestinationTypes(self): # RepeatList return self.get_query_params().get('DestinationType') def set_DestinationTypes(self, DestinationType): # RepeatList for depth1 in range(len(DestinationType)): self.add_query_param('DestinationType.' + str(depth1 + 1), DestinationType[depth1]) def get_Destinations(self): # RepeatList return self.get_query_params().get('Destination') def set_Destinations(self, Destination): # RepeatList for depth1 in range(len(Destination)): self.add_query_param('Destination.' + str(depth1 + 1), Destination[depth1]) def get_Type(self): # String return self.get_query_params().get('Type') def set_Type(self, Type): # String self.add_query_param('Type', Type) def get_Protocols(self): # RepeatList return self.get_query_params().get('Protocol') def set_Protocols(self, Protocol): # RepeatList for depth1 in range(len(Protocol)): self.add_query_param('Protocol.' + str(depth1 + 1), Protocol[depth1]) def get_AuthorizationRuleIdss(self): # RepeatList return self.get_query_params().get('AuthorizationRuleIds') def set_AuthorizationRuleIdss(self, AuthorizationRuleIds): # RepeatList for depth1 in range(len(AuthorizationRuleIds)): self.add_query_param('AuthorizationRuleIds.' + str(depth1 + 1), AuthorizationRuleIds[depth1]) def get_NextToken(self): # String return self.get_query_params().get('NextToken') def set_NextToken(self, NextToken): # String self.add_query_param('NextToken', NextToken) def get_Policys(self): # RepeatList return self.get_query_params().get('Policy') def METHOD_NAME(self, Policy): # RepeatList for depth1 in range(len(Policy)): self.add_query_param('Policy.' + str(depth1 + 1), Policy[depth1]) def get_AuthorizationRuleStatuss(self): # RepeatList return self.get_query_params().get('AuthorizationRuleStatus') def set_AuthorizationRuleStatuss(self, AuthorizationRuleStatus): # RepeatList for depth1 in range(len(AuthorizationRuleStatus)): self.add_query_param('AuthorizationRuleStatus.' + str(depth1 + 1), AuthorizationRuleStatus[depth1]) def get_IoTCloudConnectorGroupId(self): # String return self.get_query_params().get('IoTCloudConnectorGroupId') def set_IoTCloudConnectorGroupId(self, IoTCloudConnectorGroupId): # String self.add_query_param('IoTCloudConnectorGroupId', IoTCloudConnectorGroupId) def get_AuthorizationRuleNames(self): # RepeatList return self.get_query_params().get('AuthorizationRuleName') def set_AuthorizationRuleNames(self, AuthorizationRuleName): # RepeatList for depth1 in range(len(AuthorizationRuleName)): self.add_query_param('AuthorizationRuleName.' 
+ str(depth1 + 1), AuthorizationRuleName[depth1]) def get_DestinationPorts(self): # RepeatList return self.get_query_params().get('DestinationPort') def set_DestinationPorts(self, DestinationPort): # RepeatList for depth1 in range(len(DestinationPort)): self.add_query_param('DestinationPort.' + str(depth1 + 1), DestinationPort[depth1]) def get_MaxResults(self): # Integer return self.get_query_params().get('MaxResults') def set_MaxResults(self, MaxResults): # Integer self.add_query_param('MaxResults', MaxResults)
    372
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkr_kvstore.endpoint import endpoint_data class DescribeInstancesRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'DescribeInstances','redisa') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_ResourceOwnerId(self): # Long return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self, ResourceOwnerId): # Long self.add_query_param('ResourceOwnerId', ResourceOwnerId) def get_SearchKey(self): # String return self.get_query_params().get('SearchKey') def set_SearchKey(self, SearchKey): # String self.add_query_param('SearchKey', SearchKey) def get_NetworkType(self): # String return self.get_query_params().get('NetworkType') def set_NetworkType(self, NetworkType): # String self.add_query_param('NetworkType', NetworkType) def get_EngineVersion(self): # String return self.get_query_params().get('EngineVersion') def set_EngineVersion(self, EngineVersion): # String self.add_query_param('EngineVersion', EngineVersion) def get_InstanceClass(self): # String return self.get_query_params().get('InstanceClass') def METHOD_NAME(self, InstanceClass): # String self.add_query_param('InstanceClass', InstanceClass) def get_PageNumber(self): # Integer return self.get_query_params().get('PageNumber') def set_PageNumber(self, PageNumber): # Integer self.add_query_param('PageNumber', PageNumber) def get_ResourceGroupId(self): # String return self.get_query_params().get('ResourceGroupId') def set_ResourceGroupId(self, ResourceGroupId): # String self.add_query_param('ResourceGroupId', ResourceGroupId) def get_Expired(self): # String return self.get_query_params().get('Expired') def set_Expired(self, Expired): # String self.add_query_param('Expired', Expired) def get_SecurityToken(self): # String return self.get_query_params().get('SecurityToken') def set_SecurityToken(self, SecurityToken): # String self.add_query_param('SecurityToken', SecurityToken) def get_PageSize(self): # Integer return self.get_query_params().get('PageSize') def set_PageSize(self, PageSize): # Integer self.add_query_param('PageSize', PageSize) def get_InstanceType(self): # String return self.get_query_params().get('InstanceType') def set_InstanceType(self, InstanceType): # String self.add_query_param('InstanceType', InstanceType) def get_EditionType(self): # String return self.get_query_params().get('EditionType') def set_EditionType(self, EditionType): # String self.add_query_param('EditionType', EditionType) def get_Tags(self): # RepeatList return 
self.get_query_params().get('Tag') def set_Tags(self, Tag): # RepeatList for depth1 in range(len(Tag)): if Tag[depth1].get('Value') is not None: self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value')) if Tag[depth1].get('Key') is not None: self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key')) def get_InstanceStatus(self): # String return self.get_query_params().get('InstanceStatus') def set_InstanceStatus(self, InstanceStatus): # String self.add_query_param('InstanceStatus', InstanceStatus) def get_ResourceOwnerAccount(self): # String return self.get_query_params().get('ResourceOwnerAccount') def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount) def get_OwnerAccount(self): # String return self.get_query_params().get('OwnerAccount') def set_OwnerAccount(self, OwnerAccount): # String self.add_query_param('OwnerAccount', OwnerAccount) def get_GlobalInstance(self): # Boolean return self.get_query_params().get('GlobalInstance') def set_GlobalInstance(self, GlobalInstance): # Boolean self.add_query_param('GlobalInstance', GlobalInstance) def get_PrivateIp(self): # String return self.get_query_params().get('PrivateIp') def set_PrivateIp(self, PrivateIp): # String self.add_query_param('PrivateIp', PrivateIp) def get_OwnerId(self): # Long return self.get_query_params().get('OwnerId') def set_OwnerId(self, OwnerId): # Long self.add_query_param('OwnerId', OwnerId) def get_VSwitchId(self): # String return self.get_query_params().get('VSwitchId') def set_VSwitchId(self, VSwitchId): # String self.add_query_param('VSwitchId', VSwitchId) def get_InstanceIds(self): # String return self.get_query_params().get('InstanceIds') def set_InstanceIds(self, InstanceIds): # String self.add_query_param('InstanceIds', InstanceIds) def get_ArchitectureType(self): # String return self.get_query_params().get('ArchitectureType') def set_ArchitectureType(self, ArchitectureType): # String self.add_query_param('ArchitectureType', ArchitectureType) def get_VpcId(self): # String return self.get_query_params().get('VpcId') def set_VpcId(self, VpcId): # String self.add_query_param('VpcId', VpcId) def get_ZoneId(self): # String return self.get_query_params().get('ZoneId') def set_ZoneId(self, ZoneId): # String self.add_query_param('ZoneId', ZoneId) def get_ChargeType(self): # String return self.get_query_params().get('ChargeType') def set_ChargeType(self, ChargeType): # String self.add_query_param('ChargeType', ChargeType)
    373
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdklive.endpoint import endpoint_data class AddCustomLiveStreamTranscodeRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'live', '2016-11-01', 'AddCustomLiveStreamTranscode','live') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_ResWithSource(self): # String return self.get_query_params().get('ResWithSource') def set_ResWithSource(self, ResWithSource): # String self.add_query_param('ResWithSource', ResWithSource) def get_Gop(self): # String return self.get_query_params().get('Gop') def set_Gop(self, Gop): # String self.add_query_param('Gop', Gop) def get_AudioCodec(self): # String return self.get_query_params().get('AudioCodec') def set_AudioCodec(self, AudioCodec): # String self.add_query_param('AudioCodec', AudioCodec) def get_KmsUID(self): # String return self.get_query_params().get('KmsUID') def set_KmsUID(self, KmsUID): # String self.add_query_param('KmsUID', KmsUID) def get_Height(self): # Integer return self.get_query_params().get('Height') def set_Height(self, Height): # Integer self.add_query_param('Height', Height) def get_App(self): # String return self.get_query_params().get('App') def set_App(self, App): # String self.add_query_param('App', App) def get_Profile(self): # Integer return self.get_query_params().get('Profile') def set_Profile(self, Profile): # Integer self.add_query_param('Profile', Profile) def get_OwnerId(self): # Long return self.get_query_params().get('OwnerId') def set_OwnerId(self, OwnerId): # Long self.add_query_param('OwnerId', OwnerId) def get_ExtWithSource(self): # String return self.get_query_params().get('ExtWithSource') def set_ExtWithSource(self, ExtWithSource): # String self.add_query_param('ExtWithSource', ExtWithSource) def get_BitrateWithSource(self): # String return self.get_query_params().get('BitrateWithSource') def set_BitrateWithSource(self, BitrateWithSource): # String self.add_query_param('BitrateWithSource', BitrateWithSource) def get_Domain(self): # String return self.get_query_params().get('Domain') def set_Domain(self, Domain): # String self.add_query_param('Domain', Domain) def get_Template(self): # String return self.get_query_params().get('Template') def set_Template(self, Template): # String self.add_query_param('Template', Template) def get_Lazy(self): # String return self.get_query_params().get('Lazy') def set_Lazy(self, Lazy): # String self.add_query_param('Lazy', Lazy) def get_KmsKeyExpireInterval(self): # String return self.get_query_params().get('KmsKeyExpireInterval') def 
set_KmsKeyExpireInterval(self, KmsKeyExpireInterval): # String self.add_query_param('KmsKeyExpireInterval', KmsKeyExpireInterval) def get_TemplateType(self): # String return self.get_query_params().get('TemplateType') def set_TemplateType(self, TemplateType): # String self.add_query_param('TemplateType', TemplateType) def get_AudioProfile(self): # String return self.get_query_params().get('AudioProfile') def set_AudioProfile(self, AudioProfile): # String self.add_query_param('AudioProfile', AudioProfile) def get_EncryptParameters(self): # String return self.get_query_params().get('EncryptParameters') def set_EncryptParameters(self, EncryptParameters): # String self.add_query_param('EncryptParameters', EncryptParameters) def get_AudioChannelNum(self): # Integer return self.get_query_params().get('AudioChannelNum') def set_AudioChannelNum(self, AudioChannelNum): # Integer self.add_query_param('AudioChannelNum', AudioChannelNum) def get_FPS(self): # Integer return self.get_query_params().get('FPS') def METHOD_NAME(self, FPS): # Integer self.add_query_param('FPS', FPS) def get_AudioRate(self): # Integer return self.get_query_params().get('AudioRate') def set_AudioRate(self, AudioRate): # Integer self.add_query_param('AudioRate', AudioRate) def get_FpsWithSource(self): # String return self.get_query_params().get('FpsWithSource') def set_FpsWithSource(self, FpsWithSource): # String self.add_query_param('FpsWithSource', FpsWithSource) def get_AudioBitrate(self): # Integer return self.get_query_params().get('AudioBitrate') def set_AudioBitrate(self, AudioBitrate): # Integer self.add_query_param('AudioBitrate', AudioBitrate) def get_Width(self): # Integer return self.get_query_params().get('Width') def set_Width(self, Width): # Integer self.add_query_param('Width', Width) def get_VideoBitrate(self): # Integer return self.get_query_params().get('VideoBitrate') def set_VideoBitrate(self, VideoBitrate): # Integer self.add_query_param('VideoBitrate', VideoBitrate) def get_KmsKeyID(self): # String return self.get_query_params().get('KmsKeyID') def set_KmsKeyID(self, KmsKeyID): # String self.add_query_param('KmsKeyID', KmsKeyID)
    374
    # Copyright 2021 Memgraph Ltd. # # Use of this software is governed by the Business Source License # included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source # License, and you may not use this file except in compliance with the Business Source License. # # As of the Change Date specified in that file, in accordance with # the Business Source License, use of this software will be governed # by the Apache License, Version 2.0, included in the file # licenses/APL.txt. import copy import os import subprocess import sys import time import mgclient SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) PROJECT_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "..")) BUILD_DIR = os.path.join(PROJECT_DIR, "build") MEMGRAPH_BINARY = os.path.join(BUILD_DIR, "memgraph") def wait_for_server(port, delay=0.01): cmd = ["nc", "-z", "-w", "1", "127.0.0.1", str(port)] count = 0 while subprocess.call(cmd) != 0: time.sleep(0.01) if count > 10 / 0.01: print("Could not wait for server on port", port, "to startup!") sys.exit(1) count += 1 time.sleep(delay) def extract_bolt_port(args): for arg_index, arg in enumerate(args): if arg.startswith("--bolt-port="): maybe_port = arg.split("=")[1] if not maybe_port.isdigit(): raise Exception("Unable to read Bolt port after --bolt-port=.") return int(maybe_port) elif arg == "--bolt-port": maybe_port = args[arg_index + 1] if not maybe_port.isdigit(): raise Exception("Unable to read Bolt port after --bolt-port.") return int(maybe_port) return 7687 def replace_paths(path): return path.replace("$PROJECT_DIR", PROJECT_DIR).replace("$SCRIPT_DIR", SCRIPT_DIR).replace("$BUILD_DIR", BUILD_DIR) class MemgraphInstanceRunner: def __init__(self, binary_path=MEMGRAPH_BINARY, use_ssl=False): self.host = "127.0.0.1" self.bolt_port = None self.binary_path = binary_path self.args = None self.proc_mg = None self.ssl = use_ssl def METHOD_NAME(self, setup_queries): if setup_queries is None: return # An assumption being database instance is fresh, no need for the auth. conn = mgclient.connect(host=self.host, port=self.bolt_port, sslmode=self.ssl) conn.autocommit = True cursor = conn.cursor() for query_coll in setup_queries: if isinstance(query_coll, str): cursor.execute(query_coll) elif isinstance(query_coll, list): for query in query_coll: cursor.execute(query) cursor.close() conn.close() # NOTE: Both query and get_connection may esablish new connection -> auth # details required -> username/password should be optional arguments. 
def query(self, query, conn=None, username="", password=""): new_conn = conn is None if new_conn: conn = self.get_connection(username, password) cursor = conn.cursor() cursor.execute(query) data = cursor.fetchall() cursor.close() if new_conn: conn.close() return data def get_connection(self, username="", password=""): conn = mgclient.connect( host=self.host, port=self.bolt_port, sslmode=self.ssl, username=username, password=password ) conn.autocommit = True return conn def start(self, restart=False, args=None, setup_queries=None): if not restart and self.is_running(): return self.stop() if args is not None: self.args = copy.deepcopy(args) self.args = [replace_paths(arg) for arg in self.args] args_mg = [ self.binary_path, "--storage-wal-enabled", "--storage-snapshot-interval-sec", "300", "--storage-properties-on-edges", ] + self.args self.bolt_port = extract_bolt_port(args_mg) self.proc_mg = subprocess.Popen(args_mg) wait_for_server(self.bolt_port) self.METHOD_NAME(setup_queries) assert self.is_running(), "The Memgraph process died!" def is_running(self): if self.proc_mg is None: return False if self.proc_mg.poll() is not None: return False return True def stop(self): if not self.is_running(): return self.proc_mg.terminate() code = self.proc_mg.wait() assert code == 0, "The Memgraph process exited with non-zero!" def kill(self): if not self.is_running(): return self.proc_mg.kill() code = self.proc_mg.wait() assert code == -9, "The killed Memgraph process exited with non-nine!"
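A hedged sketch of driving the runner above from a test; it assumes a built memgraph binary at the module's default path and uses only methods defined in the class.

mg = MemgraphInstanceRunner()
mg.start(
    args=["--bolt-port", "7687"],
    setup_queries=["CREATE (:Node {name: 'a'});"],
)
try:
    rows = mg.query("MATCH (n:Node) RETURN n.name;")
    print(rows)
finally:
    mg.stop()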
    375
    # Copyright 2017-2022 EPAM Systems, Inc. (https://www.epam.com/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pipeline.hpc.logger import Logger def _perform_command(action, msg, error_msg, skip_on_failure): Logger.info(msg) try: action() except RuntimeError as e: Logger.warn(error_msg) if not skip_on_failure: raise RuntimeError(error_msg, e) class GridEngineType: SGE = "SGE" SLURM = "SLURM" def __init__(self): pass class AllocationRuleParsingError(RuntimeError): pass class AllocationRule: ALLOWED_VALUES = ['$pe_slots', '$fill_up', '$round_robin'] def __init__(self, value): if value in AllocationRule.ALLOWED_VALUES: self.value = value else: raise AllocationRuleParsingError('Wrong AllocationRule value, only %s is available!' % AllocationRule.ALLOWED_VALUES) @staticmethod def pe_slots(): return AllocationRule('$pe_slots') @staticmethod def fill_up(): return AllocationRule('$fill_up') @staticmethod def round_robin(): return AllocationRule('$round_robin') @staticmethod def fractional_rules(): return [AllocationRule.round_robin(), AllocationRule.fill_up()] @staticmethod def integral_rules(): return [AllocationRule.pe_slots()] def __eq__(self, other): if not isinstance(other, AllocationRule): # don't attempt to compare against unrelated types return False return other.value == self.value class GridEngineJobState: RUNNING = 'running' PENDING = 'pending' SUSPENDED = 'suspended' ERROR = 'errored' DELETED = 'deleted' COMPLETED = 'completed' UNKNOWN = 'unknown' _letter_codes_to_states = { # Job statuses: [SGE] + [SLURM] RUNNING: ['r', 't', 'Rr', 'Rt'] + ['RUNNING'], PENDING: ['qw', 'qw', 'hqw', 'hqw', 'hRwq', 'hRwq', 'hRwq', 'qw', 'qw'] + ['PENDING'], SUSPENDED: ['s', 'ts', 'S', 'tS', 'T', 'tT', 'Rs', 'Rts', 'RS', 'RtS', 'RT', 'RtT'] + ['SUSPENDED', 'STOPPED'], ERROR: ['Eqw', 'Ehqw', 'EhRqw'] + ['DEADLINE', ' FAILED'], DELETED: ['dr', 'dt', 'dRr', 'dRt', 'ds', 'dS', 'dT', 'dRs', 'dRS', 'dRT'] + ['DELETED', 'CANCELLED'], COMPLETED: [] + ['COMPLETED', 'COMPLETING'] } @staticmethod def from_letter_code(code): for key in GridEngineJobState._letter_codes_to_states: if code in GridEngineJobState._letter_codes_to_states[key]: return key return GridEngineJobState.UNKNOWN class GridEngineJob: def __init__(self, id, root_id, name, user, state, datetime, hosts=None, cpu=0, gpu=0, mem=0, pe='local'): self.id = id self.root_id = root_id self.name = name self.user = user self.state = state self.datetime = datetime self.hosts = hosts if hosts else [] self.cpu = cpu self.gpu = gpu self.mem = mem self.pe = pe def __repr__(self): return str(self.__dict__) class GridEngine: def get_jobs(self): pass def disable_host(self, host): """ Disables host to prevent receiving new jobs from the queue. This command does not abort currently running jobs. :param host: Host to be enabled. """ pass def enable_host(self, host): """ Enables host to make it available to receive new jobs from the queue. :param host: Host to be enabled. 
""" pass def get_pe_allocation_rule(self, pe): """ Returns allocation rule of the pe :param pe: Parallel environment to return allocation rule. """ pass def delete_host(self, host, skip_on_failure=False): """ Completely deletes host from GE: 1. Shutdown host execution daemon. 2. Removes host from queue settings. 3. Removes host from host group. 4. Removes host from administrative hosts. 5. Removes host from GE. :param host: Host to be removed. :param skip_on_failure: Specifies if the host killing should be continued even if some of the commands has failed. """ pass def get_host_supplies(self): pass def METHOD_NAME(self, host): pass def get_engine_type(self): pass def is_valid(self, host): """ Validates host in GE checking corresponding execution host availability and its states. :param host: Host to be checked. :return: True if execution host is valid. """ return True def kill_jobs(self, jobs, force=False): """ Kills jobs in GE. :param jobs: Grid engine jobs. :param force: Specifies if this command should be performed with -f flag. """ pass class GridEngineDemandSelector: def select(self, jobs): pass class GridEngineJobValidator: def validate(self, jobs): pass
    376
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkvpc.endpoint import endpoint_data class ListPublicIpAddressPoolsRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'ListPublicIpAddressPools','vpc') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_ResourceOwnerId(self): # Long return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self, ResourceOwnerId): # Long self.add_query_param('ResourceOwnerId', ResourceOwnerId) def get_Isp(self): # String return self.get_query_params().get('Isp') def set_Isp(self, Isp): # String self.add_query_param('Isp', Isp) def get_ResourceGroupId(self): # String return self.get_query_params().get('ResourceGroupId') def set_ResourceGroupId(self, ResourceGroupId): # String self.add_query_param('ResourceGroupId', ResourceGroupId) def get_NextToken(self): # String return self.get_query_params().get('NextToken') def set_NextToken(self, NextToken): # String self.add_query_param('NextToken', NextToken) def get_DryRun(self): # Boolean return self.get_query_params().get('DryRun') def set_DryRun(self, DryRun): # Boolean self.add_query_param('DryRun', DryRun) def get_PublicIpAddressPoolIdss(self): # RepeatList return self.get_query_params().get('PublicIpAddressPoolIds') def set_PublicIpAddressPoolIdss(self, PublicIpAddressPoolIds): # RepeatList for depth1 in range(len(PublicIpAddressPoolIds)): self.add_query_param('PublicIpAddressPoolIds.' + str(depth1 + 1), PublicIpAddressPoolIds[depth1]) def get_ResourceOwnerAccount(self): # String return self.get_query_params().get('ResourceOwnerAccount') def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount) def get_OwnerAccount(self): # String return self.get_query_params().get('OwnerAccount') def set_OwnerAccount(self, OwnerAccount): # String self.add_query_param('OwnerAccount', OwnerAccount) def get_OwnerId(self): # Long return self.get_query_params().get('OwnerId') def set_OwnerId(self, OwnerId): # Long self.add_query_param('OwnerId', OwnerId) def get_Tagss(self): # RepeatList return self.get_query_params().get('Tags') def set_Tagss(self, Tags): # RepeatList for depth1 in range(len(Tags)): if Tags[depth1].get('Key') is not None: self.add_query_param('Tags.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key')) if Tags[depth1].get('Value') is not None: self.add_query_param('Tags.' 
+ str(depth1 + 1) + '.Value', Tags[depth1].get('Value')) def METHOD_NAME(self): # String return self.get_query_params().get('Name') def set_Name(self, Name): # String self.add_query_param('Name', Name) def get_MaxResults(self): # Integer return self.get_query_params().get('MaxResults') def set_MaxResults(self, MaxResults): # Integer self.add_query_param('MaxResults', MaxResults) def get_Status(self): # String return self.get_query_params().get('Status') def set_Status(self, Status): # String self.add_query_param('Status', Status)
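A hedged sketch of how this request class is normally driven through the Alibaba Cloud SDK core client. The import path follows the usual aliyun-python-sdk layout and the credentials and region are placeholders; both should be checked against your environment.

from aliyunsdkcore.client import AcsClient
from aliyunsdkvpc.request.v20160428.ListPublicIpAddressPoolsRequest import ListPublicIpAddressPoolsRequest

# Placeholder credentials and region.
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = ListPublicIpAddressPoolsRequest()
request.set_Isp('BGP')
request.set_MaxResults(10)

# Returns the raw JSON response body (bytes); parsing is left to the caller.
print(client.do_action_with_exception(request))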
    377
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkecs.endpoint import endpoint_data class DescribeDisksFullStatusRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeDisksFullStatus','ecs') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_EventIds(self): # RepeatList return self.get_query_params().get('EventId') def set_EventIds(self, EventId): # RepeatList for depth1 in range(len(EventId)): self.add_query_param('EventId.' + str(depth1 + 1), EventId[depth1]) def get_ResourceOwnerId(self): # Long return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self, ResourceOwnerId): # Long self.add_query_param('ResourceOwnerId', ResourceOwnerId) def get_PageNumber(self): # Integer return self.get_query_params().get('PageNumber') def set_PageNumber(self, PageNumber): # Integer self.add_query_param('PageNumber', PageNumber) def get_EventTimeStart(self): # String return self.get_query_params().get('EventTime.Start') def set_EventTimeStart(self, EventTimeStart): # String self.add_query_param('EventTime.Start', EventTimeStart) def get_ResourceGroupId(self): # String return self.get_query_params().get('ResourceGroupId') def set_ResourceGroupId(self, ResourceGroupId): # String self.add_query_param('ResourceGroupId', ResourceGroupId) def get_PageSize(self): # Integer return self.get_query_params().get('PageSize') def METHOD_NAME(self, PageSize): # Integer self.add_query_param('PageSize', PageSize) def get_DiskIds(self): # RepeatList return self.get_query_params().get('DiskId') def set_DiskIds(self, DiskId): # RepeatList for depth1 in range(len(DiskId)): self.add_query_param('DiskId.' + str(depth1 + 1), DiskId[depth1]) def get_Tags(self): # RepeatList return self.get_query_params().get('Tag') def set_Tags(self, Tag): # RepeatList for depth1 in range(len(Tag)): if Tag[depth1].get('Key') is not None: self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key')) if Tag[depth1].get('Value') is not None: self.add_query_param('Tag.' 
+ str(depth1 + 1) + '.Value', Tag[depth1].get('Value')) def get_ResourceOwnerAccount(self): # String return self.get_query_params().get('ResourceOwnerAccount') def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount) def get_OwnerAccount(self): # String return self.get_query_params().get('OwnerAccount') def set_OwnerAccount(self, OwnerAccount): # String self.add_query_param('OwnerAccount', OwnerAccount) def get_OwnerId(self): # Long return self.get_query_params().get('OwnerId') def set_OwnerId(self, OwnerId): # Long self.add_query_param('OwnerId', OwnerId) def get_EventTimeEnd(self): # String return self.get_query_params().get('EventTime.End') def set_EventTimeEnd(self, EventTimeEnd): # String self.add_query_param('EventTime.End', EventTimeEnd) def get_HealthStatus(self): # String return self.get_query_params().get('HealthStatus') def set_HealthStatus(self, HealthStatus): # String self.add_query_param('HealthStatus', HealthStatus) def get_EventType(self): # String return self.get_query_params().get('EventType') def set_EventType(self, EventType): # String self.add_query_param('EventType', EventType) def get_Status(self): # String return self.get_query_params().get('Status') def set_Status(self, Status): # String self.add_query_param('Status', Status)
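Unlike the NextToken-style APIs, this request pages with PageNumber/PageSize, so listing everything means looping until a page comes back empty. A sketch under the assumption that the response nests results as DiskFullStatusSet.DiskFullStatus; verify the field names and the import path against the ECS API reference, and treat the credentials as placeholders.

import json

from aliyunsdkcore.client import AcsClient
from aliyunsdkecs.request.v20140526.DescribeDisksFullStatusRequest import DescribeDisksFullStatusRequest

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

page = 1
while True:
    request = DescribeDisksFullStatusRequest()
    request.set_PageNumber(page)
    request.set_PageSize(50)
    body = json.loads(client.do_action_with_exception(request))
    items = body.get('DiskFullStatusSet', {}).get('DiskFullStatus', [])  # field names assumed
    if not items:
        break
    for item in items:
        print(item)
    page += 1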
    378
    """CustomFCNMaskHead for OTX template.""" # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch from mmdet.models.builder import HEADS from mmdet.models.roi_heads.mask_heads.fcn_mask_head import FCNMaskHead from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled @HEADS.register_module() class CustomFCNMaskHead(FCNMaskHead): """Custom FCN Mask Head for fast mask evaluation.""" def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale): """Get segmentation masks from mask_pred and bboxes. The original `FCNMaskHead.get_seg_masks` grid sampled 28 x 28 masks to the original image resolution. As a result, the resized masks occupy a large amount of memory and slow down the inference. This method directly returns 28 x 28 masks and resize to bounding boxes size in post-processing step. Doing so can save memory and speed up the inference. Args: mask_pred (Tensor or ndarray): shape (n, #class, h, w). For single-scale testing, mask_pred is the direct output of model, whose type is Tensor, while for multi-scale testing, it will be converted to numpy array outside of this method. det_bboxes (Tensor): shape (n, 4/5) det_labels (Tensor): shape (n, ) rcnn_test_cfg (dict): rcnn testing config ori_shape (Tuple): original image height and width, shape (2,) scale_factor(ndarray | Tensor): If ``rescale is True``, box coordinates are divided by this scale factor to fit ``ori_shape``. rescale (bool): If True, the resulting masks will be rescaled to ``ori_shape``. Returns: list[list]: encoded masks. The c-th item in the outer list corresponds to the c-th class. Given the c-th outer list, the i-th item in that inner list is the mask for the i-th box with class label c. """ if isinstance(mask_pred, torch.Tensor): mask_pred = mask_pred.sigmoid() else: # In AugTest, has been activated before mask_pred = det_bboxes.new_tensor(mask_pred) cls_segms = [[] for _ in range(self.num_classes)] # BG is not included in num_classes labels = det_labels N = len(mask_pred) # The actual implementation split the input into chunks, # and paste them chunk by chunk. threshold = rcnn_test_cfg.mask_thr_binary if not self.class_agnostic: mask_pred = mask_pred[range(N), labels][:, None] for i in range(N): mask = mask_pred[i] if threshold >= 0: mask = (mask >= threshold).to(dtype=torch.bool) else: # for visualization and debugging mask = (mask * 255).to(dtype=torch.uint8) mask = mask.detach().cpu().numpy() cls_segms[labels[i]].append(mask[0]) return cls_segms def get_scaled_seg_masks(self, *args, **kwargs): """Original method "get_seg_mask" from FCNMaskHead. Used in Semi-SL algorithm.""" return super().get_seg_masks(*args, **kwargs) if is_mmdeploy_enabled(): from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( "otx.algorithms.detection.adapters.mmdet.models." "heads.custom_fcn_mask_head.CustomFCNMaskHead.get_seg_masks" ) def METHOD_NAME( ctx, self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, **kwargs ): """Rewrite `get_seg_masks` of `FCNMaskHead` for default backend. Rewrite the get_seg_masks for only fcn_mask_head inference. Args: ctx (dict): context dict self (CustomFCNMaskHead): CustomFCNMaskHead instance mask_pred (Tensor): shape (n, #class, h, w). 
det_bboxes (Tensor): shape (n, 4/5) det_labels (Tensor): shape (n, ) rcnn_test_cfg (dict): rcnn testing config ori_shape (Tuple): original image height and width, shape (2,) kwargs (dict): other arguments Returns: Tensor: a mask of shape (N, img_h, img_w). """ mask_pred = mask_pred.sigmoid() bboxes = det_bboxes[:, :4] labels = det_labels if not self.class_agnostic: box_inds = torch.arange(mask_pred.shape[0], device=bboxes.device) mask_pred = mask_pred[box_inds, labels][:, None] return mask_pred
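For context, a minimal sketch of the post-processing step the docstring defers to: scaling one raw 28 x 28 mask up to its bounding box and pasting it onto a full-size canvas. This is an illustration of the idea, not the exact OTX post-processing code; it assumes a valid box that lies inside the image.

import numpy as np
import torch
import torch.nn.functional as F

def paste_mask(mask_28, box_xyxy, img_h, img_w, threshold=0.5):
    """Resize a single raw mask to its box and place it on an empty canvas."""
    x1, y1, x2, y2 = (int(round(v)) for v in box_xyxy)   # assumes x2 > x1 and y2 > y1
    box_h, box_w = y2 - y1, x2 - x1
    mask = torch.as_tensor(mask_28, dtype=torch.float32)[None, None]   # shape (1, 1, 28, 28)
    mask = F.interpolate(mask, size=(box_h, box_w), mode='bilinear',
                         align_corners=False)[0, 0]
    canvas = np.zeros((img_h, img_w), dtype=np.uint8)
    canvas[y1:y2, x1:x2] = (mask.numpy() >= threshold).astype(np.uint8)
    return canvas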
    379
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest class UpdateTaskDetailRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'devops-rdc', '2020-03-03', 'UpdateTaskDetail') self.set_method('POST') def get_Note(self): # String return self.get_body_params().get('Note') def set_Note(self, Note): # String self.add_body_params('Note', Note) def get_ExecutorId(self): # String return self.get_body_params().get('ExecutorId') def set_ExecutorId(self, ExecutorId): # String self.add_body_params('ExecutorId', ExecutorId) def get_StartDate(self): # String return self.get_body_params().get('StartDate') def set_StartDate(self, StartDate): # String self.add_body_params('StartDate', StartDate) def get_DelInvolvers(self): # String return self.get_body_params().get('DelInvolvers') def set_DelInvolvers(self, DelInvolvers): # String self.add_body_params('DelInvolvers', DelInvolvers) def get_Content(self): # String return self.get_body_params().get('Content') def set_Content(self, Content): # String self.add_body_params('Content', Content) def get_SprintId(self): # String return self.get_body_params().get('SprintId') def set_SprintId(self, SprintId): # String self.add_body_params('SprintId', SprintId) def get_CustomFieldId(self): # String return self.get_body_params().get('CustomFieldId') def set_CustomFieldId(self, CustomFieldId): # String self.add_body_params('CustomFieldId', CustomFieldId) def get_ProjectId(self): # String return self.get_body_params().get('ProjectId') def set_ProjectId(self, ProjectId): # String self.add_body_params('ProjectId', ProjectId) def get_TaskId(self): # String return self.get_body_params().get('TaskId') def set_TaskId(self, TaskId): # String self.add_body_params('TaskId', TaskId) def get_TaskFlowStatusId(self): # String return self.get_body_params().get('TaskFlowStatusId') def set_TaskFlowStatusId(self, TaskFlowStatusId): # String self.add_body_params('TaskFlowStatusId', TaskFlowStatusId) def get_TagIds(self): # String return self.get_body_params().get('TagIds') def set_TagIds(self, TagIds): # String self.add_body_params('TagIds', TagIds) def METHOD_NAME(self): # String return self.get_body_params().get('AddInvolvers') def set_AddInvolvers(self, AddInvolvers): # String self.add_body_params('AddInvolvers', AddInvolvers) def get_Priority(self): # Long return self.get_body_params().get('Priority') def set_Priority(self, Priority): # Long self.add_body_params('Priority', Priority) def get_OrgId(self): # String return self.get_body_params().get('OrgId') def set_OrgId(self, OrgId): # String self.add_body_params('OrgId', OrgId) def get_DueDate(self): # String return self.get_body_params().get('DueDate') def set_DueDate(self, DueDate): # String self.add_body_params('DueDate', DueDate) def 
get_WorkTimes(self): # Long return self.get_body_params().get('WorkTimes') def set_WorkTimes(self, WorkTimes): # Long self.add_body_params('WorkTimes', WorkTimes) def get_StoryPoint(self): # String return self.get_body_params().get('StoryPoint') def set_StoryPoint(self, StoryPoint): # String self.add_body_params('StoryPoint', StoryPoint) def get_CustomFieldValues(self): # String return self.get_body_params().get('CustomFieldValues') def set_CustomFieldValues(self, CustomFieldValues): # String self.add_body_params('CustomFieldValues', CustomFieldValues)
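Note that, unlike the VPC and ECS requests above, this class writes everything into body parameters. The assembled fields can be inspected locally without credentials or a network call; dispatching works the same way as in the earlier AcsClient examples. Identifiers below are placeholders.

request = UpdateTaskDetailRequest()
request.set_OrgId('<organization-id>')
request.set_ProjectId('<project-id>')
request.set_TaskId('<task-id>')
request.set_Content('Updated task title')
print(request.get_body_params())
# {'OrgId': '<organization-id>', 'ProjectId': '<project-id>', 'TaskId': '<task-id>', 'Content': 'Updated task title'}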
    380
    """EmceeSampler class.""" from __future__ import annotations import logging from typing import List, Union import numpy as np from ..problem import Problem from ..result import McmcPtResult from ..startpoint import UniformStartpoints, uniform from .sampler import Sampler, SamplerImportError logger = logging.getLogger(__name__) class EmceeSampler(Sampler): """Use emcee for sampling. Wrapper around https://emcee.readthedocs.io/en/stable/, see there for details. """ def __init__( self, nwalkers: int = 1, sampler_args: dict = None, run_args: dict = None, ): """ Initialize sampler. Parameters ---------- nwalkers: The number of walkers in the ensemble. sampler_args: Further keyword arguments that are passed on to ``emcee.EnsembleSampler.__init__``. run_args: Further keyword arguments that are passed on to ``emcee.EnsembleSampler.run_mcmc``. """ # check dependencies try: import emcee except ImportError: raise SamplerImportError("emcee") super().__init__() self.nwalkers: int = nwalkers if sampler_args is None: sampler_args = {} self.sampler_args: dict = sampler_args if run_args is None: run_args = {} self.run_args: dict = run_args # set in initialize self.problem: Union[Problem, None] = None self.sampler: Union[emcee.EnsembleSampler, None] = None self.state: Union[emcee.State, None] = None def METHOD_NAME( self, center: np.ndarray, problem: Problem, epsilon: float = 1e-3, ): """Get walker initial positions as samples from an epsilon ball. The ball is scaled in each direction according to the magnitude of the center in that direction. It is assumed that, because vectors are generated near a good point, all generated vectors are evaluable, so evaluability is not checked. Points that are generated outside the problem bounds will get shifted to lie on the edge of the problem bounds. Parameters ---------- center: The center of the epsilon ball. The dimension should match the full dimension of the pyPESTO problem. This will be returned as the first position. problem: The pyPESTO problem. epsilon: The relative radius of the ball. e.g., if `epsilon=0.5` and the center of the first dimension is at 100, then the upper and lower bounds of the epsilon ball in the first dimension will be 150 and 50, respectively. """ # Epsilon ball lb = center * (1 - epsilon) ub = center * (1 + epsilon) # Adjust bounds to satisfy problem bounds lb[lb < problem.lb] = problem.lb[lb < problem.lb] ub[ub > problem.ub] = problem.ub[ub > problem.ub] # Sample initial positions initial_state_after_first = uniform( n_starts=self.nwalkers - 1, lb=lb, ub=ub, ) # Include `center` in initial positions initial_state = np.row_stack( ( center, initial_state_after_first, ) ) return initial_state def initialize( self, problem: Problem, x0: Union[np.ndarray, List[np.ndarray]], ) -> None: """Initialize the sampler. It is recommended to initialize walkers Parameters ---------- x0: The "a priori preferred position". e.g., an optimized parameter vector. https://emcee.readthedocs.io/en/stable/user/faq/ The position of the first walker will be this, the remaining walkers will be assigned positions uniformly in a smaller ball around this vector. Alternatively, a set of vectors can be provided, which will be used to initialize walkers. In this case, any remaining walkers will be initialized at points sampled uniformly within the problem bounds. 
""" import emcee self.problem = problem # extract for pickling efficiency objective = self.problem.objective lb = self.problem.lb ub = self.problem.ub # parameter dimenstion ndim = len(self.problem.x_free_indices) def log_prob(x): """Log-probability density function.""" # check if parameter lies within bounds if any(x < lb) or any(x > ub): return -np.inf # invert sign return -1.0 * objective(x) # initialize sampler self.sampler = emcee.EnsembleSampler( nwalkers=self.nwalkers, ndim=ndim, log_prob_fn=log_prob, **self.sampler_args, ) # assign startpoints if self.state is None: if x0.ndim > 1 and len(x0.shape[0]) > 1: logger.warning( "More than a single vector was provided to initialize the " "walker positions. If these vectors do not exist in a " "small ball around a high-probability position (e.g. " "optimized vector) then sampling may be inefficient (see " "emcee FAQ: " "https://emcee.readthedocs.io/en/stable/user/faq/ )." ) # extract x0 x0 = np.asarray(x0) if x0.ndim == 1: x0 = [x0] x0 = np.array([problem.get_full_vector(x) for x in x0]) x_guesses_full0 = problem.x_guesses_full # add x0 to guesses problem.set_x_guesses( np.row_stack( ( x0, problem.x_guesses_full, ) ) ) # sample start points initial_state = UniformStartpoints( use_guesses=True, check_fval=True, check_grad=False, )( n_starts=self.nwalkers, problem=problem, ) # restore original guesses problem.set_x_guesses(x_guesses_full0) else: initial_state = self.METHOD_NAME( center=x0, problem=problem, ) self.state = initial_state def sample(self, n_samples: int, beta: float = 1.0) -> None: """Return the most recent sample state.""" self.state = self.sampler.run_mcmc( initial_state=self.state, nsteps=n_samples, **self.run_args, ) def get_samples(self) -> McmcPtResult: """Get the samples into the fitting pypesto format.""" # all walkers are concatenated, yielding a flat array trace_x = np.array([self.sampler.get_chain(flat=True)]) trace_neglogpost = np.array([-self.sampler.get_log_prob(flat=True)]) # the sampler does not know priors trace_neglogprior = np.full(trace_neglogpost.shape, np.nan) # the walkers all run on temperature 1 betas = np.array([1.0]) result = McmcPtResult( trace_x=trace_x, trace_neglogpost=trace_neglogpost, trace_neglogprior=trace_neglogprior, betas=betas, ) return result
    381
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkoceanbasepro.endpoint import endpoint_data class DescribeSlowSQLListRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'OceanBasePro', '2019-09-01', 'DescribeSlowSQLList','oceanbase') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_StartTime(self): # String return self.get_body_params().get('StartTime') def set_StartTime(self, StartTime): # String self.add_body_params('StartTime', StartTime) def METHOD_NAME(self): # Integer return self.get_body_params().get('PageNumber') def set_PageNumber(self, PageNumber): # Integer self.add_body_params('PageNumber', PageNumber) def get_SearchRule(self): # String return self.get_body_params().get('SearchRule') def set_SearchRule(self, SearchRule): # String self.add_body_params('SearchRule', SearchRule) def get_TenantId(self): # String return self.get_body_params().get('TenantId') def set_TenantId(self, TenantId): # String self.add_body_params('TenantId', TenantId) def get_PageSize(self): # Integer return self.get_body_params().get('PageSize') def set_PageSize(self, PageSize): # Integer self.add_body_params('PageSize', PageSize) def get_SearchParameter(self): # String return self.get_body_params().get('SearchParameter') def set_SearchParameter(self, SearchParameter): # String self.add_body_params('SearchParameter', SearchParameter) def get_SortOrder(self): # String return self.get_body_params().get('SortOrder') def set_SortOrder(self, SortOrder): # String self.add_body_params('SortOrder', SortOrder) def get_SearchValue(self): # String return self.get_body_params().get('SearchValue') def set_SearchValue(self, SearchValue): # String self.add_body_params('SearchValue', SearchValue) def get_SQLId(self): # String return self.get_body_params().get('SQLId') def set_SQLId(self, SQLId): # String self.add_body_params('SQLId', SQLId) def get_FilterCondition(self): # String return self.get_body_params().get('FilterCondition') def set_FilterCondition(self, FilterCondition): # String self.add_body_params('FilterCondition', FilterCondition) def get_EndTime(self): # String return self.get_body_params().get('EndTime') def set_EndTime(self, EndTime): # String self.add_body_params('EndTime', EndTime) def get_NodeIp(self): # String return self.get_body_params().get('NodeIp') def set_NodeIp(self, NodeIp): # String self.add_body_params('NodeIp', NodeIp) def get_DbName(self): # String return self.get_body_params().get('DbName') def set_DbName(self, DbName): # String self.add_body_params('DbName', DbName) def get_SearchKeyWord(self): # String 
return self.get_body_params().get('SearchKeyWord') def set_SearchKeyWord(self, SearchKeyWord): # String self.add_body_params('SearchKeyWord', SearchKeyWord) def get_SortColumn(self): # String return self.get_body_params().get('SortColumn') def set_SortColumn(self, SortColumn): # String self.add_body_params('SortColumn', SortColumn)
    382
    from functools import partial from unittest import ( TestCase, mock, ) from lxml import etree from pcs.lib.cib.resource import primitive from pcs.lib.cib.tools import IdProvider from pcs.lib.resource_agent import ResourceAgentName from pcs_test.tools.assertions import assert_xml_equal class FindPrimitivesByAgent(TestCase): # pylint: disable=protected-access def setUp(self): self.resources_section = etree.fromstring( """ <resources> <primitive class="standard" provider="provider" type="agent_type" id="r0" /> <primitive class="something" provider="provider" type="agent_type" id="r23" /> <primitive class="stonith" type="agent_type" id="r1"/> <primitive class="standard" provider="provider" type="dummy1" id="r123" /> <group> <primitive class="stonith" type="agent_type" id="r2"/> <primitive class="standard" provider="pacemaker" type="agent_type" id="r3" /> <primitive class="standard" provider="provider" type="agent_type" id="r4" /> </group> <clone> <group> <primitive class="standard" provider="provider" type="agent_type" id="r5" /> </group> </clone> <clone> <primitive class="standard" provider="provider" type="agent_type" id="r6" /> </clone> </resources> """ ) def test_stonith(self): results = primitive._find_primitives_by_agent( self.resources_section, ResourceAgentName( "stonith", None, "agent_type", ), ) expected_results = [ '<primitive class="stonith" type="agent_type" id="r1"/>', '<primitive class="stonith" type="agent_type" id="r2"/>', ] self.assertEqual(len(expected_results), len(results)) for i, res in enumerate(results): assert_xml_equal(expected_results[i], etree.tostring(res).decode()) def test_with_provider(self): results = primitive._find_primitives_by_agent( self.resources_section, ResourceAgentName( "standard", "provider", "agent_type", ), ) expected_results = [ """<primitive class="standard" provider="provider" type="agent_type" id="r0" />""", """<primitive class="standard" provider="provider" type="agent_type" id="r4" />""", """<primitive class="standard" provider="provider" type="agent_type" id="r5" />""", """<primitive class="standard" provider="provider" type="agent_type" id="r6" />""", ] self.assertEqual(len(expected_results), len(results)) for i, res in enumerate(results): assert_xml_equal(expected_results[i], etree.tostring(res).decode()) @mock.patch("pcs.lib.cib.resource.primitive.append_new_instance_attributes") @mock.patch("pcs.lib.cib.resource.primitive.append_new_meta_attributes") @mock.patch("pcs.lib.cib.resource.primitive.create_operations") class AppendNew(TestCase): def setUp(self): self.resources_section = etree.fromstring("<resources/>") self.instance_attributes = {"a": "b"} self.meta_attributes = {"c": "d"} self.operation_list = [{"name": "monitoring"}] self.id_provider = IdProvider(self.resources_section) self.run = partial( primitive.append_new, self.resources_section, self.id_provider, instance_attributes=self.instance_attributes, meta_attributes=self.meta_attributes, operation_list=self.operation_list, ) def check_mocks( self, primitive_element, create_operations, append_new_meta_attributes, append_new_instance_attributes, ): create_operations.assert_called_once_with( primitive_element, self.id_provider, self.operation_list ) append_new_meta_attributes.assert_called_once_with( primitive_element, self.meta_attributes, self.id_provider ) append_new_instance_attributes.assert_called_once_with( primitive_element, self.instance_attributes, self.id_provider ) def METHOD_NAME( self, create_operations, append_new_meta_attributes, append_new_instance_attributes, 
): primitive_element = self.run("RESOURCE_ID", "OCF", None, "DUMMY") self.assertEqual( primitive_element, self.resources_section.find(".//primitive") ) self.assertEqual(primitive_element.attrib["class"], "OCF") self.assertEqual(primitive_element.attrib["type"], "DUMMY") self.assertFalse(primitive_element.attrib.has_key("provider")) self.check_mocks( primitive_element, create_operations, append_new_meta_attributes, append_new_instance_attributes, ) def test_append_with_provider( self, create_operations, append_new_meta_attributes, append_new_instance_attributes, ): primitive_element = self.run("RESOURCE_ID", "OCF", "HEARTBEAT", "DUMMY") self.assertEqual( primitive_element, self.resources_section.find(".//primitive") ) self.assertEqual(primitive_element.attrib["class"], "OCF") self.assertEqual(primitive_element.attrib["type"], "DUMMY") self.assertEqual(primitive_element.attrib["provider"], "HEARTBEAT") self.check_mocks( primitive_element, create_operations, append_new_meta_attributes, append_new_instance_attributes, )
    383
    from itertools import product from website.notifications.emails import compile_subscriptions from website.notifications import utils, constants def get_file_subs_from_folder(addon, user, kind, path, name): """Find the file tree under a specified folder.""" folder = dict(kind=kind, path=path, name=name) file_tree = addon._get_file_tree(filenode=folder, user=user, version='latest-published') return list_of_files(file_tree) def list_of_files(file_object): files = [] if file_object['kind'] == 'file': return [file_object['path']] else: for child in file_object['children']: files.extend(list_of_files(child)) return files def compile_user_lists(files, user, source_node, node): """Take multiple file ids and compiles them. :param files: List of WaterButler paths :param user: User who initiated action/event :param source_node: Node instance from :param node: Node instance to :return: move, warn, and remove dicts """ # initialise subscription dictionaries move = {key: [] for key in constants.NOTIFICATION_TYPES} warn = {key: [] for key in constants.NOTIFICATION_TYPES} remove = {key: [] for key in constants.NOTIFICATION_TYPES} # get the node subscription if len(files) == 0: move, warn, remove = categorize_users( user, 'file_updated', source_node, 'file_updated', node ) # iterate through file subscriptions for file_path in files: path = file_path.strip('/') t_move, t_warn, t_remove = categorize_users( user, path + '_file_updated', source_node, path + '_file_updated', node ) # Add file subs to overall list of subscriptions for notification in constants.NOTIFICATION_TYPES: move[notification] = list(set(move[notification]).union(set(t_move[notification]))) warn[notification] = list(set(warn[notification]).union(set(t_warn[notification]))) remove[notification] = list(set(remove[notification]).union(set(t_remove[notification]))) return move, warn, remove def categorize_users(user, source_event, source_node, event, node): """Categorize users from a file subscription into three categories. Puts users in one of three bins: - Moved: User has permissions on both nodes, subscribed to both - Warned: User has permissions on both, not subscribed to destination - Removed: Does not have permission on destination node :param user: User instance who started the event :param source_event: <guid>_event_name :param source_node: node from where the event happened :param event: new guid event name :param node: node where event ends up :return: Moved, to be warned, and removed users. """ remove = utils.users_to_remove(source_event, source_node, node) source_node_subs = compile_subscriptions(source_node, utils.find_subscription_type(source_event)) new_subs = compile_subscriptions(node, utils.find_subscription_type(source_event), event) # Moves users into the warn bucket or the move bucket move = subscriptions_users_union(source_node_subs, new_subs) warn = subscriptions_users_difference(source_node_subs, new_subs) # Removes users without permissions warn, remove = METHOD_NAME(node, warn, remove) # Remove duplicates warn = subscriptions_users_remove_duplicates(warn, new_subs, remove_same=False) move = subscriptions_users_remove_duplicates(move, new_subs, remove_same=False) # Remove duplicates between move and warn; and move and remove move = subscriptions_users_remove_duplicates(move, warn, remove_same=True) move = subscriptions_users_remove_duplicates(move, remove, remove_same=True) for notifications in constants.NOTIFICATION_TYPES: # Remove the user who started this whole thing. 
user_id = user._id if user_id in warn[notifications]: warn[notifications].remove(user_id) if user_id in move[notifications]: move[notifications].remove(user_id) if user_id in remove[notifications]: remove[notifications].remove(user_id) return move, warn, remove def METHOD_NAME(node, warn_subscription, remove_subscription): for notification in constants.NOTIFICATION_TYPES: subbed, removed = utils.separate_users(node, warn_subscription[notification]) warn_subscription[notification] = subbed remove_subscription[notification].extend(removed) remove_subscription[notification] = list(set(remove_subscription[notification])) return warn_subscription, remove_subscription def subscriptions_users_union(emails_1, emails_2): return { notification: list( set(emails_1[notification]).union(set(emails_2[notification])) ) for notification in constants.NOTIFICATION_TYPES.keys() } def subscriptions_users_difference(emails_1, emails_2): return { notification: list( set(emails_1[notification]).difference(set(emails_2[notification])) ) for notification in constants.NOTIFICATION_TYPES.keys() } def subscriptions_users_remove_duplicates(emails_1, emails_2, remove_same=False): emails_list = dict(emails_1) product_list = product(constants.NOTIFICATION_TYPES, repeat=2) for notification_1, notification_2 in product_list: if notification_2 == notification_1 and not remove_same or notification_2 == 'none': continue emails_list[notification_1] = list( set(emails_list[notification_1]).difference(set(emails_2[notification_2])) ) return emails_list
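The helpers at the bottom are plain per-notification-type set algebra; a standalone illustration with made-up user ids and a reduced set of notification types (in the real code, the *_remove_duplicates passes then prune the overlap back out of `move`):

NOTIFICATION_TYPES = ['email_transactional', 'email_digest', 'none']   # reduced for illustration

source_subs = {'email_transactional': ['alice', 'bob'], 'email_digest': ['carol'], 'none': []}
dest_subs = {'email_transactional': ['bob'], 'email_digest': [], 'none': []}

move = {nt: sorted(set(source_subs[nt]) | set(dest_subs[nt])) for nt in NOTIFICATION_TYPES}
warn = {nt: sorted(set(source_subs[nt]) - set(dest_subs[nt])) for nt in NOTIFICATION_TYPES}

print(move)   # {'email_transactional': ['alice', 'bob'], 'email_digest': ['carol'], 'none': []}
print(warn)   # {'email_transactional': ['alice'], 'email_digest': ['carol'], 'none': []}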
    384
    from datetime import datetime from flask import g, request from flask_appbuilder import ModelRestApi from flask_appbuilder.api import expose, safe from flask_appbuilder.const import API_RESULT_RES_KEY from flask_appbuilder.models.sqla.interface import SQLAInterface from flask_appbuilder.security.decorators import permission_name, protect from flask_appbuilder.security.sqla.apis.user.schema import ( UserPostSchema, UserPutSchema, ) from flask_appbuilder.security.sqla.models import Role, User from marshmallow import ValidationError from sqlalchemy.exc import IntegrityError from werkzeug.security import generate_password_hash class UserApi(ModelRestApi): resource_name = "security/users" openapi_spec_tag = "Security Users" class_permission_name = "User" datamodel = SQLAInterface(User) allow_browser_login = True list_columns = [ "id", "roles.id", "roles.name", "first_name", "last_name", "username", "active", "email", "last_login", "login_count", "fail_login_count", "created_on", "changed_on", "created_by.id", "changed_by.id", ] show_columns = list_columns add_columns = [ "roles", "first_name", "last_name", "username", "active", "email", "password", ] edit_columns = add_columns search_columns = [ "username", "first_name", "last_name", "active", "email", "created_by", "changed_by", "roles", ] add_model_schema = UserPostSchema() edit_model_schema = UserPutSchema() def METHOD_NAME(self, item): item.changed_on = datetime.now() item.changed_by_fk = g.user.id if item.password: item.password = generate_password_hash(item.password) def pre_add(self, item): item.password = generate_password_hash(item.password) @expose("/", methods=["POST"]) @protect() @safe @permission_name("post") def post(self): """Create new user --- post: requestBody: description: Model schema required: true content: application/json: schema: $ref: '#/components/schemas/{{self.__class__.__name__}}.post' responses: 201: description: Item changed content: application/json: schema: type: object properties: result: $ref: '#/components/schemas/{{self.__class__.__name__}}.post' 400: $ref: '#/components/responses/400' 401: $ref: '#/components/responses/401' 404: $ref: '#/components/responses/404' 422: $ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' """ try: item = self.add_model_schema.load(request.json) model = User() roles = [] for key, value in item.items(): if key != "roles": setattr(model, key, value) else: for role_id in item[key]: role = ( self.datamodel.session.query(Role) .filter(Role.id == role_id) .one_or_none() ) if role: role.user_id = model.id role.role_id = role_id roles.append(role) if "roles" in item.keys(): model.roles = roles self.pre_add(model) self.datamodel.add(model, raise_exception=True) return self.response(201, id=model.id) except ValidationError as error: return self.response_400(message=error.messages) except IntegrityError as e: return self.response_422(message=str(e.orig)) @expose("/<pk>", methods=["PUT"]) @protect() @safe @permission_name("put") def put(self, pk): """Edit user --- put: parameters: - in: path schema: type: integer name: pk requestBody: description: Model schema required: true content: application/json: schema: $ref: '#/components/schemas/{{self.__class__.__name__}}.put' responses: 200: description: Item changed content: application/json: schema: type: object properties: result: $ref: '#/components/schemas/{{self.__class__.__name__}}.put' 400: $ref: '#/components/responses/400' 401: $ref: '#/components/responses/401' 404: $ref: '#/components/responses/404' 422: 
$ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' """ try: item = self.edit_model_schema.load(request.json) model = self.datamodel.get(pk, self._base_filters) roles = [] for key, value in item.items(): if key != "roles": setattr(model, key, value) else: for role_id in item[key]: role = ( self.datamodel.session.query(Role) .filter(Role.id == role_id) .one_or_none() ) if role: role.user_id = model.id role.role_id = role_id roles.append(role) if "roles" in item.keys(): model.roles = roles self.METHOD_NAME(model) self.datamodel.edit(model, raise_exception=True) return self.response( 200, **{API_RESULT_RES_KEY: self.edit_model_schema.dump(item, many=False)}, ) except ValidationError as e: return self.response_400(message=e.messages) except IntegrityError as e: return self.response_422(message=str(e.orig))
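A rough client-side sketch of creating a user through this API with the requests library. It assumes the application also exposes Flask-AppBuilder's JWT login endpoint and serves the API under the default /api/v1 base path; the host, credentials and role ids below are placeholders.

import requests

BASE = 'http://localhost:8080/api/v1'   # base URL assumed

# Obtain a JWT from Flask-AppBuilder's security API (must be enabled in the app).
token = requests.post(f'{BASE}/security/login', json={
    'username': 'admin', 'password': 'admin', 'provider': 'db',
}).json()['access_token']

resp = requests.post(
    f'{BASE}/security/users/',
    headers={'Authorization': f'Bearer {token}'},
    json={
        'username': 'jdoe', 'first_name': 'Jane', 'last_name': 'Doe',
        'email': 'jdoe@example.org', 'active': True,
        'password': 's3cret', 'roles': [1],   # role ids
    },
)
print(resp.status_code, resp.json())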
    385
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkecd.endpoint import endpoint_data class ExportDesktopListInfoRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'ecd', '2020-09-30', 'ExportDesktopListInfo') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_OfficeSiteId(self): # String return self.get_query_params().get('OfficeSiteId') def set_OfficeSiteId(self, OfficeSiteId): # String self.add_query_param('OfficeSiteId', OfficeSiteId) def get_DesktopStatus(self): # String return self.get_query_params().get('DesktopStatus') def set_DesktopStatus(self, DesktopStatus): # String self.add_query_param('DesktopStatus', DesktopStatus) def get_NextToken(self): # String return self.get_query_params().get('NextToken') def set_NextToken(self, NextToken): # String self.add_query_param('NextToken', NextToken) def get_EndUserIds(self): # RepeatList return self.get_query_params().get('EndUserId') def set_EndUserIds(self, EndUserId): # RepeatList for depth1 in range(len(EndUserId)): self.add_query_param('EndUserId.' + str(depth1 + 1), EndUserId[depth1]) def get_DesktopIds(self): # RepeatList return self.get_query_params().get('DesktopId') def set_DesktopIds(self, DesktopId): # RepeatList for depth1 in range(len(DesktopId)): self.add_query_param('DesktopId.' + str(depth1 + 1), DesktopId[depth1]) def get_Tags(self): # RepeatList return self.get_query_params().get('Tag') def set_Tags(self, Tag): # RepeatList for depth1 in range(len(Tag)): if Tag[depth1].get('Value') is not None: self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value')) if Tag[depth1].get('Key') is not None: self.add_query_param('Tag.' 
+ str(depth1 + 1) + '.Key', Tag[depth1].get('Key')) def get_DesktopName(self): # String return self.get_query_params().get('DesktopName') def set_DesktopName(self, DesktopName): # String self.add_query_param('DesktopName', DesktopName) def get_GroupId(self): # String return self.get_query_params().get('GroupId') def set_GroupId(self, GroupId): # String self.add_query_param('GroupId', GroupId) def get_ExpiredTime(self): # String return self.get_query_params().get('ExpiredTime') def set_ExpiredTime(self, ExpiredTime): # String self.add_query_param('ExpiredTime', ExpiredTime) def get_MaxResults(self): # Integer return self.get_query_params().get('MaxResults') def set_MaxResults(self, MaxResults): # Integer self.add_query_param('MaxResults', MaxResults) def get_LangType(self): # String return self.get_query_params().get('LangType') def set_LangType(self, LangType): # String self.add_query_param('LangType', LangType) def get_ChargeType(self): # String return self.get_query_params().get('ChargeType') def set_ChargeType(self, ChargeType): # String self.add_query_param('ChargeType', ChargeType) def METHOD_NAME(self): # String return self.get_query_params().get('PolicyGroupId') def set_PolicyGroupId(self, PolicyGroupId): # String self.add_query_param('PolicyGroupId', PolicyGroupId) def get_UserName(self): # String return self.get_query_params().get('UserName') def set_UserName(self, UserName): # String self.add_query_param('UserName', UserName)
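One detail worth calling out for all of these request classes: the RepeatList setters expand a Python list into indexed query keys (DesktopId.1, Tag.1.Key, Tag.1.Value, and so on). That expansion can be checked locally without credentials; the import path below follows the usual aliyun-python-sdk layout and should be verified against the installed package.

from aliyunsdkecd.request.v20200930.ExportDesktopListInfoRequest import ExportDesktopListInfoRequest

request = ExportDesktopListInfoRequest()
request.set_DesktopIds(['desktop-aaa', 'desktop-bbb'])
request.set_Tags([{'Key': 'team', 'Value': 'research'}])
print(request.get_query_params())
# Roughly: {'DesktopId.1': 'desktop-aaa', 'DesktopId.2': 'desktop-bbb',
#           'Tag.1.Value': 'research', 'Tag.1.Key': 'team'}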
    386
    from datetime import datetime from unittest import mock import pytest import responses from api.share.utils import shtrove_ingest_url, sharev2_push_url from framework.auth.core import Auth from osf.models.spam import SpamStatus from osf.utils.permissions import READ, WRITE, ADMIN from osf_tests.factories import ( AuthUserFactory, ProjectFactory, SubjectFactory, PreprintFactory, PreprintProviderFactory, ) from website import settings from website.preprints.tasks import on_preprint_updated from ._utils import expect_preprint_ingest_request @pytest.mark.django_db @pytest.mark.enable_enqueue_task class TestPreprintShare: @pytest.fixture(scope='class', autouse=True) def METHOD_NAME(self): with mock.patch.object(settings, 'USE_CELERY', False): yield @pytest.fixture def user(self): return AuthUserFactory() @pytest.fixture def auth(self, user): return Auth(user=user) @pytest.fixture def provider(self): return PreprintProviderFactory( name='Lars Larson Snowmobiling Experience', access_token='Snowmobiling' ) @pytest.fixture def project(self, user, mock_share_responses): return ProjectFactory(creator=user, is_public=True) @pytest.fixture def subject(self): return SubjectFactory(text='Subject #1') @pytest.fixture def subject_two(self): return SubjectFactory(text='Subject #2') @pytest.fixture def preprint(self, project, user, provider, subject): return PreprintFactory( creator=user, filename='second_place.pdf', provider=provider, subjects=[[subject._id]], project=project, is_published=False ) def test_save_unpublished_not_called(self, mock_share_responses, preprint): # expecting no ingest requests (delete or otherwise) with expect_preprint_ingest_request(mock_share_responses, preprint, count=0): preprint.save() def test_save_published_called(self, mock_share_responses, preprint, user, auth): with expect_preprint_ingest_request(mock_share_responses, preprint): preprint.set_published(True, auth=auth, save=True) # This covers an edge case where a preprint is forced back to unpublished # that it sends the information back to share def test_save_unpublished_called_forced(self, mock_share_responses, auth, preprint): with expect_preprint_ingest_request(mock_share_responses, preprint): preprint.set_published(True, auth=auth, save=True) with expect_preprint_ingest_request(mock_share_responses, preprint, delete=True): preprint.is_published = False preprint.save(**{'force_update': True}) def test_save_published_subject_change_called(self, mock_share_responses, auth, preprint, subject, subject_two): preprint.set_published(True, auth=auth, save=True) with expect_preprint_ingest_request(mock_share_responses, preprint): preprint.set_subjects([[subject_two._id]], auth=auth) def test_save_unpublished_subject_change_not_called(self, mock_share_responses, auth, preprint, subject_two): with expect_preprint_ingest_request(mock_share_responses, preprint, delete=True): preprint.set_subjects([[subject_two._id]], auth=auth) def test_send_to_share_is_true(self, mock_share_responses, auth, preprint): preprint.set_published(True, auth=auth, save=True) with expect_preprint_ingest_request(mock_share_responses, preprint): on_preprint_updated(preprint._id, saved_fields=['title']) def test_preprint_contributor_changes_updates_preprints_share(self, mock_share_responses, user, auth): preprint = PreprintFactory(is_published=True, creator=user) preprint.set_published(True, auth=auth, save=True) user2 = AuthUserFactory() with expect_preprint_ingest_request(mock_share_responses, preprint): preprint.add_contributor(contributor=user2, 
auth=auth, save=True) with expect_preprint_ingest_request(mock_share_responses, preprint): preprint.move_contributor(contributor=user, index=0, auth=auth, save=True) data = [{'id': user._id, 'permissions': ADMIN, 'visible': True}, {'id': user2._id, 'permissions': WRITE, 'visible': False}] with expect_preprint_ingest_request(mock_share_responses, preprint): preprint.manage_contributors(data, auth=auth, save=True) with expect_preprint_ingest_request(mock_share_responses, preprint): preprint.update_contributor(user2, READ, True, auth=auth, save=True) with expect_preprint_ingest_request(mock_share_responses, preprint): preprint.remove_contributor(contributor=user2, auth=auth) def test_call_async_update_on_500_failure(self, mock_share_responses, preprint, auth): mock_share_responses.replace(responses.POST, shtrove_ingest_url(), status=500) mock_share_responses.replace(responses.POST, sharev2_push_url(), status=500) preprint.set_published(True, auth=auth, save=True) with expect_preprint_ingest_request(mock_share_responses, preprint, count=5): preprint.update_search() def test_no_call_async_update_on_400_failure(self, mock_share_responses, preprint, auth): mock_share_responses.replace(responses.POST, shtrove_ingest_url(), status=400) mock_share_responses.replace(responses.POST, sharev2_push_url(), status=400) preprint.set_published(True, auth=auth, save=True) with expect_preprint_ingest_request(mock_share_responses, preprint, count=1): preprint.update_search() def test_delete_from_share(self, mock_share_responses): preprint = PreprintFactory() with expect_preprint_ingest_request(mock_share_responses, preprint): preprint.update_search() preprint.date_withdrawn = datetime.now() preprint.save() with expect_preprint_ingest_request(mock_share_responses, preprint): preprint.update_search() preprint.spam_status = SpamStatus.SPAM preprint.save() with expect_preprint_ingest_request(mock_share_responses, preprint, delete=True): preprint.update_search()
    387
    import pytest pytestmark = [ pytest.mark.django_db, pytest.mark.usefixtures("purchase"), ] @pytest.fixture def METHOD_NAME(another_user, another_answer, question): another_answer.question = question another_answer.author = another_user another_answer.save() return another_answer @pytest.mark.freeze_time("2022-10-09 10:30:12+12:00") # +12 hours kamchatka timezone @pytest.mark.usefixtures("kamchatka_timezone") def test_ok(api, question, answer): got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"] assert len(got[0]) == 9 assert got[0]["created"] == "2022-10-09T10:30:12+12:00" assert got[0]["modified"] == "2022-10-09T10:30:12+12:00" assert got[0]["slug"] == str(answer.slug) assert got[0]["question"] == str(answer.question.slug) assert "<em>test</em>" in got[0]["text"] assert got[0]["src"] == "*test*" assert got[0]["author"]["uuid"] == str(api.user.uuid) assert got[0]["author"]["first_name"] == api.user.first_name assert got[0]["author"]["last_name"] == api.user.last_name assert got[0]["has_descendants"] is False assert got[0]["reactions"] == [] def test_has_reaction_fields_if_there_is_reaction(api, question, answer, reaction): got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"] reactions = got[0]["reactions"] assert len(reactions[0]) == 4 assert reactions[0]["emoji"] == reaction.emoji assert reactions[0]["slug"] == str(reaction.slug) assert reactions[0]["answer"] == str(reaction.answer.slug) assert reactions[0]["author"]["uuid"] == str(reaction.author.uuid) assert reactions[0]["author"]["first_name"] == reaction.author.first_name assert reactions[0]["author"]["last_name"] == reaction.author.last_name def test_has_descendants_is_true_if_answer_has_children(api, question, answer, another_answer): another_answer.parent = answer another_answer.save() got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"] assert got[0]["has_descendants"] is True def test_nplusone(api, question, answer, another_answer, django_assert_num_queries, mixer): for _ in range(5): mixer.blend("homework.Reaction", author=api.user, answer=answer) mixer.blend("homework.Reaction", author=api.user, answer=another_answer) with django_assert_num_queries(7): api.get(f"/api/v2/homework/answers/?question={question.slug}") @pytest.mark.usefixtures("answer") def test_answers_from_other_questions_are_excluded(api, another_question): got = api.get(f"/api/v2/homework/answers/?question={another_question.slug}")["results"] assert len(got) == 0 def test_non_root_answers_are_excluded(api, question, answer, METHOD_NAME): answer.parent = METHOD_NAME answer.save() got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"] assert len(got) == 1 # only answer_from_another_user assert got[0]["slug"] == str(METHOD_NAME.slug) @pytest.mark.usefixtures("answer", "answer_from_another_user") def test_answers_from_other_questions_are_excluded_even_if_user_has_the_permission(api, another_question): api.user.add_perm("homework.answer.see_all_answers") got = api.get(f"/api/v2/homework/answers/?question={another_question.slug}")["results"] assert len(got) == 0 @pytest.mark.usefixtures("answer_from_another_user") def test_answers_from_another_authors_are_excluded(api, question): got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"] assert len(got) == 0 def test_answers_from_another_authors_are_included_if_already_seen(api, mixer, question, METHOD_NAME): mixer.blend("homework.AnswerAccessLogEntry", user=api.user, answer=METHOD_NAME) 
got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"] assert len(got) == 1 def test_answers_from_another_authors_are_excluded_if_author_is_filtered(api, mixer, question, METHOD_NAME): mixer.blend("homework.AnswerAccessLogEntry", user=api.user, answer=METHOD_NAME) got = api.get(f"/api/v2/homework/answers/?question={question.slug}&author={api.user.uuid}")["results"] assert len(got) == 0 def test_access_log_entries_from_another_users_do_not_break_the_select(api, mixer, question, answer): mixer.cycle(5).blend("homework.AnswerAccessLogEntry", question=question, answer=answer) got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"] assert len(got) == 1 @pytest.mark.usefixtures("answer_from_another_user") def test_users_with_permission_may_see_all_answers(api, question): api.user.add_perm("homework.answer.see_all_answers") got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"] assert len(got) == 1 def test_no_anon(anon, question): anon.get(f"/api/v2/homework/answers/?question={question.slug}", expected_status_code=401) @pytest.mark.parametrize( "disable_pagination_value", [ "True", "true", "1", ], ) def test_pagination_could_be_disable_with_query_param(api, question, answer, disable_pagination_value): got = api.get(f"/api/v2/homework/answers/?question={question.slug}&disable_pagination={disable_pagination_value}") assert len(got) == 1 assert got[0]["slug"] == str(answer.slug) @pytest.mark.parametrize( "disable_pagination_value", [ "false", "False", "any-other-value", ], ) def test_paginated_response_with_disable_pagination_false_or_invalid_value(api, question, answer, disable_pagination_value): got = api.get(f"/api/v2/homework/answers/?question={question.slug}&disable_pagination={disable_pagination_value}") assert "results" in got assert "count" in got assert len(got["results"]) == 1
    388
    from galaxy import model from galaxy.util.unittest import TestCase from galaxy.workflow import extract UNDEFINED_JOB = object() class TestWorkflowExtractSummary(TestCase): def setUp(self): self.history = MockHistory() self.trans = MockTrans(self.history) def METHOD_NAME(self): job_dict, warnings = extract.summarize(trans=self.trans) assert not warnings assert not job_dict def test_summarize_returns_name_and_dataset_list(self): # Create two jobs and three datasets, test they are groupped # by job correctly with correct output names. hda1 = MockHda() self.history.active_datasets.append(hda1) hda2 = MockHda(job=hda1.job, output_name="out2") self.history.active_datasets.append(hda2) hda3 = MockHda(output_name="out3") self.history.active_datasets.append(hda3) job_dict, warnings = extract.summarize(trans=self.trans) assert len(job_dict) == 2 assert not warnings assert job_dict[hda1.job] == [("out1", hda1), ("out2", hda2)] assert job_dict[hda3.job] == [("out3", hda3)] def test_finds_original_job_if_copied(self): hda = MockHda() derived_hda_1 = MockHda() derived_hda_1.copied_from_history_dataset_association = hda derived_hda_2 = MockHda() derived_hda_2.copied_from_history_dataset_association = derived_hda_1 self.history.active_datasets.append(derived_hda_2) job_dict, warnings = extract.summarize(trans=self.trans) assert not warnings assert len(job_dict) == 1 assert job_dict[hda.job] == [("out1", derived_hda_2)] def test_fake_job_hda(self): """Fakes job if creating_job_associations is empty.""" hda = MockHda(job=UNDEFINED_JOB) self.history.active_datasets.append(hda) job_dict, warnings = extract.summarize(trans=self.trans) assert not warnings assert len(job_dict) == 1 fake_job = next(iter(job_dict.keys())) assert fake_job.id.startswith("fake_") datasets = next(iter(job_dict.values())) assert datasets == [(None, hda)] def test_fake_job_hda_name_guess(self): hda_from_history = MockHda(job=UNDEFINED_JOB) hda_from_history.copied_from_history_dataset_association = MockHda(job=UNDEFINED_JOB) self.history.active_datasets.append(hda_from_history) job_dict, warnings = extract.summarize(trans=self.trans) assert not warnings assert len(job_dict) == 1 fake_job = next(iter(job_dict.keys())) assert "History" in fake_job.name self.history.active_datasets.remove(hda_from_history) hda_from_library = MockHda(job=UNDEFINED_JOB) hda_from_library.copied_from_library_dataset_dataset_association = MockHda(job=UNDEFINED_JOB) self.history.active_datasets.append(hda_from_library) job_dict, warnings = extract.summarize(trans=self.trans) assert not warnings assert len(job_dict) == 1 fake_job = next(iter(job_dict.keys())) assert "Library" in fake_job.name def test_fake_job_hdca(self): hdca = MockHdca() self.history.active_datasets.append(hdca) job_dict, warnings = extract.summarize(trans=self.trans) assert not warnings assert len(job_dict) == 1 fake_job = next(iter(job_dict.keys())) assert fake_job.id.startswith("fake_") assert fake_job.is_fake content_instances = next(iter(job_dict.values())) assert content_instances == [(None, hdca)] def test_implicit_map_job_hdca(self): creating_job = model.Job() hdca = MockHdca(implicit_output_name="out1", job=creating_job) self.history.active_datasets.append(hdca) job_dict, warnings = extract.summarize(trans=self.trans) assert not warnings assert len(job_dict) == 1 job = next(iter(job_dict.keys())) assert job is creating_job def test_warns_and_skips_datasets_if_not_finished(self): hda = MockHda(state="queued") self.history.active_datasets.append(hda) job_dict, warnings = 
extract.summarize(trans=self.trans) assert warnings assert len(job_dict) == 0 class MockJobToOutputDatasetAssociation: job = None def __init__(self, name, dataset): self.name = name self.dataset = dataset class MockHistory: def __init__(self): self.active_datasets = [] @property def active_contents(self): return self.active_datasets class MockTrans: def __init__(self, history): self.history = history def get_history(self): return self.history class MockHda: def __init__(self, state="ok", output_name="out1", job=None): self.hid = 1 self.id = 123 self.state = state self.copied_from_history_dataset_association = None self.copied_from_library_dataset_dataset_association = None self.history_content_type = "dataset" if job is not UNDEFINED_JOB: if not job: job = model.Job() self.job = job assoc = MockJobToOutputDatasetAssociation(output_name, self) assoc.job = job self.creating_job_associations = [assoc] else: self.creating_job_associations = [] class MockHdca: def __init__(self, implicit_output_name=None, job=None, hid=1): self.id = 124 self.copied_from_history_dataset_collection_association = None self.history_content_type = "dataset_collection" self.implicit_output_name = implicit_output_name self.hid = 1 self.collection = model.DatasetCollection() self.creating_job_associations = [] element = model.DatasetCollectionElement( collection=self.collection, element=model.HistoryDatasetAssociation(), element_index=0, element_identifier="moocow", ) element.dataset_instance.dataset = model.Dataset() element.dataset_instance.dataset.state = "ok" creating = model.JobToOutputDatasetAssociation( implicit_output_name, element.dataset_instance, ) creating.job = job element.dataset_instance.creating_job_associations = [ creating, ]
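The tests above revolve around one behaviour: extract.summarize buckets each finished dataset under the job that created it, fabricates a fake job when no creating job exists, and only warns about unfinished datasets. A minimal standalone sketch of that grouping idea, using plain dicts rather than Galaxy's real objects (all names here are illustrative):

from collections import OrderedDict

def summarize_sketch(datasets):
    job_dict, warnings = OrderedDict(), set()
    for hda in datasets:
        if hda["state"] != "ok":
            warnings.add("some datasets are not finished")
            continue
        # Datasets without a creating job are filed under a fabricated job id.
        job = hda["job"] if hda["job"] is not None else "fake_%s" % hda["id"]
        job_dict.setdefault(job, []).append((hda["output_name"], hda["id"]))
    return job_dict, warnings

datasets = [
    {"id": 1, "job": "job_a", "output_name": "out1", "state": "ok"},
    {"id": 2, "job": "job_a", "output_name": "out2", "state": "ok"},
    {"id": 3, "job": None, "output_name": "out3", "state": "ok"},
    {"id": 4, "job": "job_b", "output_name": "out1", "state": "queued"},
]
print(summarize_sketch(datasets))  # two real buckets, one fake job, one warning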
    null
    389
    # -*- coding: utf-8 -*- """ requests.api ~~~~~~~~~~~~ This module implements the Requests API. :copyright: (c) 2012 by Kenneth Reitz. :license: Apache2, see LICENSE for more details. """ from . import sessions def request(method, url, **kwargs): """Constructs and sends a :class:`Request <Request>`. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary or list of tuples ``[(key, value)]`` (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers to add for the file. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How many seconds to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param stream: (optional) if ``False``, the response content will be immediately downloaded. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :return: :class:`Response <Response>` object :rtype: requests.Response Usage:: >>> import requests >>> req = requests.request('GET', 'http://httpbin.org/get') <Response [200]> """ # By using the 'with' statement we are sure the session is closed, thus we # avoid leaving sockets open which can trigger a ResourceWarning in some # cases, and look like a memory leak in others. with sessions.Session() as session: return session.request(method=method, url=url, **kwargs) def get(url, params=None, **kwargs): r"""Sends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return request('get', url, params=params, **kwargs) def options(url, **kwargs): r"""Sends an OPTIONS request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. 
:return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return request('options', url, **kwargs) def METHOD_NAME(url, **kwargs): r"""Sends a HEAD request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', False) return request('head', url, **kwargs) def post(url, data=None, json=None, **kwargs): r"""Sends a POST request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('post', url, data=data, json=json, **kwargs) def put(url, data=None, **kwargs): r"""Sends a PUT request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('put', url, data=data, **kwargs) def patch(url, data=None, **kwargs): r"""Sends a PATCH request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('patch', url, data=data, **kwargs) def delete(url, **kwargs): r"""Sends a DELETE request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('delete', url, **kwargs)
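Since every helper above just forwards to request() inside a short-lived Session, typical usage stays small. A quick example, assuming the requests package is installed and httpbin.org is reachable:

import requests

# GET with query parameters; `params` is encoded into the URL query string.
resp = requests.get("http://httpbin.org/get", params={"q": "demo"}, timeout=10)
print(resp.status_code, resp.json()["args"])

# POST with a JSON body; `json=` also sets the Content-Type header.
resp = requests.post("http://httpbin.org/post", json={"name": "demo"}, timeout=10)
print(resp.json()["json"])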
    null
    390
    # Copyright 2021-2023 AIPlan4EU project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from warnings import warn import unified_planning as up from unified_planning.model.types import _UserType from unified_planning.exceptions import UPProblemDefinitionError, UPValueError from typing import List, Dict, Optional, cast class UserTypesSetMixin: """ This class is a mixin that contains a `set` of `user types` with some related methods. NOTE: when this mixin is used in combination with other mixins that share some of the attributes (e.g. `has_name_method`), it is required to pass the very same arguments to the mixins constructors. """ def __init__(self, env, has_name_method): self._env = env self._has_name_method = has_name_method self._user_types: List["up.model.types.Type"] = [] # The field _user_types_hierarchy stores the information about the types and the list of their sons. self._user_types_hierarchy: Dict[ Optional["up.model.types.Type"], List["up.model.types.Type"] ] = {} def _add_user_type(self, type: "up.model.types.Type"): """This method adds a Type, together with all it's ancestors, to the user_types_hierarchy""" assert type.is_user_type() if type not in self._user_types: ut = cast(_UserType, type) if self._has_name_method(ut.name): msg = f"The type name {ut.name} is already used in the problem! Different elements of a problem can have the same name if the environment flag error_used_name is disabled." if self._env.error_used_name or any( ut.name == cast(_UserType, t).name for t in self._user_types ): raise UPProblemDefinitionError(msg) else: warn(msg) if ut.father is not None: self._add_user_type(ut.father) self._user_types.append(type) @property def user_types(self) -> List["up.model.types.Type"]: """Returns the `list` of all the `user types` in the `problem`.""" return self._user_types def user_type(self, name: str) -> "up.model.types.Type": """ Returns the `user type` in the `problem` with the given `name`. :param name: The target `name` for the `type`. :return: The `type` in the `problem` with the given `name`. """ for ut in self.user_types: assert ut.is_user_type() if cast(_UserType, ut).name == name: return ut raise UPValueError(f"UserType {name} is not defined!") def METHOD_NAME(self, name: str) -> bool: """ Returns `True` if the `type` with the given `name` is defined in the `problem`, `False`, otherwise. :param name: The target `name` for the `type`. :return: `True` if a `type` with the given `name` is in the `problem`, `False` otherwise. """ for ut in self.user_types: assert ut.is_user_type() if cast(_UserType, ut).name == name: return True return False @property def user_types_hierarchy( self, ) -> Dict[Optional["up.model.types.Type"], List["up.model.types.Type"]]: """ Returns a `Dict` where every `key` represents an `Optional Type` and the `value` associated to the `key` is the `List` of the `direct sons` of the `Optional Type`. All the `user types` corresponding to the 'None' key are fatherless. 
""" res: Dict[Optional["up.model.types.Type"], List["up.model.types.Type"]] = {} for t in self._user_types: if t not in res: res[t] = [] f = cast(_UserType, t).father if f not in res: res[f] = [t] else: res[f].append(t) return res def __eq__(self, other): return isinstance(other, UserTypesSetMixin) and set(self._user_types) == set( other._user_types ) def __hash__(self): return sum(map(hash, self._user_types)) def _clone_to(self, other: "UserTypesSetMixin"): other._user_types = self._user_types[:] other._user_types_hierarchy = self._user_types_hierarchy.copy()
    null
    391
    #!/usr/bin/env python ## @ CommonUtility.py # Common utility script # # Copyright (c) 2016 - 2020, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # ## ## # Import Modules # import os import sys import re import shutil import subprocess import struct import hashlib import string from ctypes import * from functools import reduce from importlib.machinery import SourceFileLoader def print_bytes (data, indent=0, offset=0, show_ascii = False): bytes_per_line = 16 printable = ' ' + string.ascii_letters + string.digits + string.punctuation str_fmt = '{:s}{:04x}: {:%ds} {:s}' % (bytes_per_line * 3) bytes_per_line data_array = bytearray(data) for idx in range(0, len(data_array), bytes_per_line): hex_str = ' '.join('%02X' % val for val in data_array[idx:idx + bytes_per_line]) asc_str = ''.join('%c' % (val if (chr(val) in printable) else '.') for val in data_array[idx:idx + bytes_per_line]) print (str_fmt.format(indent * ' ', offset + idx, hex_str, ' ' + asc_str if show_ascii else '')) def get_bits_from_bytes (bytes, start, length): if length == 0: return 0 byte_start = (start) // 8 byte_end = (start + length - 1) // 8 bit_start = start & 7 mask = (1 << length) - 1 val = bytes_to_value (bytes[byte_start:byte_end + 1]) val = (val >> bit_start) & mask return val def set_bits_to_bytes (bytes, start, length, bvalue): if length == 0: return byte_start = (start) // 8 byte_end = (start + length - 1) // 8 bit_start = start & 7 mask = (1 << length) - 1 val = bytes_to_value (bytes[byte_start:byte_end + 1]) val &= ~(mask << bit_start) val |= ((bvalue & mask) << bit_start) bytes[byte_start:byte_end+1] = value_to_bytearray (val, byte_end + 1 - byte_start) def value_to_bytes (value, length): return value.to_bytes(length, 'little') def bytes_to_value (bytes): return int.from_bytes (bytes, 'little') def value_to_bytearray (value, length): return bytearray(value_to_bytes(value, length)) def value_to_bytearray (value, length): return bytearray(value_to_bytes(value, length)) def get_aligned_value (value, alignment = 4): if alignment != (1 << (alignment.bit_length() - 1)): raise Exception ('Alignment (0x%x) should to be power of 2 !' 
% alignment) value = (value + (alignment - 1)) & ~(alignment - 1) return value def get_padding_length (data_len, alignment = 4): new_data_len = get_aligned_value (data_len, alignment) return new_data_len - data_len def METHOD_NAME (file, mode = 'rb'): return open(file, mode).read() def gen_file_from_object (file, object): open (file, 'wb').write(object) def gen_file_with_size (file, size): open (file, 'wb').write(b'\xFF' * size); def check_files_exist (base_name_list, dir = '', ext = ''): for each in base_name_list: if not os.path.exists (os.path.join (dir, each + ext)): return False return True def load_source (name, filepath): mod = SourceFileLoader (name, filepath).load_module() return mod def get_openssl_path (): if os.name == 'nt': if 'OPENSSL_PATH' not in os.environ: openssl_dir = "C:\\Openssl\\bin\\" if os.path.exists (openssl_dir): os.environ['OPENSSL_PATH'] = openssl_dir else: os.environ['OPENSSL_PATH'] = "C:\\Openssl\\" if 'OPENSSL_CONF' not in os.environ: openssl_cfg = "C:\\Openssl\\openssl.cfg" if os.path.exists(openssl_cfg): os.environ['OPENSSL_CONF'] = openssl_cfg openssl = os.path.join(os.environ.get ('OPENSSL_PATH', ''), 'openssl.exe') else: # Get openssl path for Linux cases openssl = shutil.which('openssl') return openssl def run_process (arg_list, print_cmd = False, capture_out = False): sys.stdout.flush() if os.name == 'nt' and os.path.splitext(arg_list[0])[1] == '' and \ os.path.exists (arg_list[0] + '.exe'): arg_list[0] += '.exe' if print_cmd: print (' '.join(arg_list)) exc = None result = 0 output = '' try: if capture_out: output = subprocess.check_output(arg_list).decode() else: result = subprocess.call (arg_list) except Exception as ex: result = 1 exc = ex if result: if not print_cmd: print ('Error in running process:\n %s' % ' '.join(arg_list)) if exc is None: sys.exit(1) else: raise exc return output
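The byte and bit helpers above all assume little-endian packing. A short self-contained check of the same round trip, rewritten against the standard library so it runs without the script:

# value_to_bytearray / bytes_to_value equivalents from the standard library.
value = 0x1234
buf = bytearray(value.to_bytes(4, "little"))
assert int.from_bytes(buf, "little") == value

# get_bits_from_bytes equivalent: extract 4 bits starting at bit offset 4.
start, length = 4, 4
byte_start, byte_end = start // 8, (start + length - 1) // 8
mask = (1 << length) - 1
bits = (int.from_bytes(buf[byte_start:byte_end + 1], "little") >> (start & 7)) & mask
print(hex(bits))  # 0x3, the high nibble of the 0x34 byte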
    null
    392
    # Copyright (C) 2015-2021 Regents of the University of California # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import TYPE_CHECKING, Callable, Dict, List, Tuple, Type if TYPE_CHECKING: from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem logger = logging.getLogger(__name__) def aws_batch_batch_system_factory(): from toil.batchSystems.awsBatch import AWSBatchBatchSystem return AWSBatchBatchSystem def gridengine_batch_system_factory(): from toil.batchSystems.gridengine import GridEngineBatchSystem return GridEngineBatchSystem def parasol_batch_system_factory(): from toil.batchSystems.parasol import ParasolBatchSystem return ParasolBatchSystem def lsf_batch_system_factory(): from toil.batchSystems.lsf import LSFBatchSystem return LSFBatchSystem def single_machine_batch_system_factory(): from toil.batchSystems.singleMachine import SingleMachineBatchSystem return SingleMachineBatchSystem def mesos_batch_system_factory(): from toil.batchSystems.mesos.batchSystem import MesosBatchSystem return MesosBatchSystem def slurm_batch_system_factory(): from toil.batchSystems.slurm import SlurmBatchSystem return SlurmBatchSystem def tes_batch_system_factory(): from toil.batchSystems.tes import TESBatchSystem return TESBatchSystem def torque_batch_system_factory(): from toil.batchSystems.torque import TorqueBatchSystem return TorqueBatchSystem def htcondor_batch_system_factory(): from toil.batchSystems.htcondor import HTCondorBatchSystem return HTCondorBatchSystem def kubernetes_batch_system_factory(): from toil.batchSystems.kubernetes import KubernetesBatchSystem return KubernetesBatchSystem BATCH_SYSTEM_FACTORY_REGISTRY: Dict[str, Callable[[], Type["AbstractBatchSystem"]]] = { 'aws_batch' : aws_batch_batch_system_factory, 'parasol' : parasol_batch_system_factory, 'single_machine' : single_machine_batch_system_factory, 'grid_engine' : gridengine_batch_system_factory, 'lsf' : lsf_batch_system_factory, 'mesos' : mesos_batch_system_factory, 'slurm' : slurm_batch_system_factory, 'tes' : tes_batch_system_factory, 'torque' : torque_batch_system_factory, 'htcondor' : htcondor_batch_system_factory, 'kubernetes' : kubernetes_batch_system_factory } BATCH_SYSTEMS = list(BATCH_SYSTEM_FACTORY_REGISTRY.keys()) DEFAULT_BATCH_SYSTEM = 'single_machine' def addBatchSystemFactory(key: str, batchSystemFactory: Callable[[], Type['AbstractBatchSystem']]): """ Adds a batch system to the registry for workflow-supplied batch systems. """ BATCH_SYSTEMS.append(key) BATCH_SYSTEM_FACTORY_REGISTRY[key] = batchSystemFactory # We need a snapshot save/restore system for testing. We can't just tamper with # the globals because module-level globals are their own references, so we # can't touch this module's global name bindings from a client module. def METHOD_NAME() -> Tuple[List[str], Dict[str, Callable[[], Type['AbstractBatchSystem']]]]: """ Return a snapshot of the plugin registry that can be restored to remove added plugins. Useful for testing the plugin system in-process with other tests. 
""" snapshot = (list(BATCH_SYSTEMS), dict(BATCH_SYSTEM_FACTORY_REGISTRY)) return snapshot def restore_batch_system_plugin_state(snapshot: Tuple[List[str], Dict[str, Callable[[], Type['AbstractBatchSystem']]]]): """ Restore the batch system registry state to a snapshot from save_batch_system_plugin_state(). """ # We need to apply the snapshot without rebinding the names, because that # won't affect modules that imported the names. wanted_batch_systems, wanted_registry = snapshot BATCH_SYSTEMS.clear() BATCH_SYSTEMS.extend(wanted_batch_systems) BATCH_SYSTEM_FACTORY_REGISTRY.clear() BATCH_SYSTEM_FACTORY_REGISTRY.update(wanted_registry)
    null
    393
    #/*########################################################################## # Copyright (C) 2004-2022 European Synchrotron Radiation Facility # # This file is part of the PyMca X-ray Fluorescence Toolkit developed at # the ESRF. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # #############################################################################*/ __author__ = "V.A. Sole - ESRF" __contact__ = "[email protected]" __license__ = "MIT" __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" __doc__ = """ This plugin open a plot window with a browser to browse all images in the stack. A averaging filter with a configurable width is provided, to display an average of several consecutive frames rather than a single frame. The plot has also mask tools synchronized with the mask in the primary window. 
""" import logging from PyMca5 import StackPluginBase from PyMca5.PyMcaGui.pymca import StackBrowser from PyMca5.PyMcaGui import PyMca_Icons _logger = logging.getLogger(__name__) class StackBrowserPlugin(StackPluginBase.StackPluginBase): def __init__(self, stackWindow, **kw): if _logger.getEffectiveLevel() == logging.DEBUG: StackPluginBase.pluginBaseLogger.setLevel(logging.DEBUG) StackPluginBase.StackPluginBase.__init__(self, stackWindow, **kw) self.methodDict = {'Show':[self.METHOD_NAME, "Show Stack Image Browser", PyMca_Icons.brushselect]} self.__methodKeys = ['Show'] self.widget = None def stackUpdated(self): _logger.debug("StackBrowserPlugin.stackUpdated() called") if self.widget is None: return if self.widget.isHidden(): return stack = self.getStackDataObject() self.widget.setStackDataObject(stack, stack_name="Stack Index") self.widget.setBackgroundImage(self._getBackgroundImage()) mask = self.getStackSelectionMask() self.widget.setSelectionMask(mask) def _getBackgroundImage(self): images, names = self.getStackROIImagesAndNames() B = None for key in names: if key.endswith("ackground"): B = images[names.index(key)] return B def selectionMaskUpdated(self): if self.widget is None: return if self.widget.isHidden(): return mask = self.getStackSelectionMask() self.widget.setSelectionMask(mask) def stackROIImageListUpdated(self): if self.widget is None: return self.widget.setBackgroundImage(self._getBackgroundImage()) def mySlot(self, ddict): _logger.debug("mySlot %s %s", ddict['event'], ddict.keys()) if ddict['event'] == "selectionMaskChanged": self.setStackSelectionMask(ddict['current']) elif ddict['event'] == "addImageClicked": self.addImage(ddict['image'], ddict['title']) elif ddict['event'] == "removeImageClicked": self.removeImage(ddict['title']) elif ddict['event'] == "replaceImageClicked": self.replaceImage(ddict['image'], ddict['title']) elif ddict['event'] == "resetSelection": self.setStackSelectionMask(None) #Methods implemented by the plugin def getMethods(self): return self.__methodKeys def getMethodToolTip(self, name): return self.methodDict[name][1] def getMethodPixmap(self, name): return self.methodDict[name][2] def applyMethod(self, name): return self.methodDict[name][0]() def METHOD_NAME(self): if self.widget is None: self.widget = StackBrowser.StackBrowser(parent=None, rgbwidget=None, selection=True, colormap=True, imageicons=True, standalonesave=True, profileselection=True) self.widget.setSelectionMode(True) qt = StackBrowser.qt self.widget.sigMaskImageWidgetSignal.connect(self.mySlot) #Show self.widget.show() self.widget.raise_() #update self.stackUpdated() MENU_TEXT = "Stack Image Browser" def getStackPluginInstance(stackWindow, **kw): ob = StackBrowserPlugin(stackWindow) return ob
    null
    394
    #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2023, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for FirefoxGetLocaleAndVersion class ''' # Standard imports import configparser import glob import os import plistlib # AutoPkg imports # pylint: disable = import-error from autopkglib.DmgMounter import DmgMounter from autopkglib import ProcessorError __all__ = ['FirefoxGetLocaleAndVersion'] __version__ = '1.0' # pylint: disable = too-few-public-methods class FirefoxGetLocaleAndVersion(DmgMounter): ''' Returns the locale and version of the Firefox.app passed to dmg_path Raising if Firefox.app not located at dmg_path. Based off of: https://github.com/autopkg/autopkg/blob/master/Code/autopkglib/AppDmgVersioner.py#L69-L86 ''' description = __doc__ input_variables = { 'choosen_locale': { 'required': True, 'description': ('Value of LOCALE in the override.'), }, 'dmg_path': { 'required': True, 'description': ('Path to the downloaded DMG.'), } } output_variables = { 'app_locale': { 'description': ('Locale of the .app.'), }, 'app_version': { 'description': ('Version of the .app.'), }, } def METHOD_NAME(self, path): ''' Find app bundle at path ''' # Look for any .app in the mounted dmg apps = glob.glob(os.path.join(path, "*.app")) # Raise if no .app found if len(apps) == 0: raise ProcessorError("No app found in dmg") # Return 1st found .app only return apps[0] def main(self): ''' See docstring for the FirefoxGetLocaleAndVersion class ''' # Mount the image. mount_point = self.mount(self.env["dmg_path"]) # Wrap all other actions in a try/finally so the image is always # unmounted. 
try: # Get the path the the .app in the DMG, raise if no .app found app_path = self.METHOD_NAME(mount_point) self.output(f"app_path = {app_path}") # Get the path to locale.ini, if doesn't exist and LOCALE is en-US we're good app_locale_ini = os.path.join(app_path, 'Contents/Resources/locale.ini') self.output(f"Looking for {app_locale_ini}...") # Get the .app's locale, if app_locale_ini exists if os.path.exists(app_locale_ini): # Progress notification self.output(f"Found: {app_locale_ini}...") # Try Read in the locale, raise if cannot be parsed try: # Create confgparser object app_config = configparser.ConfigParser() app_config.read(app_locale_ini) # Setting app_locale self.env['app_locale'] = app_config['locale']['locale'] # Raise if app_locale cannot be retrieved from app_locale_ini except Exception as locale_parse_error: raise ProcessorError("Cannot determine app_locale") from locale_parse_error # en-US doesn't have a app_locale_ini, so if selected then elif self.env["choosen_locale"] == 'en-US': # Setting app_locale self.env['app_locale'] = 'en-US' self.output(f"Setting app_locale to \"en-US\", as {app_locale_ini} does " f"not exist for the \"en-US\" locale") # Raise if we can't find app_locale_ini and choosen_locale isn't en-US else: raise ProcessorError(f"Cannot find {app_locale_ini}") # Progress notification self.output(f"app_locale: {self.env['app_locale']}") # Now we need to get the version app_info_plist = os.path.join(app_path, 'Contents/Info.plist') # If the info.plist exists if os.path.exists(app_info_plist): # Try to read in app_info_plist, raise if cannot be parsed try: # Read in the plist with open(app_info_plist, "rb") as plist_file: parsed_plist = plistlib.load(plist_file) # Get version from info.plist self.env['app_version'] = parsed_plist['CFBundleShortVersionString'] self.output(f"app_version: {self.env['app_version']}") # Raising if plist cannot be parsed or version determined from plist except Exception as info_plist_error: raise ProcessorError(f"Cannot parse {app_info_plist}") from info_plist_error # Raise if we can't find app_info_plist else: raise ProcessorError(f"Cannot find {app_info_plist}") # Unmount the dmg finally: self.output("unmounting...") self.unmount(self.env["dmg_path"]) if __name__ == '__main__': PROCESSOR = FirefoxGetLocaleAndVersion()
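Both file formats the processor reads are covered by the standard library: configparser for locale.ini and plistlib for Info.plist. A standalone sketch against a hypothetical mount point (the path is illustrative; point it at a real mounted Firefox.app to see real values):

import configparser
import os
import plistlib

# Hypothetical mount point of a Firefox disk image.
app_path = "/Volumes/Firefox/Firefox.app"

locale_ini = os.path.join(app_path, "Contents/Resources/locale.ini")
if os.path.exists(locale_ini):
    config = configparser.ConfigParser()
    config.read(locale_ini)
    app_locale = config["locale"]["locale"]
else:
    app_locale = "en-US"  # the en-US build ships without locale.ini

info_plist = os.path.join(app_path, "Contents/Info.plist")
app_version = None
if os.path.exists(info_plist):
    with open(info_plist, "rb") as fh:
        app_version = plistlib.load(fh)["CFBundleShortVersionString"]

print(app_locale, app_version)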
    null
    395
    import logging import os import subprocess import time from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional from fastapi import FastAPI, HTTPException from meerkat.interactive.server import Server from meerkat.tools.utils import WeakMapping if TYPE_CHECKING: from meerkat.interactive.modification import Modification from meerkat.mixins.identifiable import IdentifiableMixin logger = logging.getLogger(__name__) @dataclass class Secrets: api_keys: Dict[str, str] = field(default_factory=dict) def add(self, api: str, api_key: str): self.api_keys[api] = api_key def get(self, api: str): try: return self.api_keys[api] except KeyError: raise HTTPException( status_code=404, detail=f"No API key found for {api}.\ Add one with `secrets.add(api, api_key)`.", ) @dataclass class LanguageModel: manifest: Any = None def set(self, client: str = "ai21", engine: str = "j1-jumbo"): from manifest import Manifest self.manifest = Manifest( client_name=client, client_connection=state.secrets.get(client), engine=engine, cache_name="sqlite", cache_connection="./logs", ) def get(self): return self.manifest @dataclass class APIInfo: api: Optional[FastAPI] port: Optional[int] server: Optional[Server] = None name: str = "localhost" shared: bool = False process: Optional[subprocess.Popen] = None _url: Optional[str] = None @property def url(self): if self._url: return self._url if self.shared: return f"http://{self.name}" return f"http://{self.name}:{self.port}" @property def METHOD_NAME(self): return f"{self.url}/docs" @property def docs(self): from IPython.display import IFrame return IFrame(self.METHOD_NAME, width=800, height=600) @dataclass class FrontendInfo: package_manager: Optional[str] port: Optional[int] name: str = "localhost" shared: bool = False process: Optional[subprocess.Popen] = None _url: Optional[str] = None @property def url(self): if self._url: return self._url if self.shared: return f"http://{self.name}" return f"http://{self.name}:{self.port}" @dataclass class Identifiables: """We maintain a separate group for each type of identifiable object. Objects in the group are identified by a unique id. 
""" columns: WeakMapping = field(default_factory=WeakMapping) dataframes: WeakMapping = field(default_factory=WeakMapping) pages: Mapping = field(default_factory=dict) slicebys: WeakMapping = field(default_factory=WeakMapping) aggregations: WeakMapping = field(default_factory=WeakMapping) box_operations: WeakMapping = field(default_factory=WeakMapping) components: WeakMapping = field(default_factory=WeakMapping) refs: WeakMapping = field(default_factory=WeakMapping) stores: WeakMapping = field(default_factory=WeakMapping) endpoints: WeakMapping = field(default_factory=WeakMapping) routers: WeakMapping = field(default_factory=WeakMapping) nodes: WeakMapping = field(default_factory=WeakMapping) states: WeakMapping = field(default_factory=WeakMapping) def add(self, obj: "IdentifiableMixin"): group = getattr(self, obj.identifiable_group) group[obj.id] = obj def get(self, id: str, group: str): group, group_name = getattr(self, group), group try: value = group[id] except KeyError: raise HTTPException( status_code=404, detail=f"No object in group '{group_name}' with id '{id}'", ) return value @dataclass class ModificationQueue: """A queue of modifications to be applied to a dataframe.""" queue: List["Modification"] = field(default_factory=list) # Boolean attribute that controls whether the queue is accepting new # modifications # When _ready is False, `add` will no-op _ready: bool = False def add(self, modification: "Modification"): if self._ready: logger.debug(f"Adding modification {modification} to queue.") self.queue.append(modification) return # Do nothing if not ready logger.debug(f"Modification queue not ready. Ignoring {modification}.") def clear(self) -> List["Modification"]: """Clear the modification queue, and return the old queue.""" logger.debug("Clearing modification queue.") current_queue = self.queue self.queue = [] return current_queue def ready(self): """Ready the queue for accepting new modifications.""" count = 0 while self._ready: # Modification queue is already in use # Wait for it to be unready logger.debug("Modification queue is already in use. Waiting...") time.sleep(0.1) count += 1 if count == 1e-3: logger.warn( "Modification queue is taking a long time to unready." "Check for deadlocks." 
) self._ready = True logger.debug("Modification queue is now ready.") def unready(self): """Unready the queue for accepting new modifications.""" self._ready = False logger.debug("Modification queue is now unready.") @dataclass class ProgressQueue: """A queue of progress messages to be displayed to the user.""" queue: list = field(default_factory=list) def add(self, message: str): self.queue.append(message) def clear(self) -> list: """Clear the progress queue, and return the old queue.""" current_queue = self.queue self.queue = [] return current_queue @dataclass class GlobalState: api_info: Optional[APIInfo] = None frontend_info: Optional[FrontendInfo] = None identifiables: Identifiables = field(default_factory=Identifiables) secrets: Secrets = field(default_factory=Secrets) llm: LanguageModel = field(default_factory=LanguageModel) modification_queue: ModificationQueue = field(default_factory=ModificationQueue) progress_queue: ProgressQueue = field(default_factory=ProgressQueue) global state state = GlobalState() def add_secret(api: str, api_key: str): """Add an API key to the global state.""" state.secrets.add(api, api_key) def run_on_startup(): """Run on startup.""" frontend_url = os.environ.get("MEERKAT_FRONTEND_URL", None) if frontend_url: state.frontend_info = FrontendInfo(None, None, _url=frontend_url) api_url = os.environ.get("MEERKAT_API_URL", None) if api_url: state.api_info = APIInfo(None, None, _url=api_url) run_on_startup()
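ModificationQueue.add silently drops entries until ready() opens the gate, and clear() hands back the accumulated batch. A standalone sketch of that gate-and-flush pattern, without the locking and logging:

class TinyQueue:
    def __init__(self):
        self.queue = []
        self._ready = False

    def add(self, item):
        if self._ready:            # ignored unless the queue has been readied
            self.queue.append(item)

    def clear(self):
        old, self.queue = self.queue, []
        return old

queue = TinyQueue()
queue.add("dropped")               # queue not ready yet, so this is a no-op
queue._ready = True
queue.add("kept")
print(queue.clear(), queue.queue)  # ['kept'] []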
    null
    396
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RpcRequest
from aliyunsdkidaas_doraemon.endpoint import endpoint_data

class VerifyUserAuthenticationRequest(RpcRequest):

    def __init__(self):
        RpcRequest.__init__(self, 'idaas-doraemon', '2021-05-20', 'VerifyUserAuthentication')
        self.set_protocol_type('https')
        self.set_method('POST')

        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_LogParams(self): # String
        return self.get_query_params().get('LogParams')

    def set_LogParams(self, LogParams): # String
        self.add_query_param('LogParams', LogParams)

    def METHOD_NAME(self): # String
        return self.get_query_params().get('ClientExtendParamsJson')

    def set_ClientExtendParamsJson(self, ClientExtendParamsJson): # String
        self.add_query_param('ClientExtendParamsJson', ClientExtendParamsJson)

    def get_UserId(self): # String
        return self.get_query_params().get('UserId')

    def set_UserId(self, UserId): # String
        self.add_query_param('UserId', UserId)

    def get_LogTag(self): # String
        return self.get_query_params().get('LogTag')

    def set_LogTag(self, LogTag): # String
        self.add_query_param('LogTag', LogTag)

    def get_ServerExtendParamsJson(self): # String
        return self.get_query_params().get('ServerExtendParamsJson')

    def set_ServerExtendParamsJson(self, ServerExtendParamsJson): # String
        self.add_query_param('ServerExtendParamsJson', ServerExtendParamsJson)

    def get_RequireBindHashBase64(self): # String
        return self.get_query_params().get('RequireBindHashBase64')

    def set_RequireBindHashBase64(self, RequireBindHashBase64): # String
        self.add_query_param('RequireBindHashBase64', RequireBindHashBase64)

    def get_AuthenticationContext(self): # String
        return self.get_query_params().get('AuthenticationContext')

    def set_AuthenticationContext(self, AuthenticationContext): # String
        self.add_query_param('AuthenticationContext', AuthenticationContext)

    def get_RequireChallengeBase64(self): # String
        return self.get_query_params().get('RequireChallengeBase64')

    def set_RequireChallengeBase64(self, RequireChallengeBase64): # String
        self.add_query_param('RequireChallengeBase64', RequireChallengeBase64)

    def get_AuthenticatorType(self): # String
        return self.get_query_params().get('AuthenticatorType')

    def set_AuthenticatorType(self, AuthenticatorType): # String
        self.add_query_param('AuthenticatorType', AuthenticatorType)

    def get_ClientExtendParamsJsonSign(self): # String
        return self.get_query_params().get('ClientExtendParamsJsonSign')

    def set_ClientExtendParamsJsonSign(self, ClientExtendParamsJsonSign): # String
        self.add_query_param('ClientExtendParamsJsonSign', ClientExtendParamsJsonSign)

    def get_UserSourceIp(self): # String
        return self.get_query_params().get('UserSourceIp')

    def set_UserSourceIp(self, UserSourceIp): # String
        self.add_query_param('UserSourceIp', UserSourceIp)

    def get_ApplicationExternalId(self): # String
        return self.get_query_params().get('ApplicationExternalId')

    def set_ApplicationExternalId(self, ApplicationExternalId): # String
        self.add_query_param('ApplicationExternalId', ApplicationExternalId)
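Each getter/setter pair above reads or writes exactly one query parameter on the request. A standalone sketch of that accessor pattern with a stand-in base class (the real one comes from aliyunsdkcore):

class TinyRpcRequest:
    def __init__(self):
        self._query_params = {}

    def get_query_params(self):
        return self._query_params

    def add_query_param(self, key, value):
        self._query_params[key] = value

class TinyVerifyRequest(TinyRpcRequest):
    def get_UserId(self):  # String
        return self.get_query_params().get('UserId')

    def set_UserId(self, UserId):  # String
        self.add_query_param('UserId', UserId)

request = TinyVerifyRequest()
request.set_UserId('user-123')
print(request.get_UserId())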
    null
    397
    # This script compares an on-going release notes file with published release notes files. # If the ongoing release notes file has a duplicate note with the published one, the script reports the note and replaces it with the published one. import re, os from tempfile import mkstemp from shutil import move from os import remove # 获取已发布的 release notes Issue 号和 PR 号 def METHOD_NAME(ext_path,main_path): exst_notes = [] exst_issue_nums = [] exst_note_levels = [] for maindir, subdir, files in os.walk(ext_path): for afile in files: file_path = (os.path.join(maindir, afile)) if file_path.endswith('.md') and not os.path.samefile(file_path,main_path): with open(file_path,'r', encoding='utf-8') as fp: level1 = level2 = level3 = "" for line in fp: exst_issue_num = re.search(r'https://github.com/(pingcap|tikv)/\w+/(issues|pull)/\d+', line) if exst_issue_num: if exst_issue_num.group() not in exst_issue_nums: note_level = level1 + level2 + level3 note_pair = [exst_issue_num.group(),line,afile, note_level] exst_issue_nums.append(exst_issue_num.group()) exst_notes.append(note_pair) else: continue elif line.startswith("##"): level1 = "> " + line.replace("##","").strip() level2 = level3 = "" elif line.startswith ("+") or line.startswith ("-"): level2 = "> " + line.replace("+","").replace("-","").strip() level3 = "" elif line.startswith (" +") or line.startswith (" -"): level3 = "> " + line.replace(" +","").replace(" -","").strip() else: continue else: pass if len(exst_issue_nums) != 0: return exst_notes else: return 0 # 检查当前准备中的 release notes 的 Issue 号和 PR 号是否有重复,如果有就进行替换 def check_exst_rn(note_pairs, main_path): DupNum = 0 NoteNum = 0 target_file_path = mkstemp()[1] source_file_path = main_path with open(target_file_path, 'w', encoding='utf-8') as target_file: with open(source_file_path, 'r', encoding='utf-8') as source_file: LineNum = 0 for line in source_file: LineNum += 1 issue_num = re.search('https://github.com/(pingcap|tikv)/\w+/(issues|pull)/\d+', line) if issue_num: NoteNum +=1 for note_pair in note_pairs: if issue_num.group() == note_pair[0] and not line.strip().startswith("(dup"): print('A duplicated note is found in line ' + str(LineNum) + " from " + note_pair[2] + note_pair[1]) match = re.fullmatch(r'(\s*)(?:- .+?)( @.+?)?\s*', line) if match: line = '{}(dup: {} {}){}{}\n'.format(match.group(1), note_pair[2], note_pair[3], note_pair[1].strip(), match.group(2) or "") print('The duplicated note is replaced with ' + line) DupNum += 1 else: continue break target_file.write(line) remove(source_file_path) move(target_file_path, source_file_path) DupRate = "%.0f%%" % (DupNum/NoteNum*100) #计算 release notes 重复率 print (str(DupNum) + " duplicated notes are found in " + str(NoteNum) + " notes. The duplicated rate is " + str(DupRate) + ".") if __name__ == "__main__": ext_path = r'/Users/aaa/Documents/GitHub/githubid/docs/releases' # 已发布的 release notes 文件夹 main_path = r'/Users/aaa/Documents/GitHub/githubid/docs/releases/release-5.3.1.md' # 当前正在准备的release notes 文档路径 note_pairs = METHOD_NAME(ext_path,main_path) check_exst_rn(note_pairs, main_path)
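Every note is keyed on its GitHub issue or PR link, so the duplicate check is essentially a regex match plus a membership lookup. A standalone sketch of the extraction step using the same pattern:

import re

ISSUE_RE = re.compile(r"https://github.com/(pingcap|tikv)/\w+/(issues|pull)/\d+")

line = "- Fix a planner panic [#1234](https://github.com/pingcap/tidb/issues/1234)"
match = ISSUE_RE.search(line)
if match:
    print(match.group())  # https://github.com/pingcap/tidb/issues/1234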
    null
    398
    # -*- coding: utf-8 -*- import re import hmac import hashlib import logging from django.apps import apps from nameparser import HumanName from werkzeug.utils import cached_property from framework.flask import request from website import settings from website.conferences.exceptions import ConferenceError logger = logging.getLogger(__name__) SSCORE_MAX_VALUE = 5 DKIM_PASS_VALUES = ['Pass'] SPF_PASS_VALUES = ['Pass', 'Neutral'] ANGLE_BRACKETS_REGEX = re.compile(r'<(.*?)>') BASE_REGEX = r""" (?P<test>(test|stage)(\d*)-)? (?P<meeting>\w*?) - (?P<category>{allowed_types}) @osf\.io """ class ConferenceMessage(object): def __init__(self): self.request = request._get_current_object() def verify(self): self.verify_signature() _ = [self.sender_email, self.route] # noqa def verify_signature(self): """Verify that request comes from Mailgun. Based on sample code from http://documentation.mailgun.com/user_manual.html#webhooks """ signature = hmac.new( key=settings.MAILGUN_API_KEY.encode(), msg='{}{}'.format( self.form['timestamp'], self.form['token'], ).encode(), digestmod=hashlib.sha256, ).hexdigest() if signature != self.form['signature']: raise ConferenceError('Invalid headers on incoming mail') @cached_property def is_spam(self): """Check SSCORE, DKIM, and SPF headers for spam. See http://documentation.mailgun.com/user_manual.html#spam-filter for details. :return: At least one header indicates spam """ try: # Mailgun only inserts score headers for messages checked for spam. sscore_header = float(self.form.get('X-Mailgun-Sscore', 0)) except (TypeError, ValueError): return True dkim_header = self.form.get('X-Mailgun-Dkim-Check-Result') spf_header = self.form.get('X-Mailgun-Spf') return ( (sscore_header and sscore_header > SSCORE_MAX_VALUE) or (dkim_header and dkim_header not in DKIM_PASS_VALUES) or (spf_header and spf_header not in SPF_PASS_VALUES) ) @cached_property def form(self): return self.request.form @cached_property def raw(self): return { 'headers': dict(self.request.headers), 'form': self.request.form.to_dict(), 'args': self.request.args.to_dict(), } @cached_property def subject(self): subject = self.form['subject'] subject = re.sub(r'^re:', '', subject, flags=re.I) subject = re.sub(r'^fwd:', '', subject, flags=re.I) return subject.strip() @cached_property def METHOD_NAME(self): return self.form['recipient'] @cached_property def text(self): # Not included if there is no message body # https://documentation.mailgun.com/user_manual.html#routes return self.form.get('stripped-text', '') @cached_property def sender(self): return self.form['from'] @cached_property def sender_name(self): if '<' in self.sender: # sender format: "some name" <[email protected]> name = ANGLE_BRACKETS_REGEX.sub('', self.sender) name = name.strip().replace('"', '') else: # sender format: [email protected] name = self.sender return str(HumanName(name)) @cached_property def sender_email(self): match = ANGLE_BRACKETS_REGEX.search(self.sender) if match: # sender format: "some name" <[email protected]> return match.groups()[0].lower().strip() elif '@' in self.sender: # sender format: [email protected] return self.sender.lower().strip() raise ConferenceError('Could not extract sender email') @cached_property def sender_display(self): return self.sender_name or self.sender_email.split('@')[0] @cached_property def route(self): match = re.search(re.compile(BASE_REGEX.format(allowed_types=(self.allowed_types or 'poster|talk')), re.IGNORECASE | re.VERBOSE), self.form['recipient']) if not match: raise ConferenceError('Invalid 
recipient: '.format(self.form['recipient'])) data = match.groupdict() if bool(settings.DEV_MODE) != bool(data['test']): # NOTE: test.osf.io has DEV_MODE = False if not data['test'] or (data['test'] and data['test'].rstrip('-') != 'test'): raise ConferenceError( 'Mismatch between `DEV_MODE` and recipient {0}'.format( self.form['recipient'] ) ) return data @cached_property def conference_name(self): return self.route['meeting'] @cached_property def conference_category(self): return self.route['category'] @cached_property def attachments(self): count = self.form.get('attachment-count', 0) try: count = int(count) except (TypeError, ValueError): count = 0 return list(filter( lambda value: value is not None, list(map( lambda idx: self.request.files.get('attachment-{0}'.format(idx + 1)), list(range(count)), )), )) @property def allowed_types(self): Conference = apps.get_model('osf.Conference') allowed_types = [] for field_names in Conference.objects.values_list('field_names', flat=True): allowed_types.extend([field_names['submission1'], field_names['submission2']]) regex_types_allowed = '|'.join(set(allowed_types)) return regex_types_allowed
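verify_signature recomputes Mailgun's webhook signature: an HMAC-SHA256 over the concatenated timestamp and token, keyed with the API key. A standalone sketch with made-up values (real ones arrive in the webhook POST and in settings.MAILGUN_API_KEY):

import hashlib
import hmac

api_key = "key-example"                       # hypothetical API key
timestamp, token = "1679000000", "sometoken"  # hypothetical webhook fields

posted_signature = hmac.new(
    key=api_key.encode(),
    msg="{}{}".format(timestamp, token).encode(),
    digestmod=hashlib.sha256,
).hexdigest()

# The check the class performs: recompute and compare.
expected = hmac.new(api_key.encode(), "{}{}".format(timestamp, token).encode(), hashlib.sha256).hexdigest()
print(hmac.compare_digest(posted_signature, expected))  # True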
    null
    399
    """Unit Test for otx.algorithms.action.adapters.mmaction.utils.config_utils.""" # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from typing import Any import torch from mmaction.models import Recognizer3D from mmcv.runner import BaseModule from mmcv.utils import Config from torch import nn from otx.algorithms.action.adapters.mmaction.models.detectors.fast_rcnn import ( AVAFastRCNN, ) from otx.algorithms.action.adapters.mmaction.utils.export_utils import ( Exporter, _convert_sync_batch_to_normal_batch, ) from tests.test_suite.e2e_test_system import e2e_pytest_unit class MockRecognizer3D(Recognizer3D, BaseModule): """Mock class for Recognizer3D.""" def __init__(self) -> None: super(BaseModule, self).__init__() def forward(self, inputs: Any) -> str: return "Forward function is replaced!" def load_state_dict(self, weights) -> Recognizer3D: pass class MockAVAFastRCNN(AVAFastRCNN): """Mock class for AVAFastRCNN.""" def __init__(self) -> None: super(BaseModule, self).__init__() self.deploy_cfg = None def METHOD_NAME(self) -> None: pass def forward_infer(self, inputs: Any, img_metas: Any) -> str: return "Forward function is replaced!" def load_state_dict(self, weights) -> AVAFastRCNN: pass def _mock_sync_batchnorm(inputs): """Mock function for _sync_batch_to_normal_batch function. It returns its inputs """ return inputs @e2e_pytest_unit def test_convert_sync_batch_to_normal_batch() -> None: """Test _convert_sync_batch_to_normal_batch function. <Steps> 1. Create sample module, which has some Conv3D, SyncBatchNorm, BatchNorm3d ops 2. Run _convert_sync_batch_to_normal_batch function to sample module 3. Check SyncBatchNorm is changed into BatchNorm3d 4. Check the other ops don't affect by this function """ sample_module = nn.Sequential( nn.Conv3d(100, 100, 3), nn.SyncBatchNorm(100), nn.Conv3d(100, 100, 3), nn.BatchNorm3d(100) ) output_module = _convert_sync_batch_to_normal_batch(sample_module) assert isinstance(output_module[0], nn.Conv3d) assert isinstance(output_module[1], nn.BatchNorm3d) assert isinstance(output_module[2], nn.Conv3d) assert isinstance(output_module[3], nn.BatchNorm3d) class MockTaskProcessor: """Mock class of task_processor.""" def __init__(self, model_cfg, deploy_cfg, device): self.model_cfg = model_cfg def init_pytorch_model(self, weights): if self.model_cfg.model == "cls": return MockRecognizer3D() return MockAVAFastRCNN() def mock_build_task_processor(model_cfg, deploy_cfg, device): return MockTaskProcessor(model_cfg, deploy_cfg, device) class TestExporter: """Test class for Exporter.""" @e2e_pytest_unit def test_init(self, mocker) -> None: """Test __init__ function. <Steps> 1. Create mock task_processor 2. Create mock Recognizer3D using task_processor 3. Get inputs 4. Create mock AVAFastRCNN using task_processor 5. Get inputs 6. 
Check mo options when half precision """ mocker.patch( "otx.algorithms.action.adapters.mmaction.utils.export_utils.build_task_processor", side_effect=mock_build_task_processor, ) recipe_cfg = Config(dict(model="cls")) deploy_cfg = Config( dict( backend_config=dict( type="openvino", mo_options={}, model_inputs=[dict(opt_shapes=dict(input=[1, 1, 3, 32, 224, 224]))], ) ) ) exporter = Exporter(recipe_cfg, None, deploy_cfg, "./tmp_dir/openvino", False, False) assert isinstance(exporter.model, Recognizer3D) assert exporter.input_tensor.shape == torch.Size([1, 1, 3, 32, 224, 224]) assert exporter.input_metas is None recipe_cfg = Config(dict(model="det")) deploy_cfg = Config( dict( backend_config=dict( type="openvino", mo_options={}, model_inputs=[dict(opt_shapes=dict(input=[1, 3, 32, 224, 224]))], ) ) ) exporter = Exporter(recipe_cfg, None, deploy_cfg, "./tmp_dir/openvino", False, False) assert isinstance(exporter.model, AVAFastRCNN) assert exporter.input_tensor.shape == torch.Size([1, 3, 32, 224, 224]) assert exporter.input_metas is not None exporter = Exporter(recipe_cfg, None, deploy_cfg, "./tmp_dir/openvino", True, False) assert exporter.deploy_cfg.backend_config.mo_options["flags"] == ["--compress_to_fp16"] @e2e_pytest_unit def test_export(self, mocker) -> None: """Test export function.""" mocker.patch("otx.algorithms.action.adapters.mmaction.utils.export_utils.export", return_value=True) mocker.patch("otx.algorithms.action.adapters.mmaction.utils.export_utils.from_onnx", return_value=True) mocker.patch( "otx.algorithms.action.adapters.mmaction.utils.export_utils.build_task_processor", side_effect=mock_build_task_processor, ) recipe_cfg = Config(dict(model="cls")) deploy_cfg = Config( dict( backend_config=dict( type="openvino", mo_options={}, model_inputs=[dict(opt_shapes=dict(input=[1, 1, 3, 32, 224, 224]))], ), ir_config=dict(input_names=["input"], output_names=["output"]), ) ) exporter = Exporter(recipe_cfg, None, deploy_cfg, "./tmp_dir/openvino", False, False) exporter.export()
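The conversion exercised by test_convert_sync_batch_to_normal_batch swaps nn.SyncBatchNorm modules for nn.BatchNorm3d. One way to write such a recursive swap in plain PyTorch (an illustrative sketch, not OTX's actual implementation), assuming PyTorch is installed:

import torch.nn as nn

def sync_bn_to_bn3d(module: nn.Module) -> nn.Module:
    """Recursively replace SyncBatchNorm children with BatchNorm3d, keeping parameters."""
    for name, child in module.named_children():
        if isinstance(child, nn.SyncBatchNorm):
            bn = nn.BatchNorm3d(child.num_features, eps=child.eps, momentum=child.momentum,
                                affine=child.affine,
                                track_running_stats=child.track_running_stats)
            if child.affine:
                bn.weight.data.copy_(child.weight.data)
                bn.bias.data.copy_(child.bias.data)
            if child.track_running_stats:
                bn.running_mean.copy_(child.running_mean)
                bn.running_var.copy_(child.running_var)
            setattr(module, name, bn)
        else:
            sync_bn_to_bn3d(child)
    return module

model = nn.Sequential(nn.Conv3d(8, 8, 3), nn.SyncBatchNorm(8), nn.Conv3d(8, 8, 3), nn.BatchNorm3d(8))
converted = sync_bn_to_bn3d(model)
print(type(converted[1]).__name__)  # BatchNorm3d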
    null