\"\"\"\n\nSAMPLE_XML_NS = \"\"\"\n<body xmlns=\"http://effbot.org/ns\">\n <tag>text</tag>\n <tag />\n <section>\n <tag>subtext</tag>\n </section>\n</body>
\n\n\"\"\"\n\ndef sanity():\n \"\"\"\n Import sanity.\n\n >>> from xml.etree import cElementTree\n \"\"\"\n\ndef check_method(method):\n if not hasattr(method, '__call__'):\n print method, \"not callable\"\n\ndef serialize(ET, elem, encoding=None):\n import StringIO\n file = StringIO.StringIO()\n tree = ET.ElementTree(elem)\n if encoding:\n tree.write(file, encoding)\n else:\n tree.write(file)\n return file.getvalue()\n\ndef summarize(elem):\n return elem.tag\n\ndef summarize_list(seq):\n return map(summarize, seq)\n\ndef interface():\n \"\"\"\n Test element tree interface.\n\n >>> element = ET.Element(\"tag\", key=\"value\")\n >>> tree = ET.ElementTree(element)\n\n Make sure all standard element methods exist.\n\n >>> check_method(element.append)\n >>> check_method(element.insert)\n >>> check_method(element.remove)\n >>> check_method(element.getchildren)\n >>> check_method(element.find)\n >>> check_method(element.findall)\n >>> check_method(element.findtext)\n >>> check_method(element.clear)\n >>> check_method(element.get)\n >>> check_method(element.set)\n >>> check_method(element.keys)\n >>> check_method(element.items)\n >>> check_method(element.getiterator)\n\n Basic method sanity checks.\n\n >>> serialize(ET, element) # 1\n ''\n >>> subelement = ET.Element(\"subtag\")\n >>> element.append(subelement)\n >>> serialize(ET, element) # 2\n ''\n >>> element.insert(0, subelement)\n >>> serialize(ET, element) # 3\n ''\n >>> element.remove(subelement)\n >>> serialize(ET, element) # 4\n ''\n >>> element.remove(subelement)\n >>> serialize(ET, element) # 5\n ''\n >>> element.remove(subelement)\n Traceback (most recent call last):\n ValueError: list.remove(x): x not in list\n >>> serialize(ET, element) # 6\n ''\n \"\"\"\n\ndef METHOD_NAME():\n \"\"\"\n Test find methods (including xpath syntax).\n\n >>> elem = ET.XML(SAMPLE_XML)\n >>> elem.find(\"tag\").tag\n 'tag'\n >>> ET.ElementTree(elem).find(\"tag\").tag\n 'tag'\n >>> elem.find(\"section/tag\").tag\n 'tag'\n >>> ET.ElementTree(elem).find(\"section/tag\").tag\n 'tag'\n >>> elem.findtext(\"tag\")\n 'text'\n >>> elem.findtext(\"tog\")\n >>> elem.findtext(\"tog\", \"default\")\n 'default'\n >>> ET.ElementTree(elem).findtext(\"tag\")\n 'text'\n >>> elem.findtext(\"section/tag\")\n 'subtext'\n >>> ET.ElementTree(elem).findtext(\"section/tag\")\n 'subtext'\n >>> summarize_list(elem.findall(\"tag\"))\n ['tag', 'tag']\n >>> summarize_list(elem.findall(\"*\"))\n ['tag', 'tag', 'section']\n >>> summarize_list(elem.findall(\".//tag\"))\n ['tag', 'tag', 'tag']\n >>> summarize_list(elem.findall(\"section/tag\"))\n ['tag']\n >>> summarize_list(elem.findall(\"section//tag\"))\n ['tag']\n >>> summarize_list(elem.findall(\"section/*\"))\n ['tag']\n >>> summarize_list(elem.findall(\"section//*\"))\n ['tag']\n >>> summarize_list(elem.findall(\"section/.//*\"))\n ['tag']\n >>> summarize_list(elem.findall(\"*/*\"))\n ['tag']\n >>> summarize_list(elem.findall(\"*//*\"))\n ['tag']\n >>> summarize_list(elem.findall(\"*/tag\"))\n ['tag']\n >>> summarize_list(elem.findall(\"*/./tag\"))\n ['tag']\n >>> summarize_list(elem.findall(\"./tag\"))\n ['tag', 'tag']\n >>> summarize_list(elem.findall(\".//tag\"))\n ['tag', 'tag', 'tag']\n >>> summarize_list(elem.findall(\"././tag\"))\n ['tag', 'tag']\n >>> summarize_list(ET.ElementTree(elem).findall(\"/tag\"))\n ['tag', 'tag']\n >>> summarize_list(ET.ElementTree(elem).findall(\"./tag\"))\n ['tag', 'tag']\n >>> elem = ET.XML(SAMPLE_XML_NS)\n >>> summarize_list(elem.findall(\"tag\"))\n []\n >>> 
summarize_list(elem.findall(\"{http://effbot.org/ns}tag\"))\n ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']\n >>> summarize_list(elem.findall(\".//{http://effbot.org/ns}tag\"))\n ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']\n \"\"\"\n\ndef parseliteral():\n r\"\"\"\n\n >>> element = ET.XML(\"text\")\n >>> ET.ElementTree(element).write(sys.stdout)\n text\n >>> element = ET.fromstring(\"text\")\n >>> ET.ElementTree(element).write(sys.stdout)\n text\n >>> print ET.tostring(element)\n text\n >>> print ET.tostring(element, \"ascii\")\n \n text\n >>> _, ids = ET.XMLID(\"text\")\n >>> len(ids)\n 0\n >>> _, ids = ET.XMLID(\"text\")\n >>> len(ids)\n 1\n >>> ids[\"body\"].tag\n 'body'\n \"\"\"\n\ndef check_encoding(encoding):\n \"\"\"\n >>> check_encoding(\"ascii\")\n >>> check_encoding(\"us-ascii\")\n >>> check_encoding(\"iso-8859-1\")\n >>> check_encoding(\"iso-8859-15\")\n >>> check_encoding(\"cp437\")\n >>> #check_encoding(\"mac-roman\")\n \"\"\"\n ET.XML(\n \"\" % encoding\n )\n\ndef bug_1534630():\n \"\"\"\n >>> bob = ET.TreeBuilder()\n >>> e = bob.data(\"data\")\n >>> e = bob.start(\"tag\", {})\n >>> e = bob.end(\"tag\")\n >>> e = bob.close()\n >>> serialize(ET, e)\n ''\n \"\"\"\n\ndef test_main():\n from test import test_xml_etree_c\n test_support.run_doctest(test_xml_etree_c, verbosity=True)\n\nif __name__ == '__main__':\n test_main()"}}},{"rowIdx":2001,"cells":{"id":{"kind":"number","value":2001,"string":"2,001"},"label":{"kind":"string","value":"visualise result"},"text":{"kind":"string","value":"# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ\n# SPDX-FileCopyrightText: 2021 Janek Groehl\n# SPDX-License-Identifier: MIT\n\nfrom simpa import Tags\nimport simpa as sp\nimport numpy as np\nfrom skimage.data import shepp_logan_phantom\nfrom scipy.ndimage import zoom\nfrom simpa_tests.manual_tests import ManualIntegrationTestClass\n\n# FIXME temporary workaround for newest Intel architectures\nimport os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\n\n\nclass SegmentationLoaderTest(ManualIntegrationTestClass):\n\n def setup(self):\n self.path_manager = sp.PathManager()\n target_spacing = 1.0\n label_mask = shepp_logan_phantom()\n label_mask = np.digitize(label_mask, bins=np.linspace(0.0, 1.0, 11), right=True)\n label_mask = np.reshape(label_mask, (400, 1, 400))\n input_spacing = 0.2\n segmentation_volume_tiled = np.tile(label_mask, (1, 128, 1))\n segmentation_volume_mask = np.round(zoom(segmentation_volume_tiled, input_spacing/target_spacing,\n order=0)).astype(int)\n\n def segmentation_class_mapping():\n ret_dict = dict()\n ret_dict[0] = sp.TISSUE_LIBRARY.heavy_water()\n ret_dict[1] = sp.TISSUE_LIBRARY.blood()\n ret_dict[2] = sp.TISSUE_LIBRARY.epidermis()\n ret_dict[3] = sp.TISSUE_LIBRARY.muscle()\n ret_dict[4] = sp.TISSUE_LIBRARY.mediprene()\n ret_dict[5] = sp.TISSUE_LIBRARY.ultrasound_gel()\n ret_dict[6] = sp.TISSUE_LIBRARY.heavy_water()\n ret_dict[7] = (sp.MolecularCompositionGenerator()\n .append(sp.MOLECULE_LIBRARY.oxyhemoglobin(0.01))\n .append(sp.MOLECULE_LIBRARY.deoxyhemoglobin(0.01))\n .append(sp.MOLECULE_LIBRARY.water(0.98))\n .get_molecular_composition(sp.SegmentationClasses.COUPLING_ARTIFACT))\n ret_dict[8] = sp.TISSUE_LIBRARY.heavy_water()\n ret_dict[9] = sp.TISSUE_LIBRARY.heavy_water()\n ret_dict[10] = sp.TISSUE_LIBRARY.heavy_water()\n ret_dict[11] = sp.TISSUE_LIBRARY.heavy_water()\n return ret_dict\n\n self.settings = sp.Settings()\n self.settings[Tags.SIMULATION_PATH] = 
self.path_manager.get_hdf5_file_save_path()\n self.settings[Tags.VOLUME_NAME] = \"SegmentationTest\"\n self.settings[Tags.RANDOM_SEED] = 1234\n self.settings[Tags.WAVELENGTHS] = [700]\n self.settings[Tags.SPACING_MM] = target_spacing\n self.settings[Tags.DIM_VOLUME_X_MM] = 400 / (target_spacing / input_spacing)\n self.settings[Tags.DIM_VOLUME_Y_MM] = 128 / (target_spacing / input_spacing)\n self.settings[Tags.DIM_VOLUME_Z_MM] = 400 / (target_spacing / input_spacing)\n # self.settings[Tags.IGNORE_QA_ASSERTIONS] = True\n\n self.settings.set_volume_creation_settings({\n Tags.INPUT_SEGMENTATION_VOLUME: segmentation_volume_mask,\n Tags.SEGMENTATION_CLASS_MAPPING: segmentation_class_mapping(),\n\n })\n\n self.settings.set_optical_settings({\n Tags.OPTICAL_MODEL_NUMBER_PHOTONS: 1e7,\n Tags.OPTICAL_MODEL_BINARY_PATH: self.path_manager.get_mcx_binary_path(),\n Tags.ILLUMINATION_TYPE: Tags.ILLUMINATION_TYPE_MSOT_ACUITY_ECHO,\n Tags.LASER_PULSE_ENERGY_IN_MILLIJOULE: 50,\n })\n\n self.pipeline = [\n sp.SegmentationBasedVolumeCreationAdapter(self.settings),\n sp.MCXAdapter(self.settings)\n ]\n\n def perform_test(self):\n sp.simulate(self.pipeline, self.settings, sp.RSOMExplorerP50(element_spacing_mm=2.0,\n number_elements_y=10,\n number_elements_x=20,\n device_position_mm=np.asarray([20, 10, 0])))\n\n def tear_down(self):\n os.remove(self.settings[Tags.SIMPA_OUTPUT_PATH])\n\n def METHOD_NAME(self, show_figure_on_screen=True, save_path=None):\n\n if show_figure_on_screen:\n save_path = None\n else:\n save_path = save_path + \"SegmentationLoaderExample.png\"\n\n sp.visualise_data(path_to_hdf5_file=self.path_manager.get_hdf5_file_save_path() + \"/\" + \"SegmentationTest\" + \".hdf5\",\n wavelength=700,\n show_initial_pressure=True,\n show_segmentation_map=True,\n show_absorption=True,\n show_fluence=True,\n show_tissue_density=True,\n show_speed_of_sound=True,\n show_anisotropy=True,\n show_scattering=True,\n save_path=save_path,\n log_scale=False)\n\n\nif __name__ == \"__main__\":\n test = SegmentationLoaderTest()\n test.run_test(show_figure_on_screen=False)"}}},{"rowIdx":2002,"cells":{"id":{"kind":"number","value":2002,"string":"2,002"},"label":{"kind":"string","value":"set up"},"text":{"kind":"string","value":"# Copyright 2022 The KerasCV Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport numpy as np\nimport pytest\nfrom absl.testing import parameterized\n\nfrom keras_cv.backend import keras\nfrom keras_cv.backend import ops\nfrom keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (\n ResNet50V2Backbone,\n)\nfrom keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (\n ResNetV2Backbone,\n)\nfrom keras_cv.tests.test_case import TestCase\nfrom keras_cv.utils.train import get_feature_extractor\n\n\nclass ResNetV2BackboneTest(TestCase):\n def METHOD_NAME(self):\n self.input_batch = np.ones(shape=(8, 224, 224, 3))\n\n def test_valid_call(self):\n model = ResNetV2Backbone(\n stackwise_filters=[64, 128, 256, 512],\n stackwise_blocks=[2, 2, 2, 2],\n 
stackwise_strides=[1, 2, 2, 2],\n include_rescaling=False,\n )\n model(self.input_batch)\n\n def test_valid_call_applications_model(self):\n model = ResNet50V2Backbone()\n model(self.input_batch)\n\n def test_valid_call_with_rescaling(self):\n model = ResNetV2Backbone(\n stackwise_filters=[64, 128, 256, 512],\n stackwise_blocks=[2, 2, 2, 2],\n stackwise_strides=[1, 2, 2, 2],\n include_rescaling=True,\n )\n model(self.input_batch)\n\n @pytest.mark.large # Saving is slow, so mark these large.\n def test_saved_model(self):\n model = ResNetV2Backbone(\n stackwise_filters=[64, 128, 256, 512],\n stackwise_blocks=[2, 2, 2, 2],\n stackwise_strides=[1, 2, 2, 2],\n include_rescaling=False,\n )\n model_output = model(self.input_batch)\n save_path = os.path.join(\n self.get_temp_dir(), \"resnet_v2_backbone.keras\"\n )\n model.save(save_path)\n restored_model = keras.models.load_model(save_path)\n\n # Check we got the real object back.\n self.assertIsInstance(restored_model, ResNetV2Backbone)\n\n # Check that output matches.\n restored_output = restored_model(self.input_batch)\n self.assertAllClose(\n ops.convert_to_numpy(model_output),\n ops.convert_to_numpy(restored_output),\n )\n\n @pytest.mark.large # Saving is slow, so mark these large.\n def test_saved_alias_model(self):\n model = ResNet50V2Backbone()\n model_output = model(self.input_batch)\n save_path = os.path.join(\n self.get_temp_dir(), \"resnet_v2_backbone.keras\"\n )\n model.save(save_path)\n restored_model = keras.models.load_model(save_path)\n\n # Check we got the real object back.\n # Note that these aliases serialized as the base class\n self.assertIsInstance(restored_model, ResNetV2Backbone)\n\n # Check that output matches.\n restored_output = restored_model(self.input_batch)\n self.assertAllClose(\n ops.convert_to_numpy(model_output),\n ops.convert_to_numpy(restored_output),\n )\n\n def test_feature_pyramid_inputs(self):\n model = ResNet50V2Backbone()\n backbone_model = get_feature_extractor(\n model,\n model.pyramid_level_inputs.values(),\n model.pyramid_level_inputs.keys(),\n )\n input_size = 256\n inputs = keras.Input(shape=[input_size, input_size, 3])\n outputs = backbone_model(inputs)\n levels = [\"P2\", \"P3\", \"P4\", \"P5\"]\n self.assertEquals(list(outputs.keys()), levels)\n self.assertEquals(\n outputs[\"P2\"].shape,\n (None, input_size // 2**2, input_size // 2**2, 256),\n )\n self.assertEquals(\n outputs[\"P3\"].shape,\n (None, input_size // 2**3, input_size // 2**3, 512),\n )\n self.assertEquals(\n outputs[\"P4\"].shape,\n (None, input_size // 2**4, input_size // 2**4, 1024),\n )\n self.assertEquals(\n outputs[\"P5\"].shape,\n (None, input_size // 2**5, input_size // 2**5, 2048),\n )\n\n @parameterized.named_parameters(\n (\"one_channel\", 1),\n (\"four_channels\", 4),\n )\n def test_application_variable_input_channels(self, num_channels):\n # ResNet50 model\n model = ResNetV2Backbone(\n stackwise_filters=[64, 128, 256, 512],\n stackwise_blocks=[3, 4, 6, 3],\n stackwise_strides=[1, 2, 2, 2],\n input_shape=(None, None, num_channels),\n include_rescaling=False,\n )\n self.assertEqual(model.output_shape, (None, None, None, 2048))"}}},{"rowIdx":2003,"cells":{"id":{"kind":"number","value":2003,"string":"2,003"},"label":{"kind":"string","value":"async neo4j driver"},"text":{"kind":"string","value":"# Copyright (c) \"Neo4j\"\n# Neo4j Sweden AB [https://neo4j.com]\n#\n# This file is part of Neo4j.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport asyncio\nimport sys\nfrom functools import wraps\n\nimport pytest\nimport pytest_asyncio\n\nfrom neo4j import (\n AsyncGraphDatabase,\n GraphDatabase,\n)\nfrom neo4j.debug import watch\n\nfrom . import env\n\n\n# from neo4j.debug import watch\n#\n# watch(\"neo4j\")\n\n\n@pytest.fixture(scope=\"session\")\ndef uri():\n return env.NEO4J_SERVER_URI\n\n\n@pytest.fixture(scope=\"session\")\ndef bolt_uri(uri):\n if env.NEO4J_SCHEME != \"bolt\":\n pytest.skip(\"Test requires bolt scheme\")\n return uri\n\n\n@pytest.fixture(scope=\"session\")\ndef _forced_bolt_uri():\n return f\"bolt://{env.NEO4J_HOST}:{env.NEO4J_PORT}\"\n\n\n@pytest.fixture(scope=\"session\")\ndef neo4j_uri():\n if env.NEO4J_SCHEME != \"neo4j\":\n pytest.skip(\"Test requires neo4j scheme\")\n return uri\n\n\n@pytest.fixture(scope=\"session\")\ndef _forced_neo4j_uri():\n return f\"neo4j://{env.NEO4J_HOST}:{env.NEO4J_PORT}\"\n\n\n@pytest.fixture(scope=\"session\")\ndef auth():\n return env.NEO4J_USER, env.NEO4J_PASS\n\n\n@pytest.fixture\ndef driver(uri, auth):\n with GraphDatabase.driver(uri, auth=auth) as driver:\n yield driver\n\n\n@pytest.fixture\ndef bolt_driver(bolt_uri, auth):\n with GraphDatabase.driver(bolt_uri, auth=auth) as driver:\n yield driver\n\n\n@pytest.fixture\ndef neo4j_driver(neo4j_uri, auth):\n with GraphDatabase.driver(neo4j_uri, auth=auth) as driver:\n yield driver\n\n\n@wraps(AsyncGraphDatabase.driver)\ndef get_async_driver(*args, **kwargs):\n return AsyncGraphDatabase.driver(*args, **kwargs)\n\n\n@pytest_asyncio.fixture\nasync def async_driver(uri, auth):\n async with get_async_driver(uri, auth=auth) as driver:\n yield driver\n\n\n@pytest_asyncio.fixture\nasync def async_bolt_driver(bolt_uri, auth):\n async with get_async_driver(bolt_uri, auth=auth) as driver:\n yield driver\n\n\n@pytest_asyncio.fixture\nasync def METHOD_NAME(neo4j_uri, auth):\n async with get_async_driver(neo4j_uri, auth=auth) as driver:\n yield driver\n\n\n@pytest.fixture\ndef _forced_bolt_driver(_forced_bolt_uri):\n with GraphDatabase.driver(_forced_bolt_uri, auth=auth) as driver:\n yield driver\n\n\n@pytest.fixture\ndef _forced_neo4j_driver(_forced_neo4j_uri):\n with GraphDatabase.driver(_forced_neo4j_uri, auth=auth) as driver:\n yield driver\n\n\n@pytest.fixture(scope=\"session\")\ndef server_info(_forced_bolt_driver):\n return _forced_bolt_driver.get_server_info()\n\n\n@pytest.fixture(scope=\"session\")\ndef bolt_protocol_version(server_info):\n return server_info.protocol_version\n\n\ndef mark_requires_min_bolt_version(version=\"3.5\"):\n return pytest.mark.skipif(\n env.NEO4J_VERSION < version,\n reason=f\"requires server version '{version}' or higher, \"\n f\"found '{env.NEO4J_VERSION}'\"\n )\n\n\ndef mark_requires_edition(edition):\n return pytest.mark.skipif(\n env.NEO4J_EDITION != edition,\n reason=f\"requires server edition '{edition}', \"\n f\"found '{env.NEO4J_EDITION}'\"\n )\n\n\n@pytest.fixture\ndef session(driver):\n with driver.session() as session:\n yield session\n\n\n@pytest.fixture\ndef bolt_session(bolt_driver):\n with bolt_driver.session() as session:\n yield 
session\n\n\n@pytest.fixture\ndef neo4j_session(neo4j_driver):\n with neo4j_driver.session() as session:\n yield session\n\n\n# async support for pytest-benchmark\n# https://github.com/ionelmc/pytest-benchmark/issues/66\n@pytest_asyncio.fixture\nasync def aio_benchmark(benchmark, event_loop):\n def _wrapper(func, *args, **kwargs):\n if asyncio.iscoroutinefunction(func):\n @benchmark\n def _():\n return event_loop.run_until_complete(func(*args, **kwargs))\n else:\n benchmark(func, *args, **kwargs)\n\n return _wrapper\n\n\n@pytest.fixture\ndef watcher():\n with watch(\"neo4j\", out=sys.stdout, colour=True):\n yield"}}},{"rowIdx":2004,"cells":{"id":{"kind":"number","value":2004,"string":"2,004"},"label":{"kind":"string","value":"recharge connection config"},"text":{"kind":"string","value":"import uuid\nfrom typing import Any, Dict, Generator\n\nimport pydash\nimport pytest\nimport requests\nfrom faker import Faker\nfrom requests import Response\nfrom sqlalchemy.orm import Session\n\nfrom fides.api.db import session\nfrom fides.api.models.connectionconfig import (\n AccessLevel,\n ConnectionConfig,\n ConnectionType,\n)\nfrom fides.api.models.datasetconfig import DatasetConfig\nfrom fides.api.models.sql_models import Dataset as CtlDataset\nfrom fides.api.util.saas_util import (\n load_config_with_replacement,\n load_dataset_with_replacement,\n)\nfrom tests.ops.test_helpers.saas_test_utils import poll_for_existence\nfrom tests.ops.test_helpers.vault_client import get_secrets\n\nsecrets = get_secrets(\"recharge\")\n\n\n@pytest.fixture(scope=\"function\")\ndef recharge_secrets(saas_config):\n return {\n \"domain\": pydash.get(saas_config, \"recharge.domain\") or secrets[\"domain\"],\n \"api_key\": pydash.get(saas_config, \"recharge.api_key\") or secrets[\"api_key\"],\n }\n\n\n@pytest.fixture(scope=\"function\")\ndef recharge_identity_email(saas_config):\n return (\n pydash.get(saas_config, \"recharge.identity_email\") or secrets[\"identity_email\"]\n )\n\n\n@pytest.fixture(scope=\"function\")\ndef recharge_erasure_identity_email():\n return f\"{uuid.uuid4().hex}@email.com\"\n\n\n@pytest.fixture\ndef recharge_config() -> Dict[str, Any]:\n return load_config_with_replacement(\n \"data/saas/config/recharge_config.yml\",\n \"\",\n \"recharge_instance\",\n )\n\n\n@pytest.fixture\ndef recharge_dataset() -> Dict[str, Any]:\n return load_dataset_with_replacement(\n \"data/saas/dataset/recharge_dataset.yml\",\n \"\",\n \"recharge_instance\",\n )[0]\n\n\n@pytest.fixture(scope=\"function\")\ndef METHOD_NAME(\n db: session, recharge_config, recharge_secrets\n) -> Generator:\n fides_key = recharge_config[\"fides_key\"]\n connection_config = ConnectionConfig.create(\n db=db,\n data={\n \"key\": fides_key,\n \"name\": fides_key,\n \"connection_type\": ConnectionType.saas,\n \"access\": AccessLevel.write,\n \"secrets\": recharge_secrets,\n \"saas_config\": recharge_config,\n },\n )\n yield connection_config\n connection_config.delete(db)\n\n\n@pytest.fixture\ndef recharge_dataset_config(\n db: Session,\n METHOD_NAME: ConnectionConfig,\n recharge_dataset: Dict[str, Any],\n) -> Generator:\n fides_key = recharge_dataset[\"fides_key\"]\n METHOD_NAME.name = fides_key\n METHOD_NAME.key = fides_key\n METHOD_NAME.save(db=db)\n\n ctl_dataset = CtlDataset.create_from_dataset_dict(db, recharge_dataset)\n\n dataset = DatasetConfig.create(\n db=db,\n data={\n \"connection_config_id\": METHOD_NAME.id,\n \"fides_key\": fides_key,\n \"ctl_dataset_id\": ctl_dataset.id,\n },\n )\n yield dataset\n dataset.delete(db=db)\n 
ctl_dataset.delete(db)\n\n\nclass RechargeTestClient:\n \"\"\"Helper to call various Recharge data management requests\"\"\"\n\n def __init__(self, METHOD_NAME: ConnectionConfig):\n self.recharge_secrets = METHOD_NAME.secrets\n self.headers = {\n \"X-Recharge-Access-Token\": self.recharge_secrets[\"api_key\"],\n \"Content-Type\": \"application/json\",\n }\n self.base_url = f\"https://{self.recharge_secrets['domain']}\"\n self.faker = Faker()\n self.first_name = self.faker.first_name()\n self.last_name = self.faker.last_name()\n self.street_address = self.faker.street_address()\n\n # 1: Creates, checks for existance and deletes customer\n def create_customer(self, email) -> Response:\n customer_body = {\n \"first_name\": self.first_name,\n \"last_name\": self.last_name,\n \"email\": email,\n \"billing_address1\": self.street_address,\n \"billing_city\": \"New York City\",\n \"billing_province\": \"New York\",\n \"billing_country\": \"United States\",\n \"billing_first_name\": self.first_name,\n \"billing_last_name\": self.last_name,\n \"billing_zip\": \"10001\",\n }\n\n customer_response: Response = requests.post(\n url=f\"{self.base_url}/customers\",\n json=customer_body,\n headers=self.headers,\n )\n assert customer_response.ok\n\n return customer_response\n\n def get_customer(self, email):\n customer_response: Response = requests.get(\n url=f\"{self.base_url}/customers\",\n params={\"email\": email},\n headers=self.headers,\n )\n assert customer_response.ok\n return customer_response.json()\n\n def delete_customer(self, customer_id):\n customer_response: Response = requests.delete(\n url=f\"{self.base_url}/customers/{customer_id}\", headers=self.headers\n )\n assert customer_response.ok\n\n # 2: Creates, checks for existance and deletes address\n def create_address(self, customer_id) -> Response:\n address_body = {\n \"customer_id\": customer_id,\n \"address1\": self.street_address,\n \"address2\": self.street_address,\n \"city\": \"Los Angeles\",\n \"company\": \"Recharge\",\n \"country_code\": \"US\",\n \"country\": \"United States\",\n \"first_name\": self.first_name,\n \"last_name\": self.last_name,\n \"order_attributes\": [{\"name\": \"custom name\", \"value\": \"custom value\"}],\n \"phone\": \"5551234567\",\n \"province\": \"California\",\n \"zip\": \"90001\",\n }\n address_response = requests.post(\n url=f\"{self.base_url}/addresses\",\n headers=self.headers,\n json=address_body,\n )\n assert address_response.ok\n return address_response\n\n def get_addresses(self, customer_id):\n address_response: Response = requests.get(\n url=f\"{self.base_url}/addresses\",\n params={\"customer_id\": customer_id},\n headers=self.headers,\n )\n assert address_response.ok\n return address_response.json()\n\n def delete_address(self, address_id):\n address_response: Response = requests.delete(\n url=f\"{self.base_url}/addresses/{address_id}\", headers=self.headers\n )\n assert address_response.ok\n\n\n@pytest.fixture(scope=\"function\")\ndef recharge_test_client(METHOD_NAME: RechargeTestClient) -> Generator:\n test_client = RechargeTestClient(\n METHOD_NAME=METHOD_NAME\n )\n yield test_client\n\n\n@pytest.fixture(scope=\"function\")\ndef recharge_erasure_data(\n recharge_test_client: RechargeTestClient, recharge_erasure_identity_email: str\n) -> Generator:\n customer_response = recharge_test_client.create_customer(\n recharge_erasure_identity_email\n )\n error_message = f\"customer with email {recharge_erasure_identity_email} could not be created in Recharge\"\n poll_for_existence(\n 
recharge_test_client.get_customer,\n (recharge_erasure_identity_email,),\n error_message=error_message,\n )\n customer_id = customer_response.json()[\"customer\"][\"id\"]\n\n address_response = recharge_test_client.create_address(customer_id)\n error_message = f\"address for customer '{recharge_erasure_identity_email}' could not be created in Recharge\"\n poll_for_existence(\n recharge_test_client.get_addresses,\n args=(customer_id,),\n error_message=error_message,\n )\n address_id = address_response.json()[\"address\"][\"id\"]\n\n yield customer_response, address_response\n\n recharge_test_client.delete_address(address_id)\n recharge_test_client.delete_customer(customer_id)"}}},{"rowIdx":2005,"cells":{"id":{"kind":"number","value":2005,"string":"2,005"},"label":{"kind":"string","value":"post load parent"},"text":{"kind":"string","value":"\"\"\"\nFaraday Penetration Test IDE\nCopyright (C) 2016 Infobyte LLC (https://faradaysec.com/)\nSee the file 'doc/LICENSE' for the license information\n\"\"\"\n\n# Related third party imports\nfrom flask import Blueprint, abort, make_response, jsonify\nfrom filteralchemy import FilterSet, operators # pylint:disable=unused-import\nfrom marshmallow import fields, post_load, ValidationError\nfrom marshmallow.validate import OneOf, Range\nfrom sqlalchemy.orm.exc import NoResultFound\n\n# Local application imports\nfrom faraday.server.models import (\n Host,\n Service,\n Workspace,\n db\n)\nfrom faraday.server.api.base import (\n AutoSchema,\n ReadWriteWorkspacedView,\n FilterSetMeta,\n FilterAlchemyMixin,\n BulkDeleteWorkspacedMixin,\n BulkUpdateWorkspacedMixin\n)\nfrom faraday.server.schemas import (\n MetadataSchema,\n MutableField,\n PrimaryKeyRelatedField,\n SelfNestedField,\n)\nfrom faraday.server.utils.command import set_command_id\n\nservices_api = Blueprint('services_api', __name__)\n\n\nclass ServiceSchema(AutoSchema):\n _id = fields.Integer(attribute='id', dump_only=True)\n _rev = fields.String(default='', dump_only=True)\n owned = fields.Boolean(default=False)\n owner = PrimaryKeyRelatedField('username', dump_only=True,\n attribute='creator')\n # Port is loaded via ports\n port = fields.Integer(dump_only=True, required=True,\n validate=[Range(min=0, error=\"The value must be greater than or equal to 0\")])\n ports = MutableField(fields.Integer(required=True,\n validate=[Range(min=0, error=\"The value must be greater than or equal to 0\")]),\n fields.Method(deserialize='load_ports'),\n required=True,\n attribute='port')\n status = fields.String(missing='open', validate=OneOf(Service.STATUSES),\n allow_none=False)\n parent = fields.Integer(attribute='host_id') # parent is not required for updates\n host_id = fields.Integer(attribute='host_id', dump_only=True)\n vulns = fields.Integer(attribute='vulnerability_count', dump_only=True)\n credentials = fields.Integer(attribute='credentials_count', dump_only=True)\n metadata = SelfNestedField(MetadataSchema())\n type = fields.Function(lambda obj: 'Service', dump_only=True)\n summary = fields.String(dump_only=True)\n command_id = fields.Int(required=False, load_only=True)\n\n @staticmethod\n def load_ports(value):\n if not isinstance(value, list):\n raise ValidationError('ports must be a list')\n if len(value) != 1:\n raise ValidationError('ports must be a list with exactly one'\n 'element')\n port = value.pop()\n if isinstance(port, str):\n try:\n port = int(port)\n except ValueError as e:\n raise ValidationError('The value must be a number') from e\n if port > 65535 or port < 1:\n raise 
ValidationError('The value must be in the range [1-65535]')\n\n return str(port)\n\n @post_load\n def METHOD_NAME(self, data, **kwargs):\n \"\"\"Gets the host_id from parent attribute. Pops it and tries to\n get a Host with that id in the corresponding workspace.\n \"\"\"\n host_id = data.pop('host_id', None)\n if self.context['updating']:\n if host_id is None:\n # Partial update?\n return data\n\n if 'object' in self.context:\n if host_id != self.context['object'].parent.id:\n raise ValidationError('Can\\'t change service parent.')\n else:\n if any(host_id != obj.parent.id for obj in self.context['objects']):\n raise ValidationError('Can\\'t change service parent.')\n\n else:\n if not host_id:\n raise ValidationError('Parent id is required when creating a service.')\n\n try:\n data['host'] = Host.query.join(Workspace).filter(\n Workspace.name == self.context['workspace_name'],\n Host.id == host_id\n ).one()\n except NoResultFound as e:\n raise ValidationError(f'Host with id {host_id} not found') from e\n\n return data\n\n class Meta:\n model = Service\n fields = ('id', '_id', 'status', 'parent', 'type',\n 'protocol', 'description', '_rev',\n 'owned', 'owner', 'credentials', 'vulns',\n 'name', 'version', '_id', 'port', 'ports',\n 'metadata', 'summary', 'host_id', 'command_id')\n\n\nclass ServiceFilterSet(FilterSet):\n class Meta(FilterSetMeta):\n model = Service\n fields = ('id', 'host_id', 'protocol', 'name', 'port')\n default_operator = operators.Equal\n operators = (operators.Equal,)\n\n\nclass ServiceView(FilterAlchemyMixin, ReadWriteWorkspacedView, BulkDeleteWorkspacedMixin, BulkUpdateWorkspacedMixin):\n\n route_base = 'services'\n model_class = Service\n schema_class = ServiceSchema\n count_extra_filters = [Service.status == 'open']\n get_undefer = [Service.credentials_count, Service.vulnerability_count]\n get_joinedloads = [Service.credentials, Service.update_user]\n filterset_class = ServiceFilterSet\n\n def _envelope_list(self, objects, pagination_metadata=None):\n services = []\n for service in objects:\n services.append({\n 'id': service['_id'],\n 'key': service['_id'],\n 'value': service\n })\n return {\n 'services': services,\n }\n\n def _perform_create(self, data, **kwargs):\n command_id = data.pop('command_id', None)\n port_number = data.get(\"port\", \"1\")\n if not port_number.isdigit():\n abort(make_response(jsonify(message=\"Invalid Port number\"), 400))\n obj = super()._perform_create(data, **kwargs)\n if command_id:\n set_command_id(db.session, obj, True, command_id)\n return obj\n\n\nServiceView.register(services_api)"}}},{"rowIdx":2006,"cells":{"id":{"kind":"number","value":2006,"string":"2,006"},"label":{"kind":"string","value":"create mock svc record"},"text":{"kind":"string","value":"from unittest import TestCase\n\nfrom tapiriik.services import Service, ServiceRecord, ServiceBase\nfrom tapiriik.services.interchange import Activity, ActivityType, ActivityStatistic, ActivityStatisticUnit, Waypoint, WaypointType, Lap, Location\n\nfrom datetime import datetime, timedelta\nimport random\nimport pytz\nfrom tapiriik.database import db\n\n\nclass MockServiceA(ServiceBase):\n ID = \"mockA\"\n SupportedActivities = [ActivityType.Rowing]\n\n\nclass MockServiceB(ServiceBase):\n ID = \"mockB\"\n SupportedActivities = [ActivityType.Rowing, ActivityType.Wheelchair]\n\n\nclass TapiriikTestCase(TestCase):\n def assertActivitiesEqual(self, a, b):\n ''' compare activity records with more granular asserts '''\n if a == b:\n return\n else:\n self.assertEqual(a.StartTime, 
b.StartTime)\n self.assertEqual(a.EndTime, b.EndTime)\n self.assertEqual(a.Type, b.Type)\n self.assertEqual(a.Stats.Distance, b.Stats.Distance)\n self.assertEqual(a.Name, b.Name)\n self.assertLapsListsEqual(a.Laps, b.Laps)\n\n def assertLapsListsEqual(self, lapsa, lapsb):\n self.assertEqual(len(lapsa), len(lapsb))\n for idx in range(len(lapsa)):\n la = lapsa[idx]\n lb = lapsb[idx]\n self.assertLapsEqual(la, lb)\n\n def assertLapsEqual(self, la, lb):\n self.assertEqual(la.StartTime, lb.StartTime)\n self.assertEqual(la.EndTime, lb.EndTime)\n self.assertEqual(len(la.Waypoints), len(lb.Waypoints))\n for idx in range(len(la.Waypoints)):\n wpa = la.Waypoints[idx]\n wpb = lb.Waypoints[idx]\n self.assertEqual(wpa.Timestamp.astimezone(pytz.utc), wpb.Timestamp.astimezone(pytz.utc))\n self.assertEqual(wpa.Location.Latitude, wpb.Location.Latitude)\n self.assertEqual(wpa.Location.Longitude, wpb.Location.Longitude)\n self.assertEqual(wpa.Location.Altitude, wpb.Location.Altitude)\n self.assertEqual(wpa.Type, wpb.Type)\n self.assertEqual(wpa.HR, wpb.HR)\n self.assertEqual(wpa.Calories, wpb.Calories)\n self.assertEqual(wpa.Power, wpb.Power)\n self.assertEqual(wpa.Cadence, wpb.Cadence)\n self.assertEqual(wpa.Temp, wpb.Temp)\n self.assertEqual(wpa.Location, wpb.Location)\n self.assertEqual(wpa, wpb)\n\n\nclass TestTools:\n def create_mock_user():\n db.test.insert({\"asd\": \"asdd\"})\n return {\"_id\": str(random.randint(1, 1000))}\n\n def METHOD_NAME(svc):\n return ServiceRecord({\"Service\": svc.ID, \"_id\": str(random.randint(1, 1000)), \"ExternalID\": str(random.randint(1, 1000))})\n\n def create_mock_servicedata(svc, record=None):\n return {\"ActivityID\": random.randint(1, 1000), \"Connection\": record}\n\n def create_mock_servicedatacollection(svc, record=None):\n record = record if record else TestTools.METHOD_NAME(svc)\n return {record._id: TestTools.create_mock_servicedata(svc, record=record)}\n\n def create_blank_activity(svc=None, actType=ActivityType.Other, record=None):\n act = Activity()\n act.Type = actType\n if svc:\n record = record if record else TestTools.METHOD_NAME(svc)\n act.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svc, record=record)\n act.StartTime = datetime.now()\n act.EndTime = act.StartTime + timedelta(seconds=42)\n act.CalculateUID()\n return act\n\n def create_random_activity(svc=None, actType=ActivityType.Other, tz=False, record=None, withPauses=True, withLaps=True):\n ''' creates completely random activity with valid waypoints and data '''\n act = TestTools.create_blank_activity(svc, actType, record=record)\n\n if tz is True:\n tz = pytz.timezone(\"America/Atikokan\")\n act.TZ = tz\n elif tz is not False:\n act.TZ = tz\n\n if act.CountTotalWaypoints() > 0:\n raise ValueError(\"Waypoint list already populated\")\n # this is entirely random in case the testing account already has events in it (API doesn't support delete, etc)\n act.StartTime = datetime(2011, 12, 13, 14, 15, 16)\n if tz is not False:\n if hasattr(tz, \"localize\"):\n act.StartTime = tz.localize(act.StartTime)\n else:\n act.StartTime = act.StartTime.replace(tzinfo=tz)\n act.EndTime = act.StartTime + timedelta(0, random.randint(60 * 5, 60 * 60)) # don't really need to upload 1000s of pts to test this...\n act.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=random.random() * 10000)\n act.Name = str(random.random())\n paused = False\n waypointTime = act.StartTime\n backToBackPauses = False\n act.Laps = []\n lap = Lap(startTime=act.StartTime)\n while waypointTime < 
act.EndTime:\n wp = Waypoint()\n if waypointTime == act.StartTime:\n wp.Type = WaypointType.Start\n wp.Timestamp = waypointTime\n wp.Location = Location(random.random() * 180 - 90, random.random() * 180 - 90, random.random() * 1000) # this is gonna be one intense activity\n\n if not (wp.HR == wp.Cadence == wp.Calories == wp.Power == wp.Temp == None):\n raise ValueError(\"Waypoint did not initialize cleanly\")\n if svc.SupportsHR:\n wp.HR = float(random.randint(90, 180))\n if svc.SupportsPower:\n wp.Power = float(random.randint(0, 1000))\n if svc.SupportsCalories:\n wp.Calories = float(random.randint(0, 500))\n if svc.SupportsCadence:\n wp.Cadence = float(random.randint(0, 100))\n if svc.SupportsTemp:\n wp.Temp = float(random.randint(0, 100))\n\n if withPauses and (random.randint(40, 50) == 42 or backToBackPauses) and not paused: # pause quite often\n wp.Type = WaypointType.Pause\n paused = True\n\n elif paused:\n paused = False\n wp.Type = WaypointType.Resume\n backToBackPauses = not backToBackPauses\n\n waypointTime += timedelta(0, int(random.random() + 9.5)) # 10ish seconds\n\n lap.Waypoints.append(wp)\n if waypointTime > act.EndTime:\n wp.Timestamp = act.EndTime\n wp.Type = WaypointType.End\n elif withLaps and wp.Timestamp < act.EndTime and random.randint(40, 60) == 42:\n # occasionally start new laps\n lap.EndTime = wp.Timestamp\n act.Laps.append(lap)\n lap = Lap(startTime=waypointTime)\n\n # Final lap\n lap.EndTime = act.EndTime\n act.Laps.append(lap)\n if act.CountTotalWaypoints() == 0:\n raise ValueError(\"No waypoints populated\")\n\n act.CalculateUID()\n act.EnsureTZ()\n\n return act\n\n def create_mock_service(id):\n mock = MockServiceA()\n mock.ID = id\n Service._serviceMappings[id] = mock\n return mock\n\n def create_mock_services():\n mockA = MockServiceA()\n mockB = MockServiceB()\n Service._serviceMappings[\"mockA\"] = mockA\n Service._serviceMappings[\"mockB\"] = mockB\n return (mockA, mockB)"}}},{"rowIdx":2007,"cells":{"id":{"kind":"number","value":2007,"string":"2,007"},"label":{"kind":"string","value":"get view frame from calib frame"},"text":{"kind":"string","value":"import numpy as np\n\nimport common.transformations.orientation as orient\n\n## -- hardcoded hardware params --\neon_f_focal_length = 910.0\neon_d_focal_length = 650.0\ntici_f_focal_length = 2648.0\ntici_e_focal_length = tici_d_focal_length = 567.0 # probably wrong? 
magnification is not consistent across frame\n\neon_f_frame_size = (1164, 874)\neon_d_frame_size = (816, 612)\ntici_f_frame_size = tici_e_frame_size = tici_d_frame_size = (1928, 1208)\n\n# aka 'K' aka camera_frame_from_view_frame\neon_fcam_intrinsics = np.array([\n [eon_f_focal_length, 0.0, float(eon_f_frame_size[0])/2],\n [0.0, eon_f_focal_length, float(eon_f_frame_size[1])/2],\n [0.0, 0.0, 1.0]])\neon_intrinsics = eon_fcam_intrinsics # xx\n\neon_dcam_intrinsics = np.array([\n [eon_d_focal_length, 0.0, float(eon_d_frame_size[0])/2],\n [0.0, eon_d_focal_length, float(eon_d_frame_size[1])/2],\n [0.0, 0.0, 1.0]])\n\ntici_fcam_intrinsics = np.array([\n [tici_f_focal_length, 0.0, float(tici_f_frame_size[0])/2],\n [0.0, tici_f_focal_length, float(tici_f_frame_size[1])/2],\n [0.0, 0.0, 1.0]])\n\ntici_dcam_intrinsics = np.array([\n [tici_d_focal_length, 0.0, float(tici_d_frame_size[0])/2],\n [0.0, tici_d_focal_length, float(tici_d_frame_size[1])/2],\n [0.0, 0.0, 1.0]])\n\ntici_ecam_intrinsics = tici_dcam_intrinsics\n\n# aka 'K_inv' aka view_frame_from_camera_frame\neon_fcam_intrinsics_inv = np.linalg.inv(eon_fcam_intrinsics)\neon_intrinsics_inv = eon_fcam_intrinsics_inv # xx\n\ntici_fcam_intrinsics_inv = np.linalg.inv(tici_fcam_intrinsics)\ntici_ecam_intrinsics_inv = np.linalg.inv(tici_ecam_intrinsics)\n\n\nFULL_FRAME_SIZE = tici_f_frame_size\nFOCAL = tici_f_focal_length\nfcam_intrinsics = tici_fcam_intrinsics\n\nW, H = FULL_FRAME_SIZE[0], FULL_FRAME_SIZE[1]\n\n\n# device/mesh : x->forward, y-> right, z->down\n# view : x->right, y->down, z->forward\ndevice_frame_from_view_frame = np.array([\n [ 0., 0., 1.],\n [ 1., 0., 0.],\n [ 0., 1., 0.]\n])\nview_frame_from_device_frame = device_frame_from_view_frame.T\n\n\ndef get_calib_from_vp(vp):\n vp_norm = normalize(vp)\n yaw_calib = np.arctan(vp_norm[0])\n pitch_calib = -np.arctan(vp_norm[1]*np.cos(yaw_calib))\n roll_calib = 0\n return roll_calib, pitch_calib, yaw_calib\n\n\n# aka 'extrinsic_matrix'\n# road : x->forward, y -> left, z->up\ndef get_view_frame_from_road_frame(roll, pitch, yaw, height):\n device_from_road = orient.rot_from_euler([roll, pitch, yaw]).dot(np.diag([1, -1, -1]))\n view_from_road = view_frame_from_device_frame.dot(device_from_road)\n return np.hstack((view_from_road, [[0], [height], [0]]))\n\n\n\n# aka 'extrinsic_matrix'\ndef METHOD_NAME(roll, pitch, yaw, height):\n device_from_calib= orient.rot_from_euler([roll, pitch, yaw])\n view_from_calib = view_frame_from_device_frame.dot(device_from_calib)\n return np.hstack((view_from_calib, [[0], [height], [0]]))\n\n\ndef vp_from_ke(m):\n \"\"\"\n Computes the vanishing point from the product of the intrinsic and extrinsic\n matrices C = KE.\n\n The vanishing point is defined as lim x->infinity C (x, 0, 0, 1).T\n \"\"\"\n return (m[0, 0]/m[2, 0], m[1, 0]/m[2, 0])\n\n\ndef roll_from_ke(m):\n # note: different from calibration.h/RollAnglefromKE: i think that one's just wrong\n return np.arctan2(-(m[1, 0] - m[1, 1] * m[2, 0] / m[2, 1]),\n -(m[0, 0] - m[0, 1] * m[2, 0] / m[2, 1]))\n\n\ndef normalize(img_pts, intrinsics=fcam_intrinsics):\n # normalizes image coordinates\n # accepts single pt or array of pts\n intrinsics_inv = np.linalg.inv(intrinsics)\n img_pts = np.array(img_pts)\n input_shape = img_pts.shape\n img_pts = np.atleast_2d(img_pts)\n img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0], 1))))\n img_pts_normalized = img_pts.dot(intrinsics_inv.T)\n img_pts_normalized[(img_pts < 0).any(axis=1)] = np.nan\n return img_pts_normalized[:, :2].reshape(input_shape)\n\n\ndef 
denormalize(img_pts, intrinsics=fcam_intrinsics, width=np.inf, height=np.inf):\n # denormalizes image coordinates\n # accepts single pt or array of pts\n img_pts = np.array(img_pts)\n input_shape = img_pts.shape\n img_pts = np.atleast_2d(img_pts)\n img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0], 1), dtype=img_pts.dtype)))\n img_pts_denormalized = img_pts.dot(intrinsics.T)\n if np.isfinite(width):\n img_pts_denormalized[img_pts_denormalized[:, 0] > width] = np.nan\n img_pts_denormalized[img_pts_denormalized[:, 0] < 0] = np.nan\n if np.isfinite(height):\n img_pts_denormalized[img_pts_denormalized[:, 1] > height] = np.nan\n img_pts_denormalized[img_pts_denormalized[:, 1] < 0] = np.nan\n return img_pts_denormalized[:, :2].reshape(input_shape)\n\n\ndef device_from_ecef(pos_ecef, orientation_ecef, pt_ecef):\n # device from ecef frame\n # device frame is x -> forward, y-> right, z -> down\n # accepts single pt or array of pts\n input_shape = pt_ecef.shape\n pt_ecef = np.atleast_2d(pt_ecef)\n ecef_from_device_rot = orient.rotations_from_quats(orientation_ecef)\n device_from_ecef_rot = ecef_from_device_rot.T\n pt_ecef_rel = pt_ecef - pos_ecef\n pt_device = np.einsum('jk,ik->ij', device_from_ecef_rot, pt_ecef_rel)\n return pt_device.reshape(input_shape)\n\n\ndef img_from_device(pt_device):\n # img coordinates from pts in device frame\n # first transforms to view frame, then to img coords\n # accepts single pt or array of pts\n input_shape = pt_device.shape\n pt_device = np.atleast_2d(pt_device)\n pt_view = np.einsum('jk,ik->ij', view_frame_from_device_frame, pt_device)\n\n # This function should never return negative depths\n pt_view[pt_view[:, 2] < 0] = np.nan\n\n pt_img = pt_view/pt_view[:, 2:3]\n return pt_img.reshape(input_shape)[:, :2]\n"}}},{"rowIdx":2008,"cells":{"id":{"kind":"number","value":2008,"string":"2,008"},"label":{"kind":"string","value":"test tuple contains"},"text":{"kind":"string","value":"from collections import defaultdict\n\nimport pytest\n\nfrom diofant import (Basic, Dict, FiniteSet, Integer, Matrix, Rational, Tuple,\n false, sympify, true)\nfrom diofant.abc import p, q, r, s, x, y, z\nfrom diofant.core.compatibility import is_sequence, iterable\nfrom diofant.core.containers import tuple_wrapper\n\n\n__all__ = ()\n\n\ndef test_Tuple():\n t = (1, 2, 3, 4)\n st = Tuple(*t)\n assert set(sympify(t)) == set(st)\n assert len(t) == len(st)\n assert set(sympify(t[:2])) == set(st[:2])\n assert isinstance(st[:], Tuple)\n assert st == Tuple(1, 2, 3, 4)\n assert st.func(*st.args) == st\n t2 = (p, q, r, s)\n st2 = Tuple(*t2)\n assert st2.atoms() == set(t2)\n assert st == st2.subs({p: 1, q: 2, r: 3, s: 4})\n # issue sympy/sympy#5505\n assert all(isinstance(arg, Basic) for arg in st.args)\n assert Tuple(p, 1).subs({p: 0}) == Tuple(0, 1)\n assert Tuple(p, Tuple(p, 1)).subs({p: 0}) == Tuple(0, Tuple(0, 1))\n\n assert Tuple(t2) == Tuple(Tuple(*t2))\n\n\ndef METHOD_NAME():\n t1, t2 = Tuple(1), Tuple(2)\n assert t1 in Tuple(1, 2, 3, t1, Tuple(t2))\n assert t2 not in Tuple(1, 2, 3, t1, Tuple(t2))\n\n\ndef test_Tuple_concatenation():\n assert Tuple(1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4)\n assert (1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4)\n assert Tuple(1, 2) + (3, 4) == Tuple(1, 2, 3, 4)\n pytest.raises(TypeError, lambda: Tuple(1, 2) + 3)\n pytest.raises(TypeError, lambda: 1 + Tuple(2, 3))\n\n # the Tuple case in __radd__ is only reached when a subclass is involved\n class Tuple2(Tuple):\n def __radd__(self, other):\n return Tuple.__radd__(self, other + other)\n assert Tuple(1, 2) + 
Tuple2(3, 4) == Tuple(1, 2, 1, 2, 3, 4)\n assert Tuple2(1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4)\n\n\ndef test_Tuple_equality():\n assert (Tuple(1, 2) == (1, 2)) is True\n assert (Tuple(1, 2) != (1, 2)) is False\n assert (Tuple(1, 2) == (1, 3)) is False\n assert (Tuple(1, 2) != (1, 3)) is True\n assert (Tuple(1, 2) == Tuple(1, 2)) is True\n assert (Tuple(1, 2) != Tuple(1, 2)) is False\n assert (Tuple(1, 2) == Tuple(1, 3)) is False\n assert (Tuple(1, 2) != Tuple(1, 3)) is True\n\n\ndef test_Tuple_comparision():\n assert (Tuple(1, 3) >= Tuple(-10, 30)) is true\n assert (Tuple(1, 3) <= Tuple(-10, 30)) is false\n assert (Tuple(1, 3) >= Tuple(1, 3)) is true\n assert (Tuple(1, 3) <= Tuple(1, 3)) is true\n\n\ndef test_Tuple_tuple_count():\n assert Tuple(0, 1, 2, 3).tuple_count(4) == 0\n assert Tuple(0, 4, 1, 2, 3).tuple_count(4) == 1\n assert Tuple(0, 4, 1, 4, 2, 3).tuple_count(4) == 2\n assert Tuple(0, 4, 1, 4, 2, 4, 3).tuple_count(4) == 3\n\n\ndef test_Tuple_index():\n assert Tuple(4, 0, 1, 2, 3).index(4) == 0\n assert Tuple(0, 4, 1, 2, 3).index(4) == 1\n assert Tuple(0, 1, 4, 2, 3).index(4) == 2\n assert Tuple(0, 1, 2, 4, 3).index(4) == 3\n assert Tuple(0, 1, 2, 3, 4).index(4) == 4\n\n pytest.raises(ValueError, lambda: Tuple(0, 1, 2, 3).index(4))\n pytest.raises(ValueError, lambda: Tuple(4, 0, 1, 2, 3).index(4, 1))\n pytest.raises(ValueError, lambda: Tuple(0, 1, 2, 3, 4).index(4, 1, 4))\n\n\ndef test_Tuple_mul():\n assert Tuple(1, 2, 3)*2 == Tuple(1, 2, 3, 1, 2, 3)\n assert 2*Tuple(1, 2, 3) == Tuple(1, 2, 3, 1, 2, 3)\n assert Tuple(1, 2, 3)*Integer(2) == Tuple(1, 2, 3, 1, 2, 3)\n assert Integer(2)*Tuple(1, 2, 3) == Tuple(1, 2, 3, 1, 2, 3)\n\n pytest.raises(TypeError, lambda: Tuple(1, 2, 3)*Rational(1, 2))\n pytest.raises(TypeError, lambda: Rational(1, 2)*Tuple(1, 2, 3))\n\n\ndef test_tuple_wrapper():\n\n @tuple_wrapper\n def wrap_tuples_and_return(*t):\n return t\n\n assert wrap_tuples_and_return(p, 1) == (p, 1)\n assert wrap_tuples_and_return((p, 1)) == (Tuple(p, 1),)\n assert wrap_tuples_and_return(1, (p, 2), 3) == (1, Tuple(p, 2), 3)\n\n\ndef test_iterable_is_sequence():\n ordered = [[], (), Tuple(), Matrix([[]])]\n unordered = [set()]\n not_diofant_iterable = [{}, '']\n assert all(is_sequence(i) for i in ordered)\n assert all(not is_sequence(i) for i in unordered)\n assert all(iterable(i) for i in ordered + unordered)\n assert all(not iterable(i) for i in not_diofant_iterable)\n assert all(iterable(i, exclude=None) for i in not_diofant_iterable)\n\n\ndef test_Dict():\n d = Dict({x: 1, y: 2, z: 3})\n assert d[x] == 1\n assert d[y] == 2\n pytest.raises(KeyError, lambda: d[2])\n assert len(d) == 3\n assert set(d.keys()) == {x, y, z}\n assert set(d.values()) == {1, 2, 3}\n assert d.get(5, 'default') == 'default'\n assert x in d\n assert z in d\n assert 5 not in d\n assert d.has(x)\n assert d.has(1) # Diofant Basic .has method\n\n # Test input types\n # input - a python dict\n # input - items as args - Diofant style\n assert (Dict({x: 1, y: 2, z: 3}) ==\n Dict((x, 1), (y, 2), (z, 3)))\n\n pytest.raises(TypeError, lambda: Dict(((x, 1), (y, 2), (z, 3))))\n with pytest.raises(NotImplementedError):\n d[5] = 6 # assert immutability\n\n assert set(d.items()) == {Tuple(x, 1), Tuple(y, 2), Tuple(z, 3)}\n assert set(d) == {x, y, z}\n assert str(d) == '{x: 1, y: 2, z: 3}'\n assert repr(d) == (\"Dict(Tuple(Symbol('x'), Integer(1)), \"\n \"Tuple(Symbol('y'), Integer(2)), \"\n \"Tuple(Symbol('z'), Integer(3)))\")\n\n # Test creating a Dict from a Dict.\n d = Dict({x: 1, y: 2, z: 3})\n assert d == 
Dict(d)\n\n # Test for supporting defaultdict\n d = defaultdict(int)\n assert d[x] == 0\n assert d[y] == 0\n assert d[z] == 0\n assert Dict(d)\n d = Dict(d)\n assert len(d) == 3\n assert set(d) == {x, y, z}\n assert set(d.values()) == {0}\n\n assert list(FiniteSet(*[Dict({x: 1}), Dict({y: 2})]))[0] == Dict({x: 1})\n\n\ndef test_eq_and_args():\n # issue sympy/sympy#5788\n args = [(1, 2), (2, 1)]\n for o in [Dict, Tuple, FiniteSet]:\n if o != Tuple:\n assert o(*args) == o(*reversed(args))\n pair = [o(*args), o(*reversed(args))]\n rpair = reversed(pair)\n assert sorted(pair) == sorted(rpair)\n assert set(o(*args)) # doesn't fail"}}},{"rowIdx":2009,"cells":{"id":{"kind":"number","value":2009,"string":"2,009"},"label":{"kind":"string","value":"test object mutation"},"text":{"kind":"string","value":"# stdlib\nfrom textwrap import dedent\n\n# third party\nfrom faker import Faker\nimport pytest\n\n# syft absolute\nimport syft\nfrom syft.client.client import SyftClient\nfrom syft.node.worker import Worker\nfrom syft.service.action.action_object import ActionObject\nfrom syft.service.action.action_permissions import ActionPermission\nfrom syft.service.code.user_code import UserCodeStatus\nfrom syft.service.context import ChangeContext\nfrom syft.service.request.request import ActionStoreChange\nfrom syft.service.request.request import ObjectMutation\nfrom syft.service.request.request import RequestStatus\nfrom syft.service.request.request import UserCodeStatusChange\nfrom syft.service.request.request_service import RequestService\nfrom syft.service.response import SyftError\nfrom syft.service.response import SyftSuccess\nfrom syft.service.settings.settings_service import SettingsService\nfrom syft.store.document_store import DocumentStore\nfrom syft.store.linked_obj import LinkedObject\n\n\n@pytest.fixture\ndef request_service(document_store: DocumentStore):\n return RequestService(store=document_store)\n\n\ndef get_ds_client(faker: Faker, root_client: SyftClient, guest_client: SyftClient):\n guest_email = faker.email()\n password = \"mysecretpassword\"\n result = root_client.register(\n name=faker.name(),\n email=guest_email,\n password=password,\n password_verify=password,\n )\n assert isinstance(result, SyftSuccess)\n guest_client.login(email=guest_email, password=password)\n return guest_client\n\n\ndef METHOD_NAME(worker: Worker):\n root_client = worker.root_client\n setting = root_client.api.services.settings.get()\n linked_obj = LinkedObject.from_obj(setting, SettingsService, node_uid=worker.id)\n original_name = setting.organization\n new_name = \"Test Organization\"\n\n object_mutation = ObjectMutation(\n linked_obj=linked_obj,\n attr_name=\"organization\",\n match_type=True,\n value=new_name,\n )\n\n change_context = ChangeContext(\n node=worker,\n approving_user_credentials=root_client.credentials.verify_key,\n )\n\n result = object_mutation.apply(change_context)\n\n assert result.is_ok()\n\n setting = root_client.api.services.settings.get()\n\n assert setting.organization == new_name\n\n object_mutation.undo(context=change_context)\n\n setting = root_client.api.services.settings.get()\n\n assert setting.organization == original_name\n\n\ndef test_action_store_change(faker: Faker, worker: Worker):\n root_client = worker.root_client\n dummy_data = [1, 2, 3]\n data = ActionObject.from_obj(dummy_data)\n action_obj = root_client.api.services.action.set(data)\n\n assert action_obj.get() == dummy_data\n\n ds_client = get_ds_client(faker, root_client, worker.guest_client)\n\n action_object_link 
= LinkedObject.from_obj(\n action_obj, node_uid=action_obj.syft_node_uid\n )\n permission_change = ActionStoreChange(\n linked_obj=action_object_link,\n apply_permission_type=ActionPermission.READ,\n )\n\n change_context = ChangeContext(\n node=worker,\n approving_user_credentials=root_client.credentials.verify_key,\n requesting_user_credentials=ds_client.credentials.verify_key,\n )\n\n result = permission_change.apply(change_context)\n\n assert result.is_ok()\n\n action_obj_ptr = ds_client.api.services.action.get_pointer(action_obj.id)\n\n result = action_obj_ptr.get()\n assert result == dummy_data\n\n result = permission_change.undo(change_context)\n assert result.is_ok()\n\n result = action_obj_ptr.get()\n assert isinstance(result, SyftError)\n\n\ndef test_user_code_status_change(faker: Faker, worker: Worker):\n root_client = worker.root_client\n dummy_data = [1, 2, 3]\n data = ActionObject.from_obj(dummy_data)\n action_obj = root_client.api.services.action.set(data)\n\n ds_client = get_ds_client(faker, root_client, worker.guest_client)\n\n @syft.syft_function(\n input_policy=syft.ExactMatch(data=action_obj),\n output_policy=syft.SingleExecutionExactOutput(),\n )\n def simple_function(data):\n return sum(data)\n\n simple_function.code = dedent(simple_function.code)\n result = ds_client.code.submit(simple_function)\n assert isinstance(result, SyftSuccess)\n\n user_code = ds_client.code.get_all()[0]\n\n linked_obj = LinkedObject.from_obj(user_code, node_uid=worker.id)\n\n user_code_change = UserCodeStatusChange(\n value=UserCodeStatus.APPROVED, linked_obj=linked_obj\n )\n\n change_context = ChangeContext(\n node=worker,\n approving_user_credentials=root_client.credentials.verify_key,\n requesting_user_credentials=ds_client.credentials.verify_key,\n )\n\n result = user_code_change.apply(change_context)\n\n user_code = ds_client.code.get_all()[0]\n\n assert user_code.status.approved\n\n result = user_code_change.undo(change_context)\n assert result.is_ok()\n\n user_code = ds_client.code.get_all()[0]\n\n assert not user_code.status.approved\n\n\ndef test_code_accept_deny(faker: Faker, worker: Worker):\n root_client = worker.root_client\n dummy_data = [1, 2, 3]\n data = ActionObject.from_obj(dummy_data)\n action_obj = root_client.api.services.action.set(data)\n\n ds_client = get_ds_client(faker, root_client, worker.guest_client)\n\n @syft.syft_function(\n input_policy=syft.ExactMatch(data=action_obj),\n output_policy=syft.SingleExecutionExactOutput(),\n )\n def simple_function(data):\n return sum(data)\n\n simple_function.code = dedent(simple_function.code)\n\n result = ds_client.code.request_code_execution(simple_function)\n assert not isinstance(result, SyftError)\n\n request = root_client.requests.get_all()[0]\n result = request.accept_by_depositing_result(result=10)\n assert isinstance(result, SyftSuccess)\n\n request = root_client.requests.get_all()[0]\n assert request.status == RequestStatus.APPROVED\n result = ds_client.code.simple_function(data=action_obj)\n assert result.get() == 10\n\n result = request.deny(reason=\"Function output needs differential privacy !!\")\n assert isinstance(result, SyftSuccess)\n\n request = root_client.requests.get_all()[0]\n assert request.status == RequestStatus.REJECTED\n\n user_code = ds_client.code.get_all()[0]\n assert not user_code.status.approved\n\n result = ds_client.code.simple_function(data=action_obj)\n assert isinstance(result, SyftError)\n assert \"UserCodeStatus.DENIED\" in 
result.message"}}},{"rowIdx":2010,"cells":{"id":{"kind":"number","value":2010,"string":"2,010"},"label":{"kind":"string","value":"test head response doesnt support content"},"text":{"kind":"string","value":"from pathlib import PurePosixPath\nfrom typing import Any, Optional\n\nimport pytest\n\nfrom litestar import MediaType, get\nfrom litestar.datastructures import Cookie\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.response import Response\nfrom litestar.response.base import ASGIResponse\nfrom litestar.serialization import default_serializer, get_serializer\nfrom litestar.status_codes import (\n HTTP_100_CONTINUE,\n HTTP_101_SWITCHING_PROTOCOLS,\n HTTP_102_PROCESSING,\n HTTP_103_EARLY_HINTS,\n HTTP_200_OK,\n HTTP_204_NO_CONTENT,\n HTTP_304_NOT_MODIFIED,\n HTTP_500_INTERNAL_SERVER_ERROR,\n)\nfrom litestar.testing import create_test_client\nfrom litestar.types import Empty\n\n\ndef test_response_headers() -> None:\n @get(\"/\")\n def handler() -> Response:\n return Response(content=\"hello world\", media_type=MediaType.TEXT, headers={\"first\": \"123\", \"second\": \"456\"})\n\n with create_test_client(handler) as client:\n response = client.get(\"/\")\n assert response.headers[\"first\"] == \"123\"\n assert response.headers[\"second\"] == \"456\"\n assert response.headers[\"content-length\"] == \"11\"\n assert response.headers[\"content-type\"] == \"text/plain; charset=utf-8\"\n\n\ndef test_response_headers_do_not_lowercase_values() -> None:\n # reproduces: https://github.com/litestar-org/litestar/issues/693\n\n @get(\"/\")\n def handler() -> Response:\n return Response(content=\"hello world\", media_type=MediaType.TEXT, headers={\"foo\": \"BaR\"})\n\n with create_test_client(handler) as client:\n response = client.get(\"/\")\n assert response.headers[\"foo\"] == \"BaR\"\n\n\n@pytest.mark.parametrize(\"as_instance\", [True, False])\ndef test_set_cookie(as_instance: bool) -> None:\n @get(\"/\")\n def handler() -> Response:\n response = Response(content=None)\n if as_instance:\n response.set_cookie(Cookie(key=\"test\", value=\"abc\", max_age=60, expires=60, secure=True, httponly=True))\n else:\n response.set_cookie(key=\"test\", value=\"abc\", max_age=60, expires=60, secure=True, httponly=True)\n assert len(response.cookies) == 1\n return response\n\n with create_test_client(handler) as client:\n response = client.get(\"/\")\n assert response.cookies.get(\"test\") == \"abc\"\n\n\ndef test_delete_cookie() -> None:\n @get(\"/create\")\n def create_cookie_handler() -> Response:\n response = Response(content=None)\n response.set_cookie(\"test\", \"abc\", max_age=60, expires=60, secure=True, httponly=True)\n assert len(response.cookies) == 1\n return response\n\n @get(\"/delete\")\n def delete_cookie_handler() -> Response:\n response = Response(content=None)\n response.delete_cookie(\n \"test\",\n \"abc\",\n )\n assert len(response.cookies) == 1\n return response\n\n with create_test_client(route_handlers=[create_cookie_handler, delete_cookie_handler]) as client:\n response = client.get(\"/create\")\n assert response.cookies.get(\"test\") == \"abc\"\n assert client.cookies.get(\"test\") == \"abc\"\n response = client.get(\"/delete\")\n assert response.cookies.get(\"test\") is None\n # the commented out assert fails, because of the starlette test client's behaviour - which doesn't clear\n # cookies.\n\n\n@pytest.mark.parametrize(\n \"media_type, expected, should_have_content_length\",\n ((MediaType.TEXT, b\"\", False), (MediaType.HTML, b\"\", False), 
(MediaType.JSON, b\"null\", True)),\n)\ndef test_empty_response(media_type: MediaType, expected: bytes, should_have_content_length: bool) -> None:\n @get(\"/\", media_type=media_type)\n def handler() -> None:\n return\n\n with create_test_client(handler) as client:\n response = client.get(\"/\")\n assert response.content == expected\n assert response.headers[\"content-length\"] == str(len(expected))\n\n\n@pytest.mark.parametrize(\"status_code\", (HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED))\ndef test_response_without_payload(status_code: int) -> None:\n @get(\"/\")\n def handler() -> Response:\n return Response(b\"\", status_code=status_code)\n\n with create_test_client(handler) as client:\n response = client.get(\"/\")\n\n assert \"content-type\" not in response.headers\n assert \"content-length\" not in response.headers\n\n\n@pytest.mark.parametrize(\n \"status, body, should_raise\",\n (\n (HTTP_100_CONTINUE, None, False),\n (HTTP_101_SWITCHING_PROTOCOLS, None, False),\n (HTTP_102_PROCESSING, None, False),\n (HTTP_103_EARLY_HINTS, None, False),\n (HTTP_204_NO_CONTENT, None, False),\n (HTTP_100_CONTINUE, \"1\", True),\n (HTTP_101_SWITCHING_PROTOCOLS, \"1\", True),\n (HTTP_102_PROCESSING, \"1\", True),\n (HTTP_103_EARLY_HINTS, \"1\", True),\n (HTTP_204_NO_CONTENT, \"1\", True),\n ),\n)\ndef test_statuses_without_body(status: int, body: Optional[str], should_raise: bool) -> None:\n @get(\"/\")\n def handler() -> Response:\n return Response(content=body, status_code=status)\n\n with create_test_client(handler) as client:\n response = client.get(\"/\")\n if should_raise:\n assert response.status_code == HTTP_500_INTERNAL_SERVER_ERROR\n else:\n assert response.status_code == status\n assert \"content-length\" not in response.headers\n\n\n@pytest.mark.parametrize(\n \"body, media_type, should_raise\",\n (\n (\"\", MediaType.TEXT, False),\n (\"abc\", MediaType.TEXT, False),\n (b\"\", MediaType.HTML, False),\n (b\"abc\", MediaType.HTML, False),\n ({\"key\": \"value\"}, MediaType.TEXT, True),\n ([1, 2, 3], MediaType.TEXT, True),\n ({\"key\": \"value\"}, MediaType.HTML, True),\n ([1, 2, 3], MediaType.HTML, True),\n ([], MediaType.HTML, False),\n ([], MediaType.TEXT, False),\n ({}, MediaType.HTML, False),\n ({}, MediaType.TEXT, False),\n ({\"abc\": \"def\"}, MediaType.JSON, False),\n (Empty, MediaType.JSON, True),\n ),\n)\ndef test_render_method(body: Any, media_type: MediaType, should_raise: bool) -> None:\n @get(\"/\", media_type=media_type)\n def handler() -> Any:\n return body\n\n with create_test_client(handler) as client:\n response = client.get(\"/\")\n if should_raise:\n assert response.status_code == HTTP_500_INTERNAL_SERVER_ERROR\n else:\n assert response.status_code == HTTP_200_OK\n\n\ndef test_get_serializer() -> None:\n class Foo:\n pass\n\n foo_encoder = {Foo: lambda f: \"it's a foo\"}\n path_encoder = {PurePosixPath: lambda p: \"it's a path\"}\n\n class FooResponse(Response):\n type_encoders = foo_encoder\n\n assert get_serializer() is default_serializer\n\n assert get_serializer(type_encoders=foo_encoder)(Foo()) == \"it's a foo\"\n assert get_serializer(type_encoders=path_encoder)(PurePosixPath()) == \"it's a path\"\n\n assert get_serializer(FooResponse(None).type_encoders)(Foo()) == \"it's a foo\"\n assert (\n get_serializer(FooResponse(None, type_encoders={Foo: lambda f: \"foo\"}).response_type_encoders)(Foo()) == \"foo\"\n )\n\n\ndef METHOD_NAME() -> None:\n with pytest.raises(ImproperlyConfiguredException):\n ASGIResponse(body=b\"hello world\", media_type=MediaType.TEXT, 
is_head_response=True)"}}},{"rowIdx":2011,"cells":{"id":{"kind":"number","value":2011,"string":"2,011"},"label":{"kind":"string","value":"provisioning state"},"text":{"kind":"string","value":"# coding=utf-8\n# *** WARNING: this file was generated by pulumi. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nimport copy\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom ... import _utilities\nfrom . import outputs\n\n__all__ = [\n 'GetOutboundEndpointResult',\n 'AwaitableGetOutboundEndpointResult',\n 'get_outbound_endpoint',\n 'get_outbound_endpoint_output',\n]\n\n@pulumi.output_type\nclass GetOutboundEndpointResult:\n \"\"\"\n Describes an outbound endpoint for a DNS resolver.\n \"\"\"\n def __init__(__self__, etag=None, id=None, location=None, name=None, METHOD_NAME=None, resource_guid=None, subnet=None, system_data=None, tags=None, type=None):\n if etag and not isinstance(etag, str):\n raise TypeError(\"Expected argument 'etag' to be a str\")\n pulumi.set(__self__, \"etag\", etag)\n if id and not isinstance(id, str):\n raise TypeError(\"Expected argument 'id' to be a str\")\n pulumi.set(__self__, \"id\", id)\n if location and not isinstance(location, str):\n raise TypeError(\"Expected argument 'location' to be a str\")\n pulumi.set(__self__, \"location\", location)\n if name and not isinstance(name, str):\n raise TypeError(\"Expected argument 'name' to be a str\")\n pulumi.set(__self__, \"name\", name)\n if METHOD_NAME and not isinstance(METHOD_NAME, str):\n raise TypeError(\"Expected argument 'provisioning_state' to be a str\")\n pulumi.set(__self__, \"provisioning_state\", METHOD_NAME)\n if resource_guid and not isinstance(resource_guid, str):\n raise TypeError(\"Expected argument 'resource_guid' to be a str\")\n pulumi.set(__self__, \"resource_guid\", resource_guid)\n if subnet and not isinstance(subnet, dict):\n raise TypeError(\"Expected argument 'subnet' to be a dict\")\n pulumi.set(__self__, \"subnet\", subnet)\n if system_data and not isinstance(system_data, dict):\n raise TypeError(\"Expected argument 'system_data' to be a dict\")\n pulumi.set(__self__, \"system_data\", system_data)\n if tags and not isinstance(tags, dict):\n raise TypeError(\"Expected argument 'tags' to be a dict\")\n pulumi.set(__self__, \"tags\", tags)\n if type and not isinstance(type, str):\n raise TypeError(\"Expected argument 'type' to be a str\")\n pulumi.set(__self__, \"type\", type)\n\n @property\n @pulumi.getter\n def etag(self) -> str:\n \"\"\"\n ETag of the outbound endpoint.\n \"\"\"\n return pulumi.get(self, \"etag\")\n\n @property\n @pulumi.getter\n def id(self) -> str:\n \"\"\"\n Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}\n \"\"\"\n return pulumi.get(self, \"id\")\n\n @property\n @pulumi.getter\n def location(self) -> str:\n \"\"\"\n The geo-location where the resource lives\n \"\"\"\n return pulumi.get(self, \"location\")\n\n @property\n @pulumi.getter\n def name(self) -> str:\n \"\"\"\n The name of the resource\n \"\"\"\n return pulumi.get(self, \"name\")\n\n @property\n @pulumi.getter(name=\"provisioningState\")\n def METHOD_NAME(self) -> str:\n \"\"\"\n The current provisioning state of the outbound endpoint. 
This is a read-only property and any attempt to set this value will be ignored.\n \"\"\"\n return pulumi.get(self, \"provisioning_state\")\n\n @property\n @pulumi.getter(name=\"resourceGuid\")\n def resource_guid(self) -> str:\n \"\"\"\n The resourceGuid property of the outbound endpoint resource.\n \"\"\"\n return pulumi.get(self, \"resource_guid\")\n\n @property\n @pulumi.getter\n def subnet(self) -> 'outputs.SubResourceResponse':\n \"\"\"\n The reference to the subnet used for the outbound endpoint.\n \"\"\"\n return pulumi.get(self, \"subnet\")\n\n @property\n @pulumi.getter(name=\"systemData\")\n def system_data(self) -> 'outputs.SystemDataResponse':\n \"\"\"\n Metadata pertaining to creation and last modification of the resource.\n \"\"\"\n return pulumi.get(self, \"system_data\")\n\n @property\n @pulumi.getter\n def tags(self) -> Optional[Mapping[str, str]]:\n \"\"\"\n Resource tags.\n \"\"\"\n return pulumi.get(self, \"tags\")\n\n @property\n @pulumi.getter\n def type(self) -> str:\n \"\"\"\n The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or \"Microsoft.Storage/storageAccounts\"\n \"\"\"\n return pulumi.get(self, \"type\")\n\n\nclass AwaitableGetOutboundEndpointResult(GetOutboundEndpointResult):\n # pylint: disable=using-constant-test\n def __await__(self):\n if False:\n yield self\n return GetOutboundEndpointResult(\n etag=self.etag,\n id=self.id,\n location=self.location,\n name=self.name,\n METHOD_NAME=self.METHOD_NAME,\n resource_guid=self.resource_guid,\n subnet=self.subnet,\n system_data=self.system_data,\n tags=self.tags,\n type=self.type)\n\n\ndef get_outbound_endpoint(dns_resolver_name: Optional[str] = None,\n outbound_endpoint_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOutboundEndpointResult:\n \"\"\"\n Gets properties of an outbound endpoint for a DNS resolver.\n\n\n :param str dns_resolver_name: The name of the DNS resolver.\n :param str outbound_endpoint_name: The name of the outbound endpoint for the DNS resolver.\n :param str resource_group_name: The name of the resource group. 
The name is case insensitive.\n \"\"\"\n __args__ = dict()\n __args__['dnsResolverName'] = dns_resolver_name\n __args__['outboundEndpointName'] = outbound_endpoint_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:network/v20220701:getOutboundEndpoint', __args__, opts=opts, typ=GetOutboundEndpointResult).value\n\n return AwaitableGetOutboundEndpointResult(\n etag=pulumi.get(__ret__, 'etag'),\n id=pulumi.get(__ret__, 'id'),\n location=pulumi.get(__ret__, 'location'),\n name=pulumi.get(__ret__, 'name'),\n METHOD_NAME=pulumi.get(__ret__, 'provisioning_state'),\n resource_guid=pulumi.get(__ret__, 'resource_guid'),\n subnet=pulumi.get(__ret__, 'subnet'),\n system_data=pulumi.get(__ret__, 'system_data'),\n tags=pulumi.get(__ret__, 'tags'),\n type=pulumi.get(__ret__, 'type'))\n\n\n@_utilities.lift_output_func(get_outbound_endpoint)\ndef get_outbound_endpoint_output(dns_resolver_name: Optional[pulumi.Input[str]] = None,\n outbound_endpoint_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetOutboundEndpointResult]:\n \"\"\"\n Gets properties of an outbound endpoint for a DNS resolver.\n\n\n :param str dns_resolver_name: The name of the DNS resolver.\n :param str outbound_endpoint_name: The name of the outbound endpoint for the DNS resolver.\n :param str resource_group_name: The name of the resource group. The name is case insensitive.\n \"\"\"\n ..."}}},{"rowIdx":2012,"cells":{"id":{"kind":"number","value":2012,"string":"2,012"},"label":{"kind":"string","value":"is sequence finished"},"text":{"kind":"string","value":"from typing import Any, Callable, Optional\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\n\ntry:\n from transformers.generation_logits_process import (\n LogitsProcessorList,\n TemperatureLogitsWarper,\n TopKLogitsWarper,\n TopPLogitsWarper,\n )\nexcept ImportError:\n from transformers.generation import (\n LogitsProcessorList,\n TemperatureLogitsWarper,\n TopKLogitsWarper,\n TopPLogitsWarper,\n )\n\n\ndef prepare_logits_processor(\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n temperature: Optional[float] = None,\n) -> LogitsProcessorList:\n processor_list = LogitsProcessorList()\n if temperature is not None and temperature != 1.0:\n processor_list.append(TemperatureLogitsWarper(temperature))\n if top_k is not None and top_k != 0:\n processor_list.append(TopKLogitsWarper(top_k))\n if top_p is not None and top_p < 1.0:\n processor_list.append(TopPLogitsWarper(top_p))\n return processor_list\n\n\ndef METHOD_NAME(unfinished_sequences: torch.Tensor) -> bool:\n if dist.is_initialized() and dist.get_world_size() > 1:\n # consider DP\n unfinished_sequences = unfinished_sequences.clone()\n dist.all_reduce(unfinished_sequences)\n return unfinished_sequences.max() == 0\n\n\ndef sample(\n model: nn.Module,\n input_ids: torch.Tensor,\n max_length: int,\n early_stopping: bool = False,\n eos_token_id: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n temperature: Optional[float] = None,\n prepare_inputs_fn: Optional[Callable[[torch.Tensor, Any], dict]] = None,\n update_model_kwargs_fn: Optional[Callable[[dict, Any], dict]] = None,\n **model_kwargs\n) -> torch.Tensor:\n if input_ids.size(1) >= max_length:\n return input_ids\n\n 
logits_processor = prepare_logits_processor(top_k, top_p, temperature)\n unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)\n\n for _ in range(input_ids.size(1), max_length):\n model_inputs = (\n prepare_inputs_fn(input_ids, **model_kwargs)\n if prepare_inputs_fn is not None\n else {\"input_ids\": input_ids}\n )\n outputs = model(**model_inputs)\n\n next_token_logits = outputs[\"logits\"][:, -1, :]\n # pre-process distribution\n next_token_logits = logits_processor(input_ids, next_token_logits)\n # sample\n probs = torch.softmax(next_token_logits, dim=-1, dtype=torch.float)\n next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n\n # finished sentences should have their next token be a padding token\n if eos_token_id is not None:\n if pad_token_id is None:\n raise ValueError(\n \"If `eos_token_id` is defined, make sure that `pad_token_id` is defined.\"\n )\n next_tokens = next_tokens * unfinished_sequences + pad_token_id * (\n 1 - unfinished_sequences\n )\n\n # update generated ids, model inputs for next step\n input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n if update_model_kwargs_fn is not None:\n model_kwargs = update_model_kwargs_fn(outputs, **model_kwargs)\n\n # if eos_token was found in one sentence, set sentence to finished\n if eos_token_id is not None:\n unfinished_sequences = unfinished_sequences.mul(\n (next_tokens != eos_token_id).long()\n )\n\n # stop when each sentence is finished if early_stopping=True\n if early_stopping and METHOD_NAME(unfinished_sequences):\n break\n\n return input_ids\n\n\ndef generate(\n model: nn.Module,\n input_ids: torch.Tensor,\n max_length: int,\n num_beams: int = 1,\n do_sample: bool = True,\n early_stopping: bool = False,\n eos_token_id: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n temperature: Optional[float] = None,\n prepare_inputs_fn: Optional[Callable[[torch.Tensor, Any], dict]] = None,\n update_model_kwargs_fn: Optional[Callable[[dict, Any], dict]] = None,\n **model_kwargs\n) -> torch.Tensor:\n \"\"\"Generate token sequence. The returned sequence is input_ids + generated_tokens.\n\n Args:\n model (nn.Module): model\n input_ids (torch.Tensor): input sequence\n max_length (int): max length of the returned sequence\n num_beams (int, optional): number of beams. Defaults to 1.\n do_sample (bool, optional): whether to do sample. Defaults to True.\n early_stopping (bool, optional): if True, the sequence length may be smaller than max_length due to finding eos. Defaults to False.\n eos_token_id (Optional[int], optional): end of sequence token id. Defaults to None.\n pad_token_id (Optional[int], optional): pad token id. Defaults to None.\n top_k (Optional[int], optional): the number of highest probability vocabulary tokens to keep for top-k-filtering. Defaults to None.\n top_p (Optional[float], optional): If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. Defaults to None.\n temperature (Optional[float], optional): The value used to module the next token probabilities. Defaults to None.\n prepare_inputs_fn (Optional[Callable[[torch.Tensor, Any], dict]], optional): Function to preprocess model inputs. Arguments of this function should be input_ids and model_kwargs. Defaults to None.\n update_model_kwargs_fn (Optional[Callable[[dict, Any], dict]], optional): Function to update model_kwargs based on outputs. 
Arguments of this function should be outputs and model_kwargs. Defaults to None.\n \"\"\"\n is_greedy_gen_mode = (num_beams == 1) and do_sample is False\n is_sample_gen_mode = (num_beams == 1) and do_sample is True\n is_beam_gen_mode = (num_beams > 1) and do_sample is False\n if is_greedy_gen_mode:\n # run greedy search\n raise NotImplementedError\n elif is_sample_gen_mode:\n # run sample\n return sample(\n model,\n input_ids,\n max_length,\n early_stopping=early_stopping,\n eos_token_id=eos_token_id,\n pad_token_id=pad_token_id,\n top_k=top_k,\n top_p=top_p,\n temperature=temperature,\n prepare_inputs_fn=prepare_inputs_fn,\n update_model_kwargs_fn=update_model_kwargs_fn,\n **model_kwargs\n )\n elif is_beam_gen_mode:\n raise NotImplementedError\n else:\n raise ValueError(\"Unsupported generation mode\")"}}},{"rowIdx":2013,"cells":{"id":{"kind":"number","value":2013,"string":"2,013"},"label":{"kind":"string","value":"test trotter hamiltonian scalar mul"},"text":{"kind":"string","value":"\"\"\"Test Trotter Hamiltonian methods from `qibo/core/hamiltonians.py`.\"\"\"\nimport numpy as np\nimport pytest\n\nfrom qibo import hamiltonians\nfrom qibo.backends import NumpyBackend\nfrom qibo.quantum_info import random_hermitian, random_statevector\n\nfrom .utils import random_complex\n\n\n@pytest.mark.parametrize(\"nqubits\", [3, 4])\n@pytest.mark.parametrize(\"model\", [\"TFIM\", \"XXZ\", \"Y\", \"MaxCut\"])\ndef test_trotter_hamiltonian_to_dense(backend, nqubits, model):\n \"\"\"Test that Trotter Hamiltonian dense form agrees with normal Hamiltonian.\"\"\"\n local_ham = getattr(hamiltonians, model)(nqubits, dense=False, backend=backend)\n target_ham = getattr(hamiltonians, model)(nqubits, backend=backend)\n final_ham = local_ham.dense\n backend.assert_allclose(final_ham.matrix, target_ham.matrix, atol=1e-15)\n\n\ndef METHOD_NAME(backend, nqubits=3):\n \"\"\"Test multiplication of Trotter Hamiltonian with scalar.\"\"\"\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)\n target_ham = 2 * hamiltonians.TFIM(nqubits, h=1.0, backend=backend)\n local_dense = (2 * local_ham).dense\n backend.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)\n local_dense = (local_ham * 2).dense\n backend.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n\ndef test_trotter_hamiltonian_scalar_add(backend, nqubits=4):\n \"\"\"Test addition of Trotter Hamiltonian with scalar.\"\"\"\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)\n target_ham = 2 + hamiltonians.TFIM(nqubits, h=1.0, backend=backend)\n local_dense = (2 + local_ham).dense\n backend.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)\n local_dense = (local_ham + 2).dense\n backend.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n\ndef test_trotter_hamiltonian_scalar_sub(backend, nqubits=3):\n \"\"\"Test subtraction of Trotter Hamiltonian with scalar.\"\"\"\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)\n target_ham = 2 - hamiltonians.TFIM(nqubits, h=1.0, backend=backend)\n local_dense = (2 - local_ham).dense\n backend.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n target_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) - 2\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)\n local_dense = (local_ham - 2).dense\n 
backend.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n\ndef test_trotter_hamiltonian_operator_add_and_sub(backend, nqubits=3):\n \"\"\"Test addition and subtraction between Trotter Hamiltonians.\"\"\"\n local_ham1 = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)\n local_ham2 = hamiltonians.TFIM(nqubits, h=0.5, dense=False, backend=backend)\n\n local_ham = local_ham1 + local_ham2\n target_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) + hamiltonians.TFIM(\n nqubits, h=0.5, backend=backend\n )\n dense = local_ham.dense\n backend.assert_allclose(dense.matrix, target_ham.matrix)\n\n local_ham = local_ham1 - local_ham2\n target_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) - hamiltonians.TFIM(\n nqubits, h=0.5, backend=backend\n )\n dense = local_ham.dense\n backend.assert_allclose(dense.matrix, target_ham.matrix)\n\n\n@pytest.mark.parametrize(\"nqubits,normalize\", [(3, False), (4, False)])\ndef test_trotter_hamiltonian_matmul(backend, nqubits, normalize):\n \"\"\"Test Trotter Hamiltonian expectation value.\"\"\"\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend)\n dense_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend)\n\n state = backend.cast(random_complex((2**nqubits,)))\n trotter_ev = local_ham.expectation(state, normalize)\n target_ev = dense_ham.expectation(state, normalize)\n backend.assert_allclose(trotter_ev, target_ev)\n\n state = random_complex((2**nqubits,))\n trotter_ev = local_ham.expectation(state, normalize)\n target_ev = dense_ham.expectation(state, normalize)\n backend.assert_allclose(trotter_ev, target_ev)\n\n trotter_matmul = local_ham @ state\n target_matmul = dense_ham @ state\n backend.assert_allclose(trotter_matmul, target_matmul)\n\n\ndef test_trotter_hamiltonian_three_qubit_term(backend):\n \"\"\"Test creating ``TrotterHamiltonian`` with three qubit term.\"\"\"\n from scipy.linalg import expm\n\n from qibo.hamiltonians.terms import HamiltonianTerm\n\n numpy_backend = NumpyBackend()\n\n m1 = random_hermitian(2**3, backend=numpy_backend)\n m2 = random_hermitian(2**2, backend=numpy_backend)\n m3 = random_hermitian(2**1, backend=numpy_backend)\n\n terms = [\n HamiltonianTerm(m1, 0, 1, 2),\n HamiltonianTerm(m2, 2, 3),\n HamiltonianTerm(m3, 1),\n ]\n m1 = backend.cast(m1, dtype=m1.dtype)\n m2 = backend.cast(m2, dtype=m2.dtype)\n m3 = backend.cast(m3, dtype=m3.dtype)\n\n ham = hamiltonians.SymbolicHamiltonian(backend=backend)\n ham.terms = terms\n\n # Test that the `TrotterHamiltonian` dense matrix is correct\n eye = np.eye(2, dtype=complex)\n eye = backend.cast(eye, dtype=eye.dtype)\n mm1 = np.kron(m1, eye)\n mm2 = np.kron(np.kron(eye, eye), m2)\n mm3 = np.kron(np.kron(eye, m3), np.kron(eye, eye))\n target_ham = hamiltonians.Hamiltonian(4, mm1 + mm2 + mm3, backend=backend)\n backend.assert_allclose(ham.matrix, target_ham.matrix)\n\n dt = 1e-2\n initial_state = random_statevector(2**4, backend=backend)\n circuit = ham.circuit(dt=dt)\n final_state = backend.execute_circuit(circuit, np.copy(initial_state))\n mm1 = backend.to_numpy(mm1)\n mm2 = backend.to_numpy(mm2)\n mm3 = backend.to_numpy(mm3)\n u = [expm(-0.5j * dt * (mm1 + mm3)), expm(-0.5j * dt * mm2)]\n u = backend.cast(u)\n target_state = np.dot(u[1], np.dot(u[0], initial_state))\n target_state = np.dot(u[0], np.dot(u[1], target_state))\n backend.assert_allclose(final_state, target_state)\n\n\ndef test_old_trotter_hamiltonian_errors():\n \"\"\"Check errors when creating the deprecated ``TrotterHamiltonian`` object.\"\"\"\n with 
pytest.raises(NotImplementedError):\n h = hamiltonians.TrotterHamiltonian()\n with pytest.raises(NotImplementedError):\n h = hamiltonians.TrotterHamiltonian.from_symbolic(0, 1)"}}},{"rowIdx":2014,"cells":{"id":{"kind":"number","value":2014,"string":"2,014"},"label":{"kind":"string","value":"test given recognizer result then one is"},"text":{"kind":"string","value":"import pytest\n\nfrom presidio_anonymizer.entities import InvalidParamException, RecognizerResult\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"start, end\",\n [\n (0, 10),\n (2, 8),\n (0, 8),\n (0, 10),\n ],\n # fmt: on\n)\ndef test_given_recognizer_results_then_one_contains_another(start, end):\n first = create_recognizer_result(\"entity\", 0, 0, 10)\n second = create_recognizer_result(\"entity\", 0, start, end)\n\n assert first.contains(second)\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"start, end\",\n [\n (4, 10),\n (5, 11),\n (0, 5),\n (0, 6),\n ],\n # fmt: on\n)\ndef test_given_recognizer_result_then_they_do_not_contain_one_another(start, end):\n first = create_recognizer_result(\"entity\", 0, 5, 10)\n second = create_recognizer_result(\"entity\", 0, start, end)\n\n assert not first.contains(second)\n\n\ndef test_given_recognizer_results_with_same_indices_then_indices_are_equal():\n first = create_recognizer_result(\"entity\", 0, 0, 10)\n second = create_recognizer_result(\"entity\", 0, 0, 10)\n\n assert first.equal_indices(second)\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"start, end\",\n [\n (4, 10),\n (5, 11),\n (0, 5),\n (0, 6),\n ],\n # fmt: on\n)\ndef test_given_recognizer_results_with_different_indices_then_indices_are_not_equal(\n start, end\n):\n first = create_recognizer_result(\"entity\", 0, 5, 10)\n second = create_recognizer_result(\"entity\", 0, start, end)\n\n assert not first.equal_indices(second)\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"start, end, err\",\n [\n (\"0\", 10,\n \"Invalid parameter value for start. Expecting 'number', but got 'string'.\"),\n (0, \"10\",\n \"Invalid parameter value for end. 
Expecting 'number', but got 'string'.\"),\n ],\n # fmt: on\n)\ndef test_given_invalid_string_start_instead_of_int_then_we_fail(start, end, err):\n with pytest.raises(InvalidParamException, match=err):\n create_recognizer_result(\"bla\", 0.2, start, end)\n\n\ndef test_given_identical_recognizer_results_then_they_are_equal():\n first = create_recognizer_result(\"bla\", 0.2, 0, 10)\n second = create_recognizer_result(\"bla\", 0.2, 0, 10)\n\n assert first == second\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"entity_type, score, start, end\",\n [\n (\"bla\", 0.2, 4, 10),\n (\"changed\", 0.2, 0, 10),\n (\"bla\", 0.2, 0, 11),\n (\"bla\", 0.3, 0, 10),\n ],\n # fmt: on\n)\ndef test_given_different_recognizer_result_then_they_are_not_equal(\n entity_type, score, start, end\n):\n first = create_recognizer_result(\"bla\", 0.2, 0, 10)\n second = create_recognizer_result(entity_type, score, start, end)\n\n assert first != second\n\n\ndef test_given_recognizer_result_then_their_hash_is_equal():\n first = create_recognizer_result(\"entity\", 0, 0, 10)\n second = create_recognizer_result(\"entity\", 0, 0, 10)\n\n assert first.__hash__() == second.__hash__()\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"entity_type, score, start, end\",\n [\n (\"bla\", 0.2, 4, 10),\n (\"changed\", 0.2, 0, 10),\n (\"bla\", 0.2, 0, 11),\n (\"bla\", 0.3, 0, 10),\n ],\n # fmt: on\n)\ndef test_given_different_recognizer_results_then_hash_is_not_equal(\n entity_type, score, start, end\n):\n first = create_recognizer_result(\"bla\", 0.2, 0, 10)\n second = create_recognizer_result(entity_type, score, start, end)\n\n assert first.__hash__() != second.__hash__()\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"entity_type, score, start, end\",\n [\n (\"bla\", 0.2, 0, 10),\n (\"changed\", 0.2, 2, 10),\n (\"bla\", 0.3, 0, 11),\n (\"bla\", 0.1, 0, 10),\n ],\n # fmt: on\n)\ndef test_given_recognizer_results_with_conflicting_indices_then_there_is_a_conflict(\n entity_type, score, start, end\n):\n first = create_recognizer_result(\"bla\", 0.2, 2, 10)\n second = create_recognizer_result(entity_type, score, start, end)\n\n assert first.has_conflict(second)\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"entity_type, score, start, end\",\n [\n (\"bla\", 0.2, 3, 10),\n (\"changed\", 0.1, 2, 10),\n (\"bla\", 0.3, 0, 9),\n ],\n # fmt: on\n)\ndef test_given_recognizer_results_with_no_conflicting_indices_then_there_is_no_conflict(\n entity_type, score, start, end\n):\n first = create_recognizer_result(\"bla\", 0.2, 2, 10)\n second = create_recognizer_result(entity_type, score, start, end)\n\n assert not first.has_conflict(second)\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"request_json, result_text\",\n [\n ({}, \"Invalid input, result must contain start\",),\n ({\n \"end\": 32,\n \"score\": 0.8,\n \"entity_type\": \"NUMBER\"\n }, \"Invalid input, result must contain start\",),\n ({\n \"start\": 28,\n \"score\": 0.8,\n \"entity_type\": \"NUMBER\"\n }, \"Invalid input, result must contain end\",),\n ({\n \"start\": 28,\n \"end\": 32,\n \"entity_type\": \"NUMBER\"\n }, \"Invalid input, analyzer result must contain score\",),\n ({\n \"start\": 28,\n \"end\": 32,\n \"score\": 0.8,\n }, \"Invalid input, result must contain entity_type\",),\n ],\n # fmt: on\n)\ndef test_given_json_for_creating_recognizer_result_without_text_then_creation_fails(\n request_json, result_text\n):\n with pytest.raises(InvalidParamException) as e:\n RecognizerResult.from_json(request_json)\n assert result_text == e.value.err_msg\n\n\ndef 
test_given_valid_json_for_creating_recognizer_result_then_creation_is_successful():\n data = create_recognizer_result(\"NUMBER\", 0.8, 0, 32)\n assert data.start == 0\n assert data.end == 32\n assert data.score == 0.8\n assert data.entity_type == \"NUMBER\"\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"start, end\",\n [\n (4, 10),\n (4, 9),\n (0, 2),\n (5, 9),\n ],\n # fmt: on\n)\ndef test_given_recognizer_results_then_one_is_greater_then_another(start, end):\n first = create_recognizer_result(\"entity\", 0, 5, 10)\n second = create_recognizer_result(\"entity\", 0, start, end)\n\n assert first.__gt__(second)\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"start, end\",\n [\n (5, 10),\n (6, 12),\n (6, 7),\n ],\n # fmt: on\n)\ndef METHOD_NAME(start, end):\n first = create_recognizer_result(\"entity\", 0, 5, 10)\n second = create_recognizer_result(\"entity\", 0, start, end)\n\n assert not first.__gt__(second)\n\n\ndef test_given_endpoint_larger_then_start_point_then_we_fail():\n with pytest.raises(InvalidParamException) as e:\n create_recognizer_result(\"entity\", 0, 10, 0)\n assert (\n e.value.err_msg == \"Invalid input, start index '10' \"\n \"must be smaller than end index '0'\"\n )\n\n\ndef test_given_endpoint_equal_to_start_point_then_we_succeed():\n assert create_recognizer_result(\"entity\", 0, 0, 0)\n\n\n@pytest.mark.parametrize(\n # fmt: off\n \"start, end\",\n [\n (-1, 10),\n (6, -12),\n (-2, -2),\n ],\n # fmt: on\n)\ndef test_given_negative_start_or_endpoint_then_we_fail(start, end):\n with pytest.raises(\n InvalidParamException,\n match=\"Invalid input, result start and end must be positive\",\n ):\n create_recognizer_result(\"entity\", 0, start, end)\n\n\ndef create_recognizer_result(entity_type: str, score: float, start: int, end: int):\n data = {\"entity_type\": entity_type, \"score\": score, \"start\": start, \"end\": end}\n return RecognizerResult.from_json(data)"}}},{"rowIdx":2015,"cells":{"id":{"kind":"number","value":2015,"string":"2,015"},"label":{"kind":"string","value":"dict has value"},"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# widgets.py - Mycodo core utils\n#\n# Copyright (C) 2015-2020 Kyle T. Gabriel \n#\n# This file is part of Mycodo\n#\n# Mycodo is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycodo is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycodo. 
If not, see .\n#\n# Contact at kylegabriel.com\nimport logging\nimport os\n\nfrom mycodo.config import PATH_WIDGETS\nfrom mycodo.config import PATH_WIDGETS_CUSTOM\nfrom mycodo.utils.modules import load_module_from_file\n\nlogger = logging.getLogger(\"mycodo.utils.widgets\")\n\n\ndef parse_widget_information(exclude_custom=False):\n \"\"\"Parses the variables assigned in each Widget and return a dictionary of IDs and values.\"\"\"\n def METHOD_NAME(dict_inp, widget_cus, key, force_type=None):\n if (key in widget_cus.WIDGET_INFORMATION and\n (widget_cus.WIDGET_INFORMATION[key] or\n widget_cus.WIDGET_INFORMATION[key] == 0)):\n if force_type == 'list':\n if isinstance(widget_cus.WIDGET_INFORMATION[key], list):\n dict_inp[widget_cus.WIDGET_INFORMATION['widget_name_unique']][key] = \\\n widget_cus.WIDGET_INFORMATION[key]\n else:\n dict_inp[widget_cus.WIDGET_INFORMATION['widget_name_unique']][key] = \\\n [widget_cus.WIDGET_INFORMATION[key]]\n else:\n dict_inp[widget_cus.WIDGET_INFORMATION['widget_name_unique']][key] = \\\n widget_cus.WIDGET_INFORMATION[key]\n return dict_inp\n\n excluded_files = [\n '__init__.py', '__pycache__', 'base_widget.py', 'custom_widgets',\n 'examples', 'tmp_widgets'\n ]\n\n widget_paths = [PATH_WIDGETS]\n\n if not exclude_custom:\n widget_paths.append(PATH_WIDGETS_CUSTOM)\n\n dict_widgets = {}\n\n for each_path in widget_paths:\n\n real_path = os.path.realpath(each_path)\n\n for each_file in os.listdir(real_path):\n if each_file in excluded_files:\n continue\n\n full_path = f\"{real_path}/{each_file}\"\n widget_custom, status = load_module_from_file(full_path, 'widgets')\n\n if not widget_custom or not hasattr(widget_custom, 'WIDGET_INFORMATION'):\n continue\n\n # Populate dictionary of widget information\n if widget_custom.WIDGET_INFORMATION['widget_name_unique'] in dict_widgets:\n logger.error(f\"Error: Cannot add widget modules because it does not have \"\n f\"a unique name: {widget_custom.WIDGET_INFORMATION['widget_name_unique']}\")\n else:\n dict_widgets[widget_custom.WIDGET_INFORMATION['widget_name_unique']] = {}\n\n dict_widgets[widget_custom.WIDGET_INFORMATION['widget_name_unique']]['file_path'] = full_path\n\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_name')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_library')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'no_class')\n\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_height')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_width')\n\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'message')\n\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'url_datasheet', force_type='list')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'url_manufacturer', force_type='list')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'url_product_purchase', force_type='list')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'url_additional', force_type='list')\n\n # Dependencies\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'dependencies_module')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'dependencies_message')\n\n # Which form options to display and whether each option is enabled\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'options_enabled')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'options_disabled')\n\n # Misc\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'period')\n\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 
'endpoints')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'execute_at_creation')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'execute_at_modification')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'execute_at_deletion')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'generate_page_variables')\n\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'custom_options_message')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'custom_options')\n\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'custom_commands_message')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'custom_commands')\n\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_head')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_title_bar')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_body')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_configure_options')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_js')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_js_ready')\n dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_js_ready_end')\n\n return dict_widgets"}}},{"rowIdx":2016,"cells":{"id":{"kind":"number","value":2016,"string":"2,016"},"label":{"kind":"string","value":"run container"},"text":{"kind":"string","value":"import pytest\nimport docker\nimport os\nfrom time import sleep\nfrom subprocess import Popen, PIPE\n\nfrom sinspqa import LOGS_PATH, is_containerized\nfrom sinspqa.sinsp import SinspStreamerBuilder\n\n\ndef pytest_addoption(parser):\n parser.addoption('--no-kmod', action='store_true',\n default=False, help='Skip tests with kernel module')\n parser.addoption('--no-ebpf', action='store_true',\n default=False, help='Skip tests with eBPF')\n parser.addoption('--no-modern', action='store_true',\n default=False, help='Skip tests with modern eBPF')\n\n\ndef pytest_collection_modifyitems(config, items):\n no_kmod = config.getoption('--no-kmod')\n no_ebpf = config.getoption('--no-ebpf')\n no_modern = config.getoption('--no-modern')\n\n if not no_kmod and not no_ebpf and not no_modern:\n # We are not skipping any tests\n return\n\n skip_kmod = pytest.mark.skip(\n reason='Skipping tests with kernel module driver')\n skip_ebpf = pytest.mark.skip(reason='Skipping tests with eBPF driver')\n skip_modern = pytest.mark.skip(\n reason='Skipping tests with modern eBPF driver')\n\n for item in items:\n if no_kmod:\n for kw in item.keywords:\n if 'kmod' in kw:\n item.add_marker(skip_kmod)\n break\n if no_ebpf:\n for kw in item.keywords:\n if 'ebpf' in kw:\n item.add_marker(skip_ebpf)\n break\n if no_modern:\n for kw in item.keywords:\n if 'modern_bpf' in kw:\n item.add_marker(skip_modern)\n break\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef check_root():\n assert os.geteuid() == 0, 'e2e tests need to be run as root'\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef docker_client():\n \"\"\"\n Create a docker client to be used by the tests.\n\n Returns:\n A docker.DockerClient object created from the environment the tests run on.\n \"\"\"\n return docker.from_env()\n\n\ndef wait_container_running(container: docker.models.containers.Container, additional_wait: int = 0, retries: int = 5):\n success = False\n\n for _ in range(retries):\n container.reload()\n\n if container.status == 'running':\n success = True\n break\n\n sleep(0.5)\n\n if not 
success:\n raise TimeoutError\n\n if additional_wait:\n sleep(additional_wait)\n\n\ndef METHOD_NAME(docker_client: docker.client.DockerClient, name: str, container: dict):\n image = container['image']\n args = container.get('args', '')\n privileged = container.get('privileged', False)\n mounts = container.get('mounts', [])\n environment = container.get('env', {})\n user = container.get('user', '')\n pid_mode = container.get('pid_mode', '')\n network_mode = container.get('network_mode', '')\n\n additional_wait = container.get('init_wait', 0)\n post_validation = container.get('post_validation', None)\n stop_signal = container.get('signal', None)\n\n handle = docker_client.containers.run(\n image,\n args,\n name=name,\n detach=True,\n privileged=privileged,\n mounts=mounts,\n environment=environment,\n user=user,\n pid_mode=pid_mode,\n network_mode=network_mode,\n )\n\n post = {\n 'validation': post_validation,\n 'signal': stop_signal\n }\n\n try:\n wait_container_running(handle, additional_wait)\n except TimeoutError:\n print(f'{name} failed to start, the test will fail')\n\n return (handle, post)\n\n\ndef teardown_container(name, container, validation, stop_signal):\n if stop_signal:\n container.kill(stop_signal)\n\n # The stop command is issued regardless of the kill command to ensure\n # the container stops\n container.stop()\n\n logs = container.logs().decode('utf-8')\n if logs:\n with open(os.path.join(LOGS_PATH, f'{name}.log'), 'w') as f:\n f.write(logs)\n\n result = ''\n if validation:\n try:\n validation(container)\n except AssertionError as e:\n result = f'{name}: {e}'\n\n container.remove()\n return result\n\n\n@pytest.fixture(scope=\"function\")\ndef run_containers(request, docker_client: docker.client.DockerClient):\n \"\"\"\n Runs containers, dumps their logs and cleans'em up\n \"\"\"\n containers = {}\n post = {}\n\n for name, container in request.param.items():\n handle, post_validation = METHOD_NAME(docker_client, name, container)\n\n containers[name] = handle\n post[name] = post_validation\n\n yield containers\n\n success = True\n errors = []\n\n for name, container in containers.items():\n validation = post[name]['validation']\n stop_signal = post[name]['signal']\n\n result = teardown_container(name, container, validation, stop_signal)\n\n if result != '':\n errors.append(result)\n success = False\n\n assert success, '\\n'.join(errors)\n\n\n@pytest.fixture(scope='function')\ndef sinsp(request, docker_client: docker.client.DockerClient):\n \"\"\"\n Runs an instance of sinsp-example, either in a container or as a regular\n process\n \"\"\"\n if is_containerized():\n container = request.param\n handle, post = METHOD_NAME(docker_client, 'sinsp', container)\n\n yield SinspStreamerBuilder() \\\n .setContainerized(True) \\\n .setSinsp(handle) \\\n .setTimeout(10) \\\n .build()\n\n validation = container.get('post_validation', None)\n stop_signal = container.get('signal', None)\n\n result = teardown_container(\n 'sinsp', handle, validation, stop_signal)\n assert result == '', result\n\n else:\n process = request.param\n args = process['args']\n args.insert(0, process['path'])\n env = os.environ.copy()\n additional_wait = process.get('init_wait', 0)\n for k, v in process['env'].items():\n env[k] = v\n process = Popen(args, env=env, stdout=PIPE, universal_newlines=True)\n\n if additional_wait:\n sleep(additional_wait)\n\n reader = SinspStreamerBuilder() \\\n .setContainerized(False) \\\n .setSinsp(process) \\\n .setTimeout(10) \\\n .build()\n\n yield reader\n\n reader.stop()\n 
process.terminate()\n process.wait()\n assert process.returncode == 0, f'sinsp-example terminated with code {process.returncode}'\n\n\ndef pytest_html_report_title(report):\n report.title = \"sinsp e2e tests\"\n\n\ndef dump_logs(pytest_html, extra):\n \"\"\"\n Finds all logs dumped to LOGS_PATH and makes them available through the\n auto-generated report\n \"\"\"\n for file in os.listdir(LOGS_PATH):\n full_path = os.path.join(LOGS_PATH, file)\n if not os.path.isfile(full_path):\n continue\n\n with open(full_path, 'r', errors='replace') as f:\n logs = f.read()\n extra.append(pytest_html.extras.text(logs, name=file))\n\n # Remove file so it doesn't bleed to following tests\n os.remove(full_path)\n\n\n@pytest.hookimpl(hookwrapper=True)\ndef pytest_runtest_makereport(item, call):\n pytest_html = item.config.pluginmanager.getplugin(\"html\")\n outcome = yield\n report = outcome.get_result()\n extra = getattr(report, \"extra\", [])\n\n if report.when == \"teardown\":\n dump_logs(pytest_html, extra)\n\n report.extra = extra"}}},{"rowIdx":2017,"cells":{"id":{"kind":"number","value":2017,"string":"2,017"},"label":{"kind":"string","value":"test initialise with list kwarg"},"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\n###########################################################################\n# Copyright (c), The AiiDA team. All rights reserved. #\n# This file is part of the AiiDA code. #\n# #\n# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #\n# For further information on the license, see the LICENSE.txt file #\n# For further information please visit http://www.aiida.net #\n###########################################################################\n# pylint: disable=redefined-outer-name\n\"\"\"Tests for :class:`aiida.orm.nodes.data.list.List` class.\"\"\"\nimport pytest\n\nfrom aiida.common.exceptions import ModificationNotAllowed\nfrom aiida.orm import List, load_node\n\n\n@pytest.fixture\ndef listing():\n return ['a', 2, True]\n\n\n@pytest.fixture\ndef int_listing():\n return [2, 1, 3]\n\n\ndef test_creation():\n \"\"\"Test the creation of an empty ``List`` node.\"\"\"\n node = List()\n assert len(node) == 0\n with pytest.raises(IndexError):\n node[0] # pylint: disable=pointless-statement\n\n\ndef test_mutability():\n \"\"\"Test list's mutability before and after storage.\"\"\"\n node = List()\n node.append(5)\n node.store()\n\n # Test all mutable calls are now disallowed\n with pytest.raises(ModificationNotAllowed):\n node.append(5)\n with pytest.raises(ModificationNotAllowed):\n node.extend([5])\n with pytest.raises(ModificationNotAllowed):\n node.insert(0, 2)\n with pytest.raises(ModificationNotAllowed):\n node.remove(5)\n with pytest.raises(ModificationNotAllowed):\n node.pop()\n with pytest.raises(ModificationNotAllowed):\n node.sort()\n with pytest.raises(ModificationNotAllowed):\n node.reverse()\n\n\ndef test_store_load(listing):\n \"\"\"Test load_node on just stored object.\"\"\"\n node = List(listing)\n node.store()\n\n node_loaded = load_node(node.pk)\n assert node.get_list() == node_loaded.get_list()\n\n\ndef test_special_methods(listing):\n \"\"\"Test the special methods of the ``List`` class.\"\"\"\n node = List(listing)\n\n # __getitem__\n for i, value in enumerate(listing):\n assert node[i] == value\n\n # __setitem__\n node[0] = 'b'\n assert node[0] == 'b'\n\n # __delitem__\n del node[0]\n assert node.get_list() == listing[1:]\n\n # __len__\n assert len(node) == 2\n\n\ndef test_equality(listing):\n \"\"\"Test equality comparison for ``List`` 
nodes.\"\"\"\n different_list = ['I', 'am', 'different']\n node = List(listing)\n different_node = List(different_list)\n clone = List(listing)\n\n # Test equality comparison with Python base type\n assert node == listing\n assert node != different_list\n\n # Test equality comparison with other `BaseType` nodes\n assert node == clone\n assert node != different_node\n\n\ndef test_append(listing):\n \"\"\"Test the ``List.append()`` method.\"\"\"\n\n def do_checks(node):\n assert len(node) == 1\n assert node[0] == 4\n\n node = List()\n node.append(4)\n do_checks(node)\n\n # Try the same after storing\n node.store()\n do_checks(node)\n\n node = List(listing)\n node.append('more')\n assert node[-1] == 'more'\n\n\ndef test_extend(listing):\n \"\"\"Test extend() member function.\"\"\"\n\n def do_checks(node, lst):\n assert len(node) == len(lst)\n # Do an element wise comparison\n for lst_el, node_el in zip(lst, node):\n assert lst_el == node_el\n\n node = List()\n node.extend(listing)\n do_checks(node, listing)\n\n # Further extend\n node.extend(listing)\n do_checks(node, listing * 2)\n\n # Now try after storing\n node.store()\n do_checks(node, listing * 2)\n\n\ndef test_insert(listing):\n \"\"\"Test the ``List.insert()`` method.\"\"\"\n node = List(listing)\n node.insert(1, 'new')\n assert node[1] == 'new'\n assert len(node) == 4\n\n\ndef test_remove(listing):\n \"\"\"Test the ``List.remove()`` method.\"\"\"\n node = List(listing)\n node.remove(1)\n listing.remove(1)\n assert node.get_list() == listing\n\n with pytest.raises(ValueError, match=r'list.remove\\(x\\): x not in list'):\n node.remove('non-existent')\n\n\ndef test_pop(listing):\n \"\"\"Test the ``List.pop()`` method.\"\"\"\n node = List(listing)\n node.pop()\n assert node.get_list() == listing[:-1]\n\n\ndef test_index(listing):\n \"\"\"Test the ``List.index()`` method.\"\"\"\n node = List(listing)\n\n assert node.index(True) == listing.index(True)\n\n\ndef test_count(listing):\n \"\"\"Test the ``List.count()`` method.\"\"\"\n node = List(listing)\n for value in listing:\n assert node.count(value) == listing.count(value)\n\n\ndef test_sort(listing, int_listing):\n \"\"\"Test the ``List.sort()`` method.\"\"\"\n node = List(int_listing)\n node.sort()\n int_listing.sort()\n assert node.get_list() == int_listing\n\n node = List(listing)\n with pytest.raises(TypeError, match=r\"'<' not supported between instances of 'int' and 'str'\"):\n node.sort()\n\n\ndef test_reverse(listing):\n \"\"\"Test the ``List.reverse()`` method.\"\"\"\n node = List(listing)\n node.reverse()\n listing.reverse()\n assert node.get_list() == listing\n\n\ndef METHOD_NAME(listing):\n \"\"\"Test that the ``List`` node can be initialized with the ``list`` keyword argument for backwards compatibility.\"\"\"\n node = List(listing)\n assert node.get_list() == listing"}}},{"rowIdx":2018,"cells":{"id":{"kind":"number","value":2018,"string":"2,018"},"label":{"kind":"string","value":"test validate slug and generate if needed"},"text":{"kind":"string","value":"from datetime import timedelta\nfrom decimal import Decimal\n\nimport pytest\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\nfrom graphql.error import GraphQLError\n\nfrom ....product.models import Category\nfrom ..validators import (\n clean_seo_fields,\n validate_end_is_after_start,\n validate_one_of_args_is_in_query,\n validate_price_precision,\n validate_slug_and_generate_if_needed,\n)\n\n\n@pytest.mark.parametrize(\n \"value, currency\",\n [\n (Decimal(\"1.1200\"), \"USD\"),\n 
(Decimal(\"1.12\"), \"USD\"),\n (Decimal(\"1\"), \"USD\"),\n (Decimal(\"1\"), \"ISK\"),\n (Decimal(\"1.00\"), \"ISK\"),\n (Decimal(\"5.12\"), None),\n (Decimal(\"1000\"), \"USD\"),\n ],\n)\ndef test_validate_price_precision(value, currency):\n # when\n result = validate_price_precision(value, currency)\n\n # then\n assert result is None\n\n\n@pytest.mark.parametrize(\n \"value, currency\",\n [\n (Decimal(\"1.1212\"), \"USD\"),\n (Decimal(\"1.128\"), \"USD\"),\n (Decimal(\"1.1\"), \"ISK\"),\n (Decimal(\"1.11\"), \"ISK\"),\n (Decimal(\"5.123\"), None),\n ],\n)\ndef test_validate_price_precision_raise_error(value, currency):\n with pytest.raises(ValidationError):\n validate_price_precision(value, currency)\n\n\ndef test_validate_end_is_after_start_raise_error():\n start_date = timezone.now() + timedelta(days=365)\n end_date = timezone.now() - timedelta(days=365)\n\n with pytest.raises(ValidationError) as error:\n validate_end_is_after_start(start_date, end_date)\n assert error.value.message == \"End date cannot be before the start date.\"\n\n\ndef test_validate_one_of_args_is_in_query():\n assert validate_one_of_args_is_in_query(\"arg1\", \"present\", \"arg2\", None) is None\n\n\ndef test_validate_one_of_args_is_in_query_false_args():\n with pytest.raises(GraphQLError) as error:\n validate_one_of_args_is_in_query(\"arg1\", None, \"arg2\", \"\")\n assert (\n error.value.message == \"At least one of arguments is required: 'arg1', 'arg2'.\"\n )\n\n\ndef test_validate_one_of_args_is_in_query_more_than_one_true():\n with pytest.raises(GraphQLError) as error:\n validate_one_of_args_is_in_query(\n \"arg1\", \"present\", \"arg2\", \"present\", \"arg3\", \"present\"\n )\n assert (\n error.value.message == \"Argument 'arg1' cannot be combined with 'arg2', 'arg3'\"\n )\n\n\ndef test_validate_one_of_args_is_in_query_single_arg():\n assert validate_one_of_args_is_in_query(\"arg1\", \"present\") is None\n\n\ndef test_validate_one_of_args_is_in_query_single_arg_absent():\n with pytest.raises(GraphQLError) as error:\n validate_one_of_args_is_in_query(\"arg1\", None) is None\n assert error.value.message == \"At least one of arguments is required: 'arg1'.\"\n\n\ndef test_clean_seo_fields():\n title = \"lady title\"\n description = \"fantasy description\"\n data = {\"seo\": {\"title\": title, \"description\": description}}\n clean_seo_fields(data)\n assert data[\"seo_title\"] == title\n assert data[\"seo_description\"] == description\n\n\ndef test_clean_seo_fields_accepts_null():\n data = {\"seo\": None}\n clean_seo_fields(data)\n assert not data\n\n\n@pytest.mark.parametrize(\n \"cleaned_input\",\n [\n {\"slug\": None, \"name\": \"test\"},\n {\"slug\": \"\", \"name\": \"test\"},\n {\"slug\": \"\"},\n {\"slug\": None},\n ],\n)\ndef test_validate_slug_and_generate_if_needed_raises_errors(category, cleaned_input):\n with pytest.raises(ValidationError):\n validate_slug_and_generate_if_needed(category, \"name\", cleaned_input)\n\n\n@pytest.mark.parametrize(\n \"cleaned_input\", [{\"slug\": \"test-slug\"}, {\"slug\": \"test-slug\", \"name\": \"test\"}]\n)\ndef test_validate_slug_and_generate_if_needed_not_raises_errors(\n category, cleaned_input\n):\n validate_slug_and_generate_if_needed(category, \"name\", cleaned_input)\n\n\n@pytest.mark.parametrize(\n \"cleaned_input\",\n [\n {\"slug\": None, \"name\": \"test\"},\n {\"slug\": \"\", \"name\": \"test\"},\n ],\n)\ndef METHOD_NAME(cleaned_input):\n # given\n category = Category(name=\"test\")\n previous_slug_value = cleaned_input[\"slug\"]\n\n # when\n 
validate_slug_and_generate_if_needed(category, \"name\", cleaned_input)\n\n # then\n assert previous_slug_value != cleaned_input[\"slug\"]\n assert cleaned_input[\"slug\"] == cleaned_input[\"name\"]\n\n\n@pytest.mark.parametrize(\n \"cleaned_input\",\n [\n {\"slug\": \"\"},\n {\"slug\": None},\n {\"slug\": \"test-slug\"},\n {\"slug\": \"test-slug\", \"name\": \"test\"},\n ],\n)\ndef test_validate_slug_and_generate_if_needed_slug_not_changed(cleaned_input):\n # given\n category = Category(name=\"test\")\n previous_slug_value = cleaned_input[\"slug\"]\n\n # when\n validate_slug_and_generate_if_needed(category, \"name\", cleaned_input)\n\n # then\n assert cleaned_input[\"slug\"] == previous_slug_value"}}},{"rowIdx":2019,"cells":{"id":{"kind":"number","value":2019,"string":"2,019"},"label":{"kind":"string","value":"get"},"text":{"kind":"string","value":"# Copyright (c) Meta Platforms, Inc. and affiliates.\nfrom .request import Broker\n\nfrom .vocabulary import ThreatExchange as t\nfrom .vocabulary import ThreatExchangeMember as tem\nfrom .errors import pytxAttributeError\n\n\nclass ThreatExchangeMember(object):\n\n _URL = t.URL + t.VERSION + t.THREAT_EXCHANGE_MEMBERS\n\n _internal = [\n \"_access_token\",\n ]\n\n _fields = [\n tem.ID,\n tem.NAME,\n tem.EMAIL,\n ]\n\n _default_fields = [\n tem.ID,\n tem.NAME,\n tem.EMAIL,\n ]\n\n _unique = []\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize the object. Set the _access_token and any attributes that\n were provided.\n \"\"\"\n\n for name, value in kwargs.items():\n self.__setattr__(name, value)\n\n def __getattr__(self, attr):\n \"\"\"\n Get an attribute. If the attribute does not exist, return None\n \"\"\"\n\n if attr not in self._fields and attr not in self._internal:\n raise pytxAttributeError(\"%s is not a valid attribute\" % attr)\n\n try:\n return object.__getattribute__(self, attr)\n except:\n return None\n\n def METHOD_NAME(self, attr):\n \"\"\"\n Wrapper around __getattr__ making it easier to use the vocabulary to get\n class attributes.\n\n :param attr: The name of the attribute to get.\n :type attr: str\n \"\"\"\n\n return self.__getattr__(attr)\n\n @classmethod\n def _get_generator(\n cls,\n url,\n to_dict=False,\n params=None,\n retries=None,\n headers=None,\n proxies=None,\n verify=None,\n ):\n \"\"\"\n Send the GET request and return a generator.\n\n :param url: The URL to send the GET request to.\n :type url: str\n :param to_dict: Return a dictionary instead of an instantiated class.\n :type to_dict: bool\n :param params: The GET parameters to send in the request.\n :type params: dict\n :param retries: Number of retries to fetch a page before stopping.\n :type retries: int\n :param headers: header info for requests.\n :type headers: dict\n :param proxies: proxy info for requests.\n :type proxies: dict\n :param verify: verify info for requests.\n :type verify: bool, str\n :returns: Generator, dict (using json.loads())\n \"\"\"\n\n if not params:\n params = dict()\n\n members = Broker.METHOD_NAME(\n url,\n params=params,\n retries=retries,\n headers=headers,\n proxies=proxies,\n verify=verify,\n ).METHOD_NAME(t.DATA, [])\n total = len(members)\n if total == t.MIN_TOTAL:\n yield None\n else:\n for member in members:\n if to_dict:\n yield member\n else:\n yield Broker.get_new(cls, member)\n\n @classmethod\n def objects(\n cls,\n full_response=False,\n dict_generator=False,\n retries=None,\n headers=None,\n proxies=None,\n verify=None,\n ):\n \"\"\"\n Get a list of Threat Exchange Members\n\n :param full_response: Return the 
full response instead of the generator.\n Takes precedence over dict_generator.\n :type full_response: bool\n :param dict_generator: Return a dictionary instead of an instantiated\n object.\n :type dict_generator: bool\n :param retries: Number of retries to fetch a page before stopping.\n :type retries: int\n :param headers: header info for requests.\n :type headers: dict\n :param proxies: proxy info for requests.\n :type proxies: dict\n :param verify: verify info for requests.\n :type verify: bool, str\n :returns: Generator, dict (using json.loads())\n \"\"\"\n\n if full_response:\n return Broker.METHOD_NAME(\n cls._URL,\n retries=retries,\n headers=headers,\n proxies=proxies,\n verify=verify,\n )\n else:\n return cls._get_generator(\n cls._URL,\n to_dict=dict_generator,\n retries=retries,\n headers=headers,\n proxies=proxies,\n verify=verify,\n )\n\n def to_dict(self):\n \"\"\"\n Convert this object into a dictionary.\n\n :returns: dict\n \"\"\"\n\n d = dict((n, getattr(self, n, None)) for n in self._fields)\n return d"}}},{"rowIdx":2020,"cells":{"id":{"kind":"number","value":2020,"string":"2,020"},"label":{"kind":"string","value":"combine frontiers"},"text":{"kind":"string","value":"import itertools\nimport random\nimport sys\nimport time\nimport unittest\n\nimport backend as F\n\nimport dgl\nimport networkx as nx\nimport numpy as np\nimport scipy.sparse as sp\nfrom utils import parametrize_idtype\n\nnp.random.seed(42)\n\n\ndef toset(x):\n # F.zerocopy_to_numpy may return a int\n return set(F.zerocopy_to_numpy(x).tolist())\n\n\n@parametrize_idtype\ndef test_bfs(idtype, n=100):\n def _bfs_nx(g_nx, src):\n edges = nx.bfs_edges(g_nx, src)\n layers_nx = [set([src])]\n edges_nx = []\n frontier = set()\n edge_frontier = set()\n for u, v in edges:\n if u in layers_nx[-1]:\n frontier.add(v)\n edge_frontier.add(g.edge_ids(int(u), int(v)))\n else:\n layers_nx.append(frontier)\n edges_nx.append(edge_frontier)\n frontier = set([v])\n edge_frontier = set([g.edge_ids(u, v)])\n # avoids empty successors\n if len(frontier) > 0 and len(edge_frontier) > 0:\n layers_nx.append(frontier)\n edges_nx.append(edge_frontier)\n return layers_nx, edges_nx\n\n a = sp.random(n, n, 3 / n, data_rvs=lambda n: np.ones(n))\n g = dgl.from_scipy(a).astype(idtype)\n\n g_nx = g.to_networkx()\n src = random.choice(range(n))\n layers_nx, _ = _bfs_nx(g_nx, src)\n layers_dgl = dgl.bfs_nodes_generator(g, src)\n assert len(layers_dgl) == len(layers_nx)\n assert all(toset(x) == y for x, y in zip(layers_dgl, layers_nx))\n\n g_nx = nx.random_tree(n, seed=42)\n g = dgl.from_networkx(g_nx).astype(idtype)\n src = 0\n _, edges_nx = _bfs_nx(g_nx, src)\n edges_dgl = dgl.bfs_edges_generator(g, src)\n assert len(edges_dgl) == len(edges_nx)\n assert all(toset(x) == y for x, y in zip(edges_dgl, edges_nx))\n\n\n@parametrize_idtype\ndef test_topological_nodes(idtype, n=100):\n a = sp.random(n, n, 3 / n, data_rvs=lambda n: np.ones(n))\n b = sp.tril(a, -1).tocoo()\n g = dgl.from_scipy(b).astype(idtype)\n\n layers_dgl = dgl.topological_nodes_generator(g)\n\n adjmat = g.adj_external(transpose=True)\n\n def tensor_topo_traverse():\n n = g.num_nodes()\n mask = F.copy_to(F.ones((n, 1)), F.cpu())\n degree = F.spmm(adjmat, mask)\n while F.reduce_sum(mask) != 0.0:\n v = F.astype((degree == 0.0), F.float32)\n v = v * mask\n mask = mask - v\n frontier = F.copy_to(F.nonzero_1d(F.squeeze(v, 1)), F.cpu())\n yield frontier\n degree -= F.spmm(adjmat, v)\n\n layers_spmv = list(tensor_topo_traverse())\n\n assert len(layers_dgl) == len(layers_spmv)\n assert 
all(toset(x) == toset(y) for x, y in zip(layers_dgl, layers_spmv))\n\n\nDFS_LABEL_NAMES = [\"forward\", \"reverse\", \"nontree\"]\n\n\n@parametrize_idtype\ndef test_dfs_labeled_edges(idtype, example=False):\n dgl_g = dgl.graph([]).astype(idtype)\n dgl_g.add_nodes(6)\n dgl_g.add_edges([0, 1, 0, 3, 3], [1, 2, 2, 4, 5])\n dgl_edges, dgl_labels = dgl.dfs_labeled_edges_generator(\n dgl_g, [0, 3], has_reverse_edge=True, has_nontree_edge=True\n )\n dgl_edges = [toset(t) for t in dgl_edges]\n dgl_labels = [toset(t) for t in dgl_labels]\n g1_solutions = [\n # edges labels\n [[0, 1, 1, 0, 2], [0, 0, 1, 1, 2]],\n [[2, 2, 0, 1, 0], [0, 1, 0, 2, 1]],\n ]\n g2_solutions = [\n # edges labels\n [[3, 3, 4, 4], [0, 1, 0, 1]],\n [[4, 4, 3, 3], [0, 1, 0, 1]],\n ]\n\n def METHOD_NAME(sol):\n es, ls = zip(*sol)\n es = [\n set(i for i in t if i is not None)\n for t in itertools.zip_longest(*es)\n ]\n ls = [\n set(i for i in t if i is not None)\n for t in itertools.zip_longest(*ls)\n ]\n return es, ls\n\n for sol_set in itertools.product(g1_solutions, g2_solutions):\n es, ls = METHOD_NAME(sol_set)\n if es == dgl_edges and ls == dgl_labels:\n break\n else:\n assert False\n\n\nif __name__ == \"__main__\":\n test_bfs(idtype=\"int32\")\n test_topological_nodes(idtype=\"int32\")\n test_dfs_labeled_edges(idtype=\"int32\")"}}},{"rowIdx":2021,"cells":{"id":{"kind":"number","value":2021,"string":"2,021"},"label":{"kind":"string","value":"test edit post belongs to thread and"},"text":{"kind":"string","value":"from guardian.shortcuts import assign_perm\n\nfrom kitsune.forums.tests import ForumFactory, ForumTestCase, PostFactory, ThreadFactory\nfrom kitsune.sumo.tests import get, post\nfrom kitsune.users.tests import GroupFactory, UserFactory\n\n\nclass BelongsTestCase(ForumTestCase):\n \"\"\"\n Mixing and matching thread, forum, and post data in URLs should fail.\n \"\"\"\n\n def test_posts_thread_belongs_to_forum(self):\n \"\"\"Posts view - redirect if thread does not belong to forum.\"\"\"\n f = ForumFactory()\n t = ThreadFactory() # Thread belongs to a different forum\n\n r = get(self.client, \"forums.posts\", args=[f.slug, t.id])\n self.assertEqual(200, r.status_code)\n u = r.redirect_chain[0][0]\n assert u.endswith(t.get_absolute_url())\n\n def test_reply_thread_belongs_to_forum(self):\n \"\"\"Reply action - thread belongs to forum.\"\"\"\n f = ForumFactory()\n t = ThreadFactory() # Thread belongs to a different forum\n u = UserFactory()\n\n self.client.login(username=u.username, password=\"testpass\")\n r = post(self.client, \"forums.reply\", {}, args=[f.slug, t.id])\n self.assertEqual(404, r.status_code)\n\n def test_locked_thread_belongs_to_forum(self):\n \"\"\"Lock action - thread belongs to forum.\"\"\"\n f = ForumFactory()\n t = ThreadFactory() # Thread belongs to a different forum\n u = UserFactory()\n\n # Give the user the permission to lock threads.\n g = GroupFactory()\n g.user_set.add(u)\n assign_perm(\"forums.lock_forum_thread\", g, f)\n assign_perm(\"forums.lock_forum_thread\", g, t.forum)\n\n self.client.login(username=u.username, password=\"testpass\")\n r = post(self.client, \"forums.lock_thread\", {}, args=[f.slug, t.id])\n self.assertEqual(404, r.status_code)\n\n def test_sticky_thread_belongs_to_forum(self):\n \"\"\"Sticky action - thread belongs to forum.\"\"\"\n f = ForumFactory()\n t = ThreadFactory() # Thread belongs to a different forum\n u = UserFactory()\n\n # Give the user the permission to sticky threads.\n g = GroupFactory()\n g.user_set.add(u)\n assign_perm(\"forums.sticky_forum_thread\", 
g, f)\n assign_perm(\"forums.sticky_forum_thread\", g, t.forum)\n\n self.client.login(username=u.username, password=\"testpass\")\n r = post(self.client, \"forums.sticky_thread\", {}, args=[f.slug, t.id])\n self.assertEqual(404, r.status_code)\n\n def test_edit_thread_belongs_to_forum(self):\n \"\"\"Edit thread action - thread belongs to forum.\"\"\"\n f = ForumFactory()\n t = ThreadFactory() # Thread belongs to a different forum\n u = t.creator\n\n self.client.login(username=u.username, password=\"testpass\")\n r = get(self.client, \"forums.edit_thread\", args=[f.slug, t.id])\n self.assertEqual(404, r.status_code)\n\n def test_delete_thread_belongs_to_forum(self):\n \"\"\"Delete thread action - thread belongs to forum.\"\"\"\n f = ForumFactory()\n t = ThreadFactory() # Thread belongs to a different forum\n u = UserFactory()\n\n # Give the user the permission to delete threads.\n g = GroupFactory()\n g.user_set.add(u)\n assign_perm(\"forums.delete_forum_thread\", g, f)\n assign_perm(\"forums.delete_forum_thread\", g, t.forum)\n\n self.client.login(username=u.username, password=\"testpass\")\n r = get(self.client, \"forums.delete_thread\", args=[f.slug, t.id])\n self.assertEqual(404, r.status_code)\n\n def METHOD_NAME(self):\n # Edit post action - post belongs to thread and thread belongs\n # to forum.\n f = ForumFactory()\n t = ThreadFactory(forum=f)\n # Post belongs to a different forum and thread.\n p = PostFactory()\n u = p.author\n\n self.client.login(username=u.username, password=\"testpass\")\n\n # Post isn't in the passed forum:\n r = get(self.client, \"forums.edit_post\", args=[f.slug, p.thread.id, p.id])\n self.assertEqual(404, r.status_code)\n\n # Post isn't in the passed thread:\n r = get(self.client, \"forums.edit_post\", args=[p.thread.forum.slug, t.id, p.id])\n self.assertEqual(404, r.status_code)\n\n def test_delete_post_belongs_to_thread_and_forum(self):\n # Delete post action - post belongs to thread and thread\n # belongs to forum.\n f = ForumFactory()\n t = ThreadFactory(forum=f)\n # Post belongs to a different forum and thread.\n p = PostFactory()\n u = p.author\n\n # Give the user the permission to delete posts.\n g = GroupFactory()\n g.user_set.add(u)\n assign_perm(\"forums.delete_forum_thread_post\", g, f)\n assign_perm(\"forums.delete_forum_thread_post\", g, p.thread.forum)\n\n self.client.login(username=u.username, password=\"testpass\")\n\n # Post isn't in the passed forum:\n r = get(self.client, \"forums.delete_post\", args=[f.slug, p.thread.id, p.id])\n self.assertEqual(404, r.status_code)\n\n # Post isn't in the passed thread:\n r = get(self.client, \"forums.delete_post\", args=[p.thread.forum.slug, t.id, p.id])\n self.assertEqual(404, r.status_code)"}}},{"rowIdx":2022,"cells":{"id":{"kind":"number","value":2022,"string":"2,022"},"label":{"kind":"string","value":"test stock rule buy payment mode"},"text":{"kind":"string","value":"# Copyright 2013-2015 Tecnativa - Pedro M. 
Baeza\n# Copyright 2017 Tecnativa - Vicent Cubells\n# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html\n\nfrom odoo import fields\nfrom odoo.tests import Form\n\nfrom odoo.addons.account_payment_purchase.tests.test_account_payment_purchase import (\n TestAccountPaymentPurchase,\n)\n\n\nclass TestAccountPaymentPurchaseStock(TestAccountPaymentPurchase):\n def test_purchase_stock_order_invoicing(self):\n self.purchase.onchange_partner_id()\n self.purchase.button_confirm()\n picking = self.purchase.picking_ids[0]\n picking.action_confirm()\n picking.move_lines.write({\"quantity_done\": 1.0})\n picking.button_validate()\n\n invoice = self.env[\"account.move\"].create(\n {\"partner_id\": self.partner.id, \"move_type\": \"in_invoice\"}\n )\n with Form(invoice) as inv:\n inv.purchase_id = self.purchase\n self.assertEqual(\n self.purchase.invoice_ids[0].payment_mode_id, self.payment_mode\n )\n\n def test_picking_from_purchase_order_invoicing(self):\n # Test payment mode\n stockable_product = self.env[\"product.product\"].create(\n {\"name\": \"Test stockable product\", \"type\": \"product\"}\n )\n self.purchase.order_line[0].product_id = stockable_product\n self.purchase.button_confirm()\n picking = self.purchase.picking_ids[0]\n picking.action_confirm()\n picking.move_lines.write({\"quantity_done\": 1.0})\n picking.button_validate()\n\n invoice = self.env[\"account.move\"].create(\n {\"partner_id\": self.partner.id, \"move_type\": \"in_invoice\"}\n )\n invoice.purchase_id = self.purchase\n invoice._onchange_purchase_auto_complete()\n self.assertEqual(invoice.payment_mode_id, self.payment_mode)\n purchase2 = self.purchase.copy()\n payment_mode2 = self.payment_mode.copy()\n purchase2.payment_mode_id = payment_mode2\n purchase2.button_confirm()\n picking = purchase2.picking_ids[0]\n picking.action_confirm()\n picking.move_lines.write({\"quantity_done\": 1.0})\n picking.button_validate()\n invoice.purchase_id = purchase2\n result = invoice._onchange_purchase_auto_complete()\n self.assertEqual(\n result and result.get(\"warning\", {}).get(\"title\", False), \"Warning\"\n )\n\n def test_picking_from_purchase_order_invoicing_bank(self):\n # Test partner_bank\n stockable_product = self.env[\"product.product\"].create(\n {\"name\": \"Test stockable product\", \"type\": \"product\"}\n )\n self.purchase.order_line[0].product_id = stockable_product\n self.purchase.supplier_partner_bank_id = self.bank\n self.purchase.button_confirm()\n picking = self.purchase.picking_ids[0]\n picking.action_confirm()\n picking.move_lines.write({\"quantity_done\": 1.0})\n picking.button_validate()\n\n invoice = self.env[\"account.move\"].create(\n {\"partner_id\": self.partner.id, \"move_type\": \"in_invoice\"}\n )\n invoice.purchase_id = self.purchase\n invoice._onchange_purchase_auto_complete()\n self.assertEqual(invoice.partner_bank_id, self.bank)\n purchase2 = self.purchase.copy()\n purchase2.supplier_partner_bank_id = self.bank2\n purchase2.button_confirm()\n picking = purchase2.picking_ids[0]\n picking.action_confirm()\n picking.move_lines.write({\"quantity_done\": 1.0})\n picking.button_validate()\n invoice.purchase_id = purchase2\n result = invoice._onchange_purchase_auto_complete()\n self.assertEqual(\n result and result.get(\"warning\", {}).get(\"title\", False), \"Warning\"\n )\n\n def METHOD_NAME(self):\n route = self.env.ref(\"purchase_stock.route_warehouse0_buy\")\n rule = self.env[\"stock.rule\"].search([(\"route_id\", \"=\", route.id)], limit=1)\n rule._run_buy(\n procurements=[\n (\n 
self.env[\"procurement.group\"].Procurement(\n self.mto_product,\n 1,\n self.mto_product.uom_id,\n self.env[\"stock.location\"].search([], limit=1),\n \"Procurement order test\",\n \"Test\",\n rule.company_id,\n {\n \"company_id\": rule.company_id,\n \"date_planned\": fields.Datetime.now(),\n },\n ),\n rule,\n )\n ]\n )\n purchase = self.env[\"purchase.order\"].search([(\"origin\", \"=\", \"Test\")])\n self.assertEqual(purchase.payment_mode_id, self.payment_mode)"}}},{"rowIdx":2023,"cells":{"id":{"kind":"number","value":2023,"string":"2,023"},"label":{"kind":"string","value":"init vars"},"text":{"kind":"string","value":"# Copyright 2019 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Regret Matching Approximate Nash Solver.\"\"\"\n\nfrom absl import logging # pylint:disable=unused-import\n\nimport numpy as np\n\n\nclass Solver(object):\n \"\"\"Regret-matching Solver.\"\"\"\n\n def __init__(self, optimism=True, discount=False, rnd_init=False, seed=None,\n **kwargs):\n \"\"\"Ctor.\"\"\"\n del kwargs\n self.num_players = None\n self.lrs = None\n self.optimism = optimism\n self.discount = discount\n self.rnd_init = rnd_init\n self.has_aux = True\n self.aux_errors = []\n\n self.seed = seed\n self.random = np.random.RandomState(seed)\n\n def METHOD_NAME(self, num_strats, num_players):\n \"\"\"Initialize solver parameters.\"\"\"\n self.num_players = num_players\n if self.rnd_init:\n init_dist = self.random.rand(num_strats)\n else:\n init_dist = np.ones(num_strats)\n init_dist /= init_dist.sum()\n init_regret = np.zeros(num_strats)\n return (init_dist, init_regret)\n\n def record_aux_errors(self, grads):\n \"\"\"Record errors for the auxiliary variables.\"\"\"\n grad_regret = grads[1]\n self.aux_errors.append([np.linalg.norm(grad_regret)])\n\n def compute_gradients(self, params, payoff_matrices):\n \"\"\"Compute and return gradients (and exploitabilities) for all parameters.\n\n Args:\n params: tuple of params (dist, regret), see regmatch.gradients\n payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action\n Returns:\n tuple of gradients (grad_dist, grad_regret), see ate.gradients\n unregularized exploitability (stochastic estimate)\n solver exploitability (stochastic estimate) - NaN\n \"\"\"\n return gradients(*params, payoff_matrices)\n\n def exploitability(self, params, payoff_matrices):\n \"\"\"Regret matching does not minimize any exploitability so return NaN.\n\n Args:\n params: tuple of params (dist,)\n payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action\n Returns:\n np.NaN\n \"\"\"\n del params\n del payoff_matrices\n return np.NaN\n\n def update(self, params, grads, t):\n \"\"\"Update cumulative regret and strategy (dist).\n\n Args:\n params: tuple of variables to be updated (dist, regret)\n grads: tuple of variable gradients (grad_dist, grad_regret)\n t: int, solver iteration (not used)\n Returns:\n new_params: tuple of update params (new_dist, new_regret)\n \"\"\"\n dist, regret = params\n regret_delta = grads[1]\n 
if self.discount:\n gamma = t / float(t + 1)\n else:\n gamma = 1\n new_regret = gamma * regret + regret_delta\n new_clipped_regrets = np.clip(new_regret + self.optimism * regret_delta,\n 0.,\n np.inf)\n if np.sum(new_clipped_regrets) > 0:\n new_dist = new_clipped_regrets / new_clipped_regrets.sum()\n else:\n new_dist = np.ones_like(dist) / dist.size\n new_params = (new_dist, new_regret)\n return new_params\n\n\ndef gradients(dist, regret, payoff_matrices):\n \"\"\"Computes regret delta to be added to regret in update.\n\n Args:\n dist: 1-d np.array, current estimate of nash distribution\n regret: 1-d np.array (same shape as dist), current estimate of regrets\n payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action\n Returns:\n deltas w.r.t. (dist, regret) as tuple\n unregularized exploitability (stochastic estimate)\n solver exploitability (stochastic estimate) - NaN\n \"\"\"\n del regret\n\n nabla = payoff_matrices[0].dot(dist)\n utility = nabla.dot(dist)\n\n grad_dist = np.NaN * np.ones_like(dist)\n grad_regret = nabla - utility\n\n unreg_exp = np.max(nabla) - nabla.dot(dist)\n\n return (grad_dist, grad_regret), unreg_exp, np.NaN"}}},{"rowIdx":2024,"cells":{"id":{"kind":"number","value":2024,"string":"2,024"},"label":{"kind":"string","value":"callback wazuhdb response"},"text":{"kind":"string","value":"# Copyright (C) 2015-2022, Wazuh Inc.\n# Created by Wazuh, Inc. .\n# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2\nimport functools\nimport hashlib\nimport json\nimport logging\nimport socket\nimport sqlite3\nimport time\n\nfrom wazuh_testing.tools import GLOBAL_DB_PATH, WAZUH_DB_SOCKET_PATH\nfrom wazuh_testing.tools.monitoring import wazuh_pack, wazuh_unpack\nfrom wazuh_testing.tools.services import control_service\n\n\ndef METHOD_NAME(item):\n if isinstance(item, tuple):\n data, response = item\n return response.decode()\n\n\ndef mock_db(func):\n \"\"\"Decorator used in any function that needs to mock a wazuh db\n\n This function will execute `func` after stopping wazuh-modulesd and wazuh-db. 
After that,\n it will start the daemons again\n\n Args:\n func (callable): function that will mock the cve.db\n\n Example:\n @vd.mock__db\n def mock_agent_status(request, agent_id, agent_status):\n \"\"\"\n @functools.wraps(func)\n def magic(*args, **kwargs):\n control_service('stop', daemon='wazuh-modulesd')\n func(*args, **kwargs)\n control_service('start', daemon='wazuh-modulesd')\n\n return magic\n\n\ndef mock_agent(\n agent_id, name=\"centos8-agent\", ip=\"127.0.0.1\", register_ip=\"127.0.0.1\", internal_key=\"\",\n os_name=\"CentOS Linux\", os_version=\"7.1\", os_major=\"7\", os_minor=\"1\", os_codename=\"centos-8\",\n os_build=\"4.18.0-147.8.1.el8_1.x86_64\", os_platform=\"#1 SMP Thu Apr 9 13:49:54 UTC 2020\",\n os_uname=\"x86_64\", os_arch=\"x86_64\", version=\"4.2\", config_sum=\"\", merged_sum=\"\",\n manager_host=\"centos-8\", node_name=\"node01\", date_add=\"1612942494\",\n last_keepalive=\"253402300799\", group=\"\", sync_status=\"synced\", connection_status=\"active\",\n client_key_secret=None):\n\n create_agent_query = f'''global sql INSERT OR REPLACE INTO AGENT\n (id, name, ip, register_ip, internal_key, os_name, os_version, os_major, os_minor,\n os_codename, os_build, os_platform, os_uname, os_arch, version, config_sum, merged_sum,\n manager_host, node_name, date_add, last_keepalive, \"group\", sync_status, connection_status)\n VALUES\n ( {agent_id}, \"{name}\", \"{ip}\", \"{register_ip}\", \"{internal_key}\", \"{os_name}\", \"{os_version}\",\n \"{os_major}\", \"{os_minor}\", \"{os_codename}\", \"{os_build}\", \"{os_platform}\", \"{os_uname}\",\n \"{os_arch}\", \"{version}\", \"{config_sum}\", \"{merged_sum}\", \"{manager_host}\", \"{node_name}\",\n \"{date_add}\", \"{last_keepalive}\", \"{group}\", \"{sync_status}\", \"{connection_status}\")\n '''\n try:\n query_wdb(create_agent_query)\n except sqlite3.IntegrityError:\n logging.error(\"Failed to mock agent in database!\")\n\n\ndef load_db(db_path):\n \"\"\"Load a database in db_path\n\n Args:\n db_path (str): path to the database\n \"\"\"\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n return conn, cursor\n\n\n@mock_db\ndef run_query(db_query, db_path=GLOBAL_DB_PATH):\n \"\"\"Method used to run sqlite queries on wazuh databases\n\n This function will execute the sqlite3 query `db_query` in `db_path` database.\n\n Args:\n db_query (string): sqlite3 valid query\n db_path (string): path to the database where the query will be run\n \"\"\"\n\n conn, _ = load_db(db_path)\n\n try:\n with conn:\n conn.execute(db_query)\n finally:\n conn.close()\n\n\ndef get_query_result(query, db_path=GLOBAL_DB_PATH):\n \"\"\"Return the result of a query in a specified DB\n\n Args:\n db_path (str): path to the database\n query (str): SQL query. (SELECT * ..)\n\n Returns:\n result (List[list]): each row is the query result row and each column is the query field value\n \"\"\"\n global cursor, db\n try:\n db, cursor = load_db(db_path)\n cursor.execute(query)\n records = cursor.fetchall()\n result = []\n\n for row in records:\n result.append(', '.join([f'{item}' for item in row]))\n\n return result\n\n finally:\n cursor.close()\n db.close()\n\n\ndef query_wdb(command):\n \"\"\"Make queries to wazuh-db using the wdb socket.\n\n Args:\n command (str): wazuh-db command alias. 
For example `global get-agent-info 000`.\n\n Returns:\n list: Query response data\n \"\"\"\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(WAZUH_DB_SOCKET_PATH)\n\n data = []\n\n try:\n sock.send(wazuh_pack(len(command)) + command.encode())\n\n rcv = sock.recv(4)\n\n if len(rcv) == 4:\n data_len = wazuh_unpack(rcv)\n\n data = sock.recv(data_len).decode()\n\n # Remove response header and cast str to list of dictionaries\n # From --> 'ok [ {data1}, {data2}...]' To--> [ {data1}, data2}...]\n if len(data.split()) > 1 and data.split()[0] == 'ok':\n data = json.loads(' '.join(data.split(' ')[1:]))\n finally:\n sock.close()\n\n return data\n\n\ndef clean_agents_from_db():\n \"\"\"\n Clean agents from DB\n \"\"\"\n command = 'global sql DELETE FROM agent WHERE id != 0'\n try:\n query_wdb(command)\n except Exception:\n raise Exception('Unable to clean agents')\n\n\ndef clean_groups_from_db():\n \"\"\"\n Clean groups table from global.db\n \"\"\"\n command = 'global sql DELETE FROM \"group\"'\n try:\n query_wdb(command)\n except Exception:\n raise Exception('Unable to clean groups table.')\n\n\ndef clean_belongs():\n \"\"\"\n Clean belong table from global.db\n \"\"\"\n command = 'global sql DELETE FROM belongs'\n try:\n query_wdb(command)\n except Exception:\n raise Exception('Unable to clean belongs table.')\n\n\ndef insert_agent_in_db(id=1, name='TestAgent', ip='any', registration_time=0, connection_status=0,\n disconnection_time=0):\n \"\"\"\n Write agent in global.db\n \"\"\"\n insert_command = f'global insert-agent {{\"id\":{id},\"name\":\"{name}\",\"ip\":\"{ip}\",\"date_add\":{registration_time}}}'\n update_command = f'global sql UPDATE agent SET connection_status = \"{connection_status}\",\\\n disconnection_time = \"{disconnection_time}\" WHERE id = {id};'\n try:\n query_wdb(insert_command)\n query_wdb(update_command)\n except Exception:\n raise Exception(f\"Unable to add agent {id}\")\n\n\n# Insert agents into DB and assign them into a group\ndef insert_agent_into_group(total_agents):\n for i in range(total_agents):\n id = i + 1\n name = 'Agent-test' + str(id)\n date = time.time()\n command = f'global insert-agent {{\"id\":{id},\"name\":\"{name}\",\"date_add\":{date}}}'\n results = query_wdb(command)\n assert results == 'ok'\n\n command = f'''global set-agent-groups {{\"mode\":\"append\",\"sync_status\":\"syncreq\",\n \"source\":\"remote\",\"data\":[{{\"id\":{id},\"groups\":[\"Test_group{id}\"]}}]}}'''\n results = query_wdb(command)\n assert results == 'ok'\n\n\ndef remove_agent(agent_id):\n \"\"\"Function that wraps the needed queries to remove an agent.\n\n Args:\n agent_id(int): Unique identifier of an agent\n \"\"\"\n data = query_wdb(f\"global delete-agent {agent_id}\").split()\n assert data[0] == 'ok', f\"Unable to remove agent {agent_id} - {data[1]}\"\n\n\ndef calculate_global_hash():\n \"\"\"Function that calculates and retrieves the actual global groups hash.\n\n Returns:\n str: Actual global groups hash.\n \"\"\"\n GET_GROUP_HASH = '''global sql SELECT group_hash FROM agent WHERE\n id > 0 AND group_hash IS NOT NULL ORDER BY id'''\n\n result = query_wdb(GET_GROUP_HASH)\n group_hashes = [item['group_hash'] for item in result]\n\n return hashlib.sha1(\"\".join(group_hashes).encode()).hexdigest()"}}},{"rowIdx":2025,"cells":{"id":{"kind":"number","value":2025,"string":"2,025"},"label":{"kind":"string","value":"autogenerate"},"text":{"kind":"string","value":"#!/usr/bin/env python3\n\"\"\"\nmanager.py - Script which acts as the user interface for schema 
management.\n\n\"\"\"\n\nimport argparse\nimport json\nimport os\n\nfrom schema_parser import LDAPSchemaParser\nfrom generator import SchemaGenerator\n\nlocaldir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef generate(infile, schema_type=None, out_file=None):\n \"\"\"Function generates the LDAP schema definitions from the JSON data\n\n Args:\n schema_type (str): The schema type to be generated (opendj)\n \"\"\"\n fp = open(infile, 'r')\n json_text = fp.read()\n fp.close()\n gen = SchemaGenerator(json_text)\n if schema_type == 'opendj':\n schema_str = gen.generate_ldif()\n else:\n schema_str = gen.generate_schema()\n if out_file:\n with open(out_file, 'w') as w:\n w.write(schema_str)\n else:\n print(schema_str)\n\n\ndef METHOD_NAME():\n \"\"\"Function that generates the LDAP schemas for OpenDJ from the\n gluu_schema.json and custom_schema.json and puts them in their respective\n folders.\n \"\"\"\n opendj_folder = os.path.join(os.path.dirname(localdir), 'static/opendj/')\n\n fp = open(os.path.join(localdir, 'gluu_schema.json'), 'r')\n gluu_json = fp.read()\n fp.close()\n gen = SchemaGenerator(gluu_json)\n with open(os.path.join(opendj_folder, '101-ox.ldif'), 'w') as f:\n f.write(gen.generate_ldif())\n\n fp = open(os.path.join(localdir, 'custom_schema.json'), 'r')\n custom_json = fp.read()\n fp.close()\n gen = SchemaGenerator(custom_json)\n with open(os.path.join(opendj_folder, '77-customAttributes.ldif'), 'w') \\\n as f:\n f.write(gen.generate_ldif())\n\n\ndef run_tests():\n \"\"\"Function that runs the unit tests of the scripts in this package.\n \"\"\"\n # TODO\n pass\n\n\ndef make_json(filename):\n \"\"\"Function that parses the input schema file and generates JSON.\n \"\"\"\n parser = LDAPSchemaParser(filename)\n definitions = parser.parse()\n schema_dict = {}\n objectclasses = []\n attributetypes = []\n for obj in definitions['objectClasses']:\n obcl = {}\n props = ['oid', 'names', 'desc', 'must', 'may', 'sup', 'x_origin']\n for prop in props:\n if hasattr(obj, prop):\n if getattr(obj, prop):\n obcl[prop] = getattr(obj, prop)\n # obcl['obsolete'] = obj.obsolete\n if obj.kind == 0:\n obcl['kind'] = 'STRUCTURAL'\n elif obj.kind == 1:\n obcl['kind'] = 'ABSTRACT'\n elif obj.kind == 2:\n obcl['kind'] = 'AUXILIARY'\n objectclasses.append(obcl)\n\n for att in definitions['attributeTypes']:\n attype = {}\n props = ['oid', 'names', 'desc', 'equality', 'substr', 'ordering',\n 'syntax', 'x_origin']\n for prop in props:\n if hasattr(att, prop):\n if getattr(att, prop):\n attype[prop] = getattr(att, prop)\n # attype['no_user_mod'] = att.no_user_mod\n # attype['single_value'] = att.single_value\n # attype['obsolete'] = att.obsolete\n attributetypes.append(attype)\n\n schema_dict['objectClasses'] = objectclasses\n schema_dict['attributeTypes'] = attributetypes\n schema_dict['oidMacros'] = definitions['oidMacros']\n print(json.dumps(schema_dict, indent=4, sort_keys=True))\n\n\ndef make_schema_docs():\n schema = os.path.join(localdir, 'gluu_schema.json')\n f = open(schema)\n json_string = f.read()\n f.close()\n data = json.loads(json_string)\n objClasses = data['objectClasses']\n attTypes = data['attributeTypes']\n docs = ''\n\n for obj_class in objClasses:\n docs += \"\\n\\n## {}\".format(\" (or) \".join(obj_class['names']))\n if 'desc' in obj_class:\n docs += \"\\n_{}_\".format(obj_class['desc'].encode('utf-8'))\n\n for obj_attr in obj_class['may']:\n attr_docs_added = False\n for attr_type in attTypes:\n if obj_attr in attr_type['names']:\n docs += \"\\n* __{}__\".format(\" (or) 
\".join(attr_type['names']))\n if 'desc' in attr_type:\n docs += \": {}\".format(attr_type['desc'].encode('utf-8'))\n attr_docs_added = True\n break\n if not attr_docs_added:\n docs += \"\\n* __{}__\".format(obj_attr)\n print(docs)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"action\", help=\"the action you want to perform.\",\n choices=[\"autogenerate\", \"generate\", \"makejson\", \"makedocs\", \"test\"])\n parser.add_argument(\n \"--type\", help=\"the schema type you want to generate\",\n choices=[\"opendj\"])\n parser.add_argument(\n \"--filename\", help=\"the input file for various actions\")\n args = parser.parse_args()\n\n if args.action == 'generate':\n if args.filename:\n generate(args.filename, args.type)\n else:\n print(\"No JSON Input. Specify a JSON file with --filename\")\n elif args.action == 'test':\n run_tests()\n elif args.action == 'makejson':\n if args.filename:\n make_json(args.filename)\n else:\n print(\"No Schema Input. Specify schema file with --filename\")\n elif args.action == 'autogenerate':\n METHOD_NAME()\n elif args.action == 'makedocs':\n make_schema_docs()"}}},{"rowIdx":2026,"cells":{"id":{"kind":"number","value":2026,"string":"2,026"},"label":{"kind":"string","value":"concat"},"text":{"kind":"string","value":"# Copyright (c) 2023 zfit\nimport numpy as np\nimport tensorflow as tf\n\nimport zfit.z.numpy as znp\n\nSWITCH_ON = True\n\n\ndef is_tensor(x):\n return tf.is_tensor(x)\n\n\ndef has_tensor(x):\n return any(tf.is_tensor(t) for t in tf.nest.flatten(x))\n\n\ndef allclose_anyaware(x, y, rtol=1e-5, atol=1e-8):\n \"\"\"Tests if x and y are close by first testing equality (with numpy), then within the limits.\n\n The prepended equality test allow for ANY objects to compare positively if the x and y have the shape (1, n)\n with n arbitrary\n\n Args:\n x:\n y:\n rtol:\n atol:\n\n Returns:\n \"\"\"\n if not SWITCH_ON or has_tensor([x, y]):\n return znp.all(znp.less_equal(znp.abs(x - y), znp.abs(y) * rtol + atol))\n else:\n x = np.array(x)\n y = np.array(y)\n if any(ar.dtype == object for ar in (x, y)):\n from zfit.core.space import LimitRangeDefinition\n\n equal = []\n for x1, y1 in zip(x[0], y[0]):\n if isinstance(x1, LimitRangeDefinition) or isinstance(\n y1, LimitRangeDefinition\n ):\n equal.append(x1 < y1 or x1 > y1)\n else:\n equal.append(np.allclose(x1, y1, rtol=rtol, atol=atol))\n allclose = np.array(equal)[None, :]\n else:\n allclose = np.allclose(x, y, rtol=rtol, atol=atol)\n\n return allclose\n\n\ndef broadcast_to(input, shape):\n if not SWITCH_ON or is_tensor(input):\n return tf.broadcast_to(input, shape)\n else:\n return np.broadcast_to(input, shape)\n\n\ndef expand_dims(input, axis):\n if not SWITCH_ON or has_tensor(input):\n return znp.expand_dims(input, axis)\n else:\n return np.expand_dims(input, axis)\n\n\ndef reduce_prod(input_tensor, axis=None, keepdims=None):\n if not SWITCH_ON or has_tensor(input_tensor):\n return znp.prod(input_tensor, axis, keepdims=keepdims)\n else:\n if keepdims is None:\n return np.prod(input_tensor, axis)\n else:\n return np.prod(input_tensor, axis, keepdims=keepdims)\n\n\ndef equal(x, y):\n if not SWITCH_ON or is_tensor(x) or is_tensor(y):\n return znp.equal(x, y)\n else:\n return np.equal(x, y)\n\n\ndef reduce_all(input_tensor, axis=None):\n if not SWITCH_ON or has_tensor(input_tensor):\n if axis is None:\n input_tensor = [\n znp.reshape(ar, (-1,)) for ar in tf.nest.flatten(input_tensor)\n ]\n return znp.all(input_tensor, axis)\n else:\n out = np.all(input_tensor, 
axis)\n if out.shape == (1,):\n out = out[0]\n return out\n\n\ndef reduce_any(input_tensor, axis=None):\n if not SWITCH_ON or has_tensor(input_tensor):\n if axis is None:\n input_tensor = [\n znp.reshape(ar, (-1,)) for ar in tf.nest.flatten(input_tensor)\n ]\n return znp.any(input_tensor, axis)\n else:\n out = np.any(input_tensor, axis)\n if out.shape == (1,):\n out = out[0]\n return out\n\n\ndef logical_and(x, y):\n if not SWITCH_ON or has_tensor(x) or has_tensor(y):\n return znp.logical_and(x, y)\n else:\n return np.logical_and(x, y)\n\n\ndef logical_or(x, y):\n if not SWITCH_ON or has_tensor(x) or has_tensor(y):\n return znp.logical_or(x, y)\n else:\n return np.logical_or(x, y)\n\n\ndef less_equal(x, y):\n if not SWITCH_ON or has_tensor(x) or has_tensor(y):\n return znp.less_equal(x, y)\n else:\n return np.less_equal(x, y)\n\n\ndef greater_equal(x, y):\n if not SWITCH_ON or has_tensor(x) or has_tensor(y):\n return znp.greater_equal(x, y)\n else:\n return np.greater_equal(x, y)\n\n\ndef gather(x, indices=None, axis=None):\n if not SWITCH_ON or has_tensor(x):\n return tf.gather(x, indices=indices, axis=axis)\n else:\n return np.take(x, indices=indices, axis=axis)\n\n\ndef METHOD_NAME(values, axis):\n if not SWITCH_ON or has_tensor(values):\n return znp.concatenate(values, axis=axis)\n else:\n return np.concatenate(values, axis=axis)\n\n\ndef _try_convert_numpy(tensorlike):\n if hasattr(tensorlike, \"numpy\"):\n tensorlike = tensorlike.numpy()\n\n if not isinstance(tensorlike, np.ndarray):\n from zfit.util.exception import CannotConvertToNumpyError\n\n raise CannotConvertToNumpyError(\n f\"Cannot convert {tensorlike} to a Numpy array. This may be because the\"\n f\" object is a Tensor and the function is called in Graph mode (e.g. in\"\n f\"a `z.function` decorated function.\\n\"\n f\"If this error appears and is not understandable, it is most likely a bug.\"\n f\" Please open an issue on Github.\"\n )\n return tensorlike"}}},{"rowIdx":2027,"cells":{"id":{"kind":"number","value":2027,"string":"2,027"},"label":{"kind":"string","value":"test rst"},"text":{"kind":"string","value":"# Copyright 2023 Iguazio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ast\nfrom textwrap import dedent\n\nimport pytest\nimport yaml\n\nfrom mlrun.runtimes import funcdoc\nfrom tests.conftest import tests_root_directory\n\n\ndef load_rst_cases(name):\n with open(tests_root_directory / \"runtimes\" / name) as fp:\n data = yaml.load(fp)\n\n for i, case in enumerate(data):\n name = case.get(\"name\", \"\")\n tid = f\"{i} - {name}\"\n yield pytest.param(case[\"text\"], case[\"expected\"], id=tid)\n\n\n@pytest.mark.parametrize(\"text, expected\", load_rst_cases(\"rst_cases.yml\"))\ndef METHOD_NAME(text, expected):\n doc, params, ret = funcdoc.parse_rst(text)\n assert expected[\"doc\"].strip() == doc.strip(), \"doc\"\n assert expected[\"params\"] == params, \"params\"\n assert expected[\"ret\"] == ret, \"ret\"\n\n\ndef is_ast_func(obj):\n return isinstance(obj, ast.FunctionDef)\n\n\ndef ast_func(code):\n 
funcs = [s for s in ast.parse(code).body if is_ast_func(s)]\n assert len(funcs) == 1, f\"{len(funcs)} functions in:\\n{code}\"\n return funcs[0]\n\n\ndef eval_func(code):\n out = {}\n exec(code, None, out)\n funcs = [obj for obj in out.values() if callable(obj)]\n assert len(funcs) == 1, f\"more than one function in:\\n{code}\"\n return funcs[0]\n\n\ninfo_handlers = [\n (funcdoc.func_info, eval_func),\n (funcdoc.ast_func_info, ast_func),\n]\n\n\ndef load_info_cases():\n with open(tests_root_directory / \"runtimes\" / \"info_cases.yml\") as fp:\n cases = yaml.load(fp)\n\n for case in cases:\n for info_fn, conv in info_handlers:\n obj = conv(case[\"code\"])\n tid = f'{case[\"id\"]}-{info_fn.__name__}'\n expected = case[\"expected\"].copy()\n # No line info in evaled functions\n if info_fn is funcdoc.func_info:\n expected[\"lineno\"] = -1\n yield pytest.param(info_fn, obj, expected, id=tid)\n\n\n@pytest.mark.parametrize(\"info_fn, obj, expected\", load_info_cases())\ndef test_func_info(info_fn, obj, expected):\n out = info_fn(obj)\n assert expected == out\n\n\nfind_handlers_code = \"\"\"\ndef dec(n):\n return n - 1\n\n# mlrun:handler\ndef inc(n):\n return n + 1\n\"\"\"\n\nfind_handlers_expected = [\n {\n \"name\": \"inc\",\n \"doc\": \"\",\n \"return\": funcdoc.param_dict(),\n \"params\": [funcdoc.param_dict(\"n\")],\n \"lineno\": 6,\n \"has_varargs\": False,\n \"has_kwargs\": False,\n },\n]\n\n\ndef test_find_handlers():\n funcs = funcdoc.find_handlers(find_handlers_code)\n assert funcs == find_handlers_expected\n\n\nast_code_cases = [\n \"{'x': 1, 'y': 2}\",\n \"dict(x=1, y=2)\",\n \"{}\",\n \"[1, 2]\",\n \"[]\",\n \"(1, 2)\",\n \"()\",\n \"{1, 2}\",\n \"set()\",\n \"Point(1, 2)\",\n \"3\",\n \"'hello'\",\n \"None\",\n]\n\n\n@pytest.mark.parametrize(\"expr\", ast_code_cases)\ndef test_ast_code(expr):\n node = ast.parse(expr).body[0].value\n code = funcdoc.ast_code(node)\n assert expr == code\n\n\ndef test_ast_none():\n code = \"\"\"\n def fn() -> None:\n pass\n \"\"\"\n fn: ast.FunctionDef = ast.parse(dedent(code)).body[0]\n funcdoc.ast_func_info(fn)\n\n\n@pytest.mark.parametrize(\n \"func_code,expected_has_varargs,expected_has_kwargs\",\n [\n (\n \"\"\"\n def fn(p1,p2,*args,**kwargs) -> None:\n pass\n \"\"\",\n True,\n True,\n ),\n (\n \"\"\"\n def fn(p1,p2,*args) -> None:\n pass\n \"\"\",\n True,\n False,\n ),\n (\n \"\"\"\n def fn(p1,p2,**kwargs) -> None:\n pass\n \"\"\",\n False,\n True,\n ),\n (\n \"\"\"\n def fn(p1,p2) -> None:\n pass\n \"\"\",\n False,\n False,\n ),\n (\n \"\"\"\n def fn(p1,p2,**something) -> None:\n pass\n \"\"\",\n False,\n True,\n ),\n ],\n)\ndef test_ast_func_info_with_kwargs_and_args(\n func_code, expected_has_varargs, expected_has_kwargs\n):\n fn: ast.FunctionDef = ast.parse(dedent(func_code)).body[0]\n func_info = funcdoc.ast_func_info(fn)\n assert func_info[\"has_varargs\"] == expected_has_varargs\n assert func_info[\"has_kwargs\"] == expected_has_kwargs\n\n\ndef test_ast_compound():\n param_types = []\n with open(f\"{tests_root_directory}/runtimes/arc.txt\") as fp:\n code = fp.read()\n\n # collect the types of the function parameters\n # assumes each param is in a new line for simplicity\n for line in code.splitlines()[3:15]:\n if \":\" not in line:\n param_types.append(None)\n continue\n\n param_type = line[line.index(\":\") + 1 :]\n if \"=\" in param_type:\n param_type = param_type[: param_type.index(\"=\")]\n param_type = param_type[:-1].strip()\n param_types.append(param_type)\n\n fn = ast_func(code)\n info = funcdoc.ast_func_info(fn)\n for i, param in 
enumerate(info[\"params\"]):\n if i in (4, 8):\n continue\n assert (\n param[\"type\"] == param_types[i]\n ), f\"param at index {i} has a bad type value. param: {param}\"\n\n\nunderscore_code = \"\"\"\ndef info(message):\n _log('INFO', message)\n\ndef warning(message):\n _log('WARNING', message)\n\ndef _log(level, message):\n print(f'{level} - {message}')\n\"\"\"\n\n\ndef test_ignore_underscore():\n funcs = funcdoc.find_handlers(underscore_code)\n names = {fn[\"name\"] for fn in funcs}\n assert {\"info\", \"warning\"} == names, \"names\"\n\n\ndef test_annotate_mod():\n code = \"\"\"\n import mlrun\n\n def handler(data: mlrun.DataItem):\n ...\n \"\"\"\n\n handlers = funcdoc.find_handlers(dedent(code))\n param = handlers[0][\"params\"][0]\n assert param[\"type\"] == \"DataItem\""}}},{"rowIdx":2028,"cells":{"id":{"kind":"number","value":2028,"string":"2,028"},"label":{"kind":"string","value":"pause"},"text":{"kind":"string","value":"\"\"\"Support for audio output\n\nThe `audioio` module contains classes to provide access to audio IO.\n\nAll classes change hardware state and should be deinitialized when they\nare no longer needed if the program continues after use. To do so, either\ncall :py:meth:`!deinit` or use a context manager. See\n:ref:`lifetime-and-contextmanagers` for more info.\n\nFor more information on working with this module, refer to the\n`CircuitPython Essentials Learn Guide\n`_.\n\nSince CircuitPython 5, `RawSample` and `WaveFile` are moved\nto :mod:`audiocore`, and `Mixer` is moved to :mod:`audiomixer`.\n\nFor compatibility with CircuitPython 4.x, some builds allow the items in\n`audiocore` to be imported from `audioio`. This will be removed for all\nboards in a future build of CircuitPython.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Optional\n\nimport circuitpython_typing\nimport microcontroller\n\nclass AudioOut:\n \"\"\"Output an analog audio signal\"\"\"\n\n def __init__(\n self,\n left_channel: microcontroller.Pin,\n *,\n right_channel: Optional[microcontroller.Pin] = None,\n quiescent_value: int = 0x8000,\n ) -> None:\n \"\"\"Create a AudioOut object associated with the given pin(s). This allows you to\n play audio signals out on the given pin(s).\n\n :param ~microcontroller.Pin left_channel: The pin to output the left channel to\n :param ~microcontroller.Pin right_channel: The pin to output the right channel to\n :param int quiescent_value: The output value when no signal is present. 
Samples should start\n and end with this value to prevent audible popping.\n\n Simple 8ksps 440 Hz sin wave::\n\n import audiocore\n import audioio\n import board\n import array\n import time\n import math\n\n # Generate one period of sine wav.\n length = 8000 // 440\n sine_wave = array.array(\"H\", [0] * length)\n for i in range(length):\n sine_wave[i] = int(math.sin(math.pi * 2 * i / length) * (2 ** 15) + 2 ** 15)\n\n dac = audioio.AudioOut(board.SPEAKER)\n sine_wave = audiocore.RawSample(sine_wave, sample_rate=8000)\n dac.play(sine_wave, loop=True)\n time.sleep(1)\n dac.stop()\n\n Playing a wave file from flash::\n\n import board\n import audioio\n import digitalio\n\n # Required for CircuitPlayground Express\n speaker_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE)\n speaker_enable.switch_to_output(value=True)\n\n data = open(\"cplay-5.1-16bit-16khz.wav\", \"rb\")\n wav = audiocore.WaveFile(data)\n a = audioio.AudioOut(board.A0)\n\n print(\"playing\")\n a.play(wav)\n while a.playing:\n pass\n print(\"stopped\")\"\"\"\n ...\n def deinit(self) -> None:\n \"\"\"Deinitialises the AudioOut and releases any hardware resources for reuse.\"\"\"\n ...\n def __enter__(self) -> AudioOut:\n \"\"\"No-op used by Context Managers.\"\"\"\n ...\n def __exit__(self) -> None:\n \"\"\"Automatically deinitializes the hardware when exiting a context. See\n :ref:`lifetime-and-contextmanagers` for more info.\"\"\"\n ...\n def play(\n self, sample: circuitpython_typing.AudioSample, *, loop: bool = False\n ) -> None:\n \"\"\"Plays the sample once when loop=False and continuously when loop=True.\n Does not block. Use `playing` to block.\n\n Sample must be an `audiocore.WaveFile`, `audiocore.RawSample`, `audiomixer.Mixer` or `audiomp3.MP3Decoder`.\n\n The sample itself should consist of 16 bit samples. Microcontrollers with a lower output\n resolution will use the highest order bits to output. For example, the SAMD21 has a 10 bit\n DAC that ignores the lowest 6 bits when playing 16 bit samples.\"\"\"\n ...\n def stop(self) -> None:\n \"\"\"Stops playback and resets to the start of the sample.\"\"\"\n ...\n playing: bool\n \"\"\"True when an audio sample is being output even if `paused`. (read-only)\"\"\"\n def METHOD_NAME(self) -> None:\n \"\"\"Stops playback temporarily while remembering the position. Use `resume` to resume playback.\"\"\"\n ...\n def resume(self) -> None:\n \"\"\"Resumes sample playback after :py:func:`pause`.\"\"\"\n ...\n paused: bool\n \"\"\"True when playback is paused. 
(read-only)\"\"\""}}},{"rowIdx":2029,"cells":{"id":{"kind":"number","value":2029,"string":"2,029"},"label":{"kind":"string","value":"test rename fields check log remove only"},"text":{"kind":"string","value":"from __future__ import annotations\n\nimport logging\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom bentoml._internal.configuration.helpers import flatten_dict\nfrom bentoml._internal.configuration.helpers import is_valid_ip_address\nfrom bentoml._internal.configuration.helpers import load_config_file\nfrom bentoml._internal.configuration.helpers import rename_fields\nfrom bentoml.exceptions import BentoMLConfigException\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from _pytest.logging import LogCaptureFixture\n\n\ndef test_flatten_dict():\n assert dict(flatten_dict({\"a\": 1, \"b\": {\"c\": 2, \"d\": {\"e\": 3}}})) == {\n \"a\": 1,\n \"b.c\": 2,\n \"b.d.e\": 3,\n }\n\n assert dict(\n flatten_dict({\"runners\": {\"iris_clf\": {\"nvidia.com/gpu\": [0, 1]}}})\n ) == {'runners.iris_clf.\"nvidia.com/gpu\"': [0, 1]}\n\n assert dict(flatten_dict({\"a\": 1, \"b\": 2}, sep=\"_\")) == {\"a\": 1, \"b\": 2}\n\n\ndef test_rename_fields_field_in_dict():\n # If given field is in the dictionary, it will be renamed\n d = {\"a\": 1, \"b\": 2}\n rename_fields(d, \"a\", \"x\")\n assert \"a\" not in d\n assert \"x\" in d\n assert d[\"x\"] == 1\n assert d[\"b\"] == 2\n\n\ndef test_rename_fields_field_not_in_dict():\n # If given field is not in the dictionary, nothing will happen\n d = {\"a\": 1, \"b\": 2}\n rename_fields(d, \"c\", \"d\")\n assert \"a\" in d\n assert \"b\" in d\n assert d[\"a\"] == 1\n assert d[\"b\"] == 2\n\n\ndef test_rename_fields_remove_only():\n # If given field is in the dictionary, and remove_only is True, it will be removed.\n d = {\"a\": 1, \"b\": 2}\n rename_fields(d, \"a\", remove_only=True)\n assert \"a\" not in d\n rename_fields(d, \"b\", remove_only=True)\n assert len(d) == 0\n\n\ndef test_rename_fields_check_log(caplog: LogCaptureFixture):\n d = {\"api_server.port\": 5000}\n with caplog.at_level(logging.WARNING):\n rename_fields(d, \"api_server.port\", \"api_server.http.port\")\n assert (\n \"Field 'api_server.port' is deprecated and has been renamed to 'api_server.http.port'\"\n in caplog.text\n )\n assert \"api_server.http.port\" in d and d[\"api_server.http.port\"] == 5000\n\n\ndef METHOD_NAME(caplog: LogCaptureFixture):\n d = {\"api_server.port\": 5000}\n with caplog.at_level(logging.WARNING):\n rename_fields(d, \"api_server.port\", remove_only=True)\n assert \"Field 'api_server.port' is deprecated and will be removed.\" in caplog.text\n assert len(d) == 0\n\n\ndef test_rename_fields_exception():\n # If no replace_with field is given, an AssertionError will be raised\n d = {\"api_server.port\": 5000}\n with pytest.raises(AssertionError, match=\"'replace_with' must be provided.\"):\n rename_fields(d, \"api_server.port\")\n\n with pytest.raises(AssertionError, match=\"'replace_with' must be provided.\"):\n rename_fields(d, \"api_server.port\", remove_only=False)\n\n # If the given dictionary is not flattened, a ValueError will be raised\n d = {\"a\": 1, \"b\": {\"c\": 2}}\n with pytest.raises(ValueError, match=\"Given dictionary is not flattened. *\"):\n rename_fields(d, \"b.c\", \"b.d.c\")\n\n # If the given dictionary is not flattened + no replace_with field is given, a ValueError will be raised\n d = {\"a\": 1, \"b\": {\"c\": 2}}\n with pytest.raises(ValueError, match=\"Given dictionary is not flattened. 
*\"):\n rename_fields(d, \"b.c\")\n\n\ndef test_valid_load_config_file(tmp_path: Path):\n config = tmp_path / \"configuration.yaml\"\n config.write_text(\"api_server:\\n port: 5000\")\n assert load_config_file(config.__fspath__()) == {\"api_server\": {\"port\": 5000}}\n\n\ndef test_invalid_load_config_file():\n with pytest.raises(BentoMLConfigException) as e:\n load_config_file(\"/tmp/nonexistent.yaml\")\n assert \"Configuration file /tmp/nonexistent.yaml not found.\" in str(e.value)\n\n with pytest.raises(BentoMLConfigException) as e:\n load_config_file(\"\\\\tmp\\\\invalid.yaml\")\n assert \"Configuration file \\\\tmp\\\\invalid.yaml not found.\" in str(e.value)\n\n\ndef test_valid_ip_address():\n assert is_valid_ip_address(\"0.0.0.0\")\n assert is_valid_ip_address(\"192.192.192.192\")\n assert is_valid_ip_address(\"255.255.255.255\")\n\n\ndef test_invalid_ip_address():\n assert not is_valid_ip_address(\"asdfadsf:143\")\n assert not is_valid_ip_address(\"asdfadsf\")\n assert not is_valid_ip_address(\"0.0.0.0.0\")\n assert not is_valid_ip_address(\"0.0.0.\")\n assert not is_valid_ip_address(\".0.0.0\")\n assert not is_valid_ip_address(\"x.0.0.0\")\n assert not is_valid_ip_address(\"255.255.255.256\")\n assert not is_valid_ip_address(\"255.255.256.255\")\n assert not is_valid_ip_address(\"255.256.255.255\")\n assert not is_valid_ip_address(\"256.255.255.255\")\n assert not is_valid_ip_address(\"256.256.256.256\")\n assert not is_valid_ip_address(\"\")"}}},{"rowIdx":2030,"cells":{"id":{"kind":"number","value":2030,"string":"2,030"},"label":{"kind":"string","value":"scale"},"text":{"kind":"string","value":"#!/usr/bin/env python\n\n############################################################################\n#\n# MODULE: r.out.kde\n# AUTHOR(S): Anna Petrasova\n#\n# PURPOSE:\n# COPYRIGHT: (C) 2013 - 2019 by the GRASS Development Team\n#\n# This program is free software under the GNU General Public\n# License (>=v2). 
Read the file COPYING that comes with GRASS\n# for details.\n#\n#############################################################################\n\n# %module\n# % description: Exports raster with variable transparency into an image file\n# % keyword: raster\n# % keyword: kernel density\n# % keyword: visualization\n# % keyword: transparency\n# % keyword: heatmap\n# %end\n\n# %option G_OPT_R_INPUT\n# % description: Raster map to be rendered with semi-transparency\n# %end\n\n# %option G_OPT_R_INPUT\n# % key: background\n# % description: Background raster map\n# %end\n\n# %option G_OPT_F_OUTPUT\n# % description: Rendered output file\n# %end\n\n# %option\n# % key: method\n# % type: string\n# % options: linear,logistic\n# % description: Method to scale transparency\n# %end\n\n\nimport os\nimport tempfile\nimport atexit\nimport shutil\nfrom math import exp\nimport grass.script as gscript\n\n\nTMPRAST = []\nTMPDIR = tempfile.mkdtemp()\n\n\ndef cleanup():\n gscript.run_command(\n \"g.remove\", name=\",\".join(TMPRAST), flags=\"f\", type=\"raster\", quiet=True\n )\n shutil.rmtree(TMPDIR)\n\n\ndef main(rinput, background, output, method):\n try:\n from PIL import Image\n except ImportError:\n gscript.fatal(\"Cannot import PIL.\" \" Please install the Python pillow package.\")\n\n if \"@\" in rinput:\n rinput = rinput.split(\"@\")[0]\n suffix = \"_\" + os.path.basename(gscript.tempfile(False))\n tmpname = rinput + suffix\n gscript.run_command(\"g.copy\", raster=[rinput, tmpname])\n TMPRAST.append(tmpname)\n gscript.run_command(\"r.colors\", map=tmpname, color=\"grey\")\n\n reg = gscript.region()\n width = reg[\"cols\"]\n height = reg[\"rows\"]\n\n fg_out = os.path.join(TMPDIR, \"foreground.png\")\n bg_out = os.path.join(TMPDIR, \"background.png\")\n intensity_tmp = os.path.join(TMPDIR, \"intensity.png\")\n gscript.run_command(\n \"d.mon\",\n start=\"cairo\",\n output=fg_out,\n width=width,\n height=height,\n bgcolor=\"black\",\n )\n gscript.run_command(\"d.rast\", map=rinput)\n gscript.run_command(\"d.mon\", stop=\"cairo\")\n\n # background\n gscript.run_command(\n \"d.mon\", start=\"cairo\", output=bg_out, width=width, height=height\n )\n gscript.run_command(\"d.rast\", map=background)\n gscript.run_command(\"d.mon\", stop=\"cairo\")\n\n # greyscale\n gscript.run_command(\n \"d.mon\", start=\"cairo\", output=intensity_tmp, width=width, height=height\n )\n gscript.run_command(\"d.rast\", map=tmpname)\n gscript.run_command(\"d.mon\", stop=\"cairo\")\n\n # put together with transparency\n foreground = Image.open(fg_out)\n background = Image.open(bg_out)\n intensity = Image.open(intensity_tmp)\n\n foreground = foreground.convert(\"RGBA\")\n data_f = foreground.getdata()\n data_i = intensity.getdata()\n newData = []\n for i in range(len(data_f)):\n intens = data_i[i][0]\n if intens == 0:\n newData.append((data_f[i][0], data_f[i][1], data_f[i][2], 0))\n else:\n newData.append(\n (\n data_f[i][0],\n data_f[i][1],\n data_f[i][2],\n METHOD_NAME(0, 255, intens, method),\n )\n )\n foreground.putdata(newData)\n background.paste(foreground, (0, 0), foreground)\n background.save(output)\n\n\ndef METHOD_NAME(cmin, cmax, intens, method):\n # scale to 0 - 1\n val = (intens - cmin) / float((cmax - cmin))\n if method == \"logistic\":\n val = 1.0 / (1 + exp(-10 * (val - 0.5)))\n val *= 255\n return int(val)\n\n\nif __name__ == \"__main__\":\n options, flags = gscript.parser()\n rinput = options[\"input\"]\n bg = options[\"background\"]\n output = options[\"output\"]\n method = options[\"method\"]\n 
atexit.register(cleanup)\n main(rinput, bg, output, method)"}}},{"rowIdx":2031,"cells":{"id":{"kind":"number","value":2031,"string":"2,031"},"label":{"kind":"string","value":"bctester"},"text":{"kind":"string","value":"#!/usr/bin/env python3\n# Copyright 2014 BitPay Inc.\n# Copyright 2016-2017 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file LICENSE or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test framework for defi utils.\n\nRuns automatically during `make check`.\n\nCan also be run manually.\"\"\"\n\nimport argparse\n\n# import binascii # TODO: (temp) it's used in bctest\nimport configparser\n\n# import difflib # TODO: (temp) it's used in bctest\nimport json\nimport logging\nimport os\nimport pprint\n\n# import subprocess # TODO: (temp) it's used in bctest\nimport sys\n\n\ndef main():\n config = configparser.ConfigParser()\n config.optionxform = str\n config.read_file(\n open(os.path.join(os.path.dirname(__file__), \"../config.ini\"), encoding=\"utf8\")\n )\n env_conf = dict(config.items(\"environment\"))\n\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n args = parser.parse_args()\n verbose = args.verbose\n\n if verbose:\n level = logging.DEBUG\n else:\n level = logging.ERROR\n formatter = \"%(asctime)s - %(levelname)s - %(message)s\"\n # Add the format/level to the logger\n logging.basicConfig(format=formatter, level=level)\n\n METHOD_NAME(\n os.path.join(env_conf[\"SRCDIR\"], \"test\", \"util\", \"data\"),\n \"defi-util-test.json\",\n env_conf,\n )\n\n\ndef METHOD_NAME(testDir, input_basename, buildenv):\n \"\"\"Loads and parses the input file, runs all tests and reports results\"\"\"\n input_filename = os.path.join(testDir, input_basename)\n raw_data = open(input_filename, encoding=\"utf8\").read()\n input_data = json.loads(raw_data)\n\n failed_testcases = []\n\n for testObj in input_data:\n try:\n bctest(testDir, testObj, buildenv)\n logging.info(\"PASSED: \" + testObj[\"description\"])\n except Exception:\n logging.info(\"FAILED: \" + testObj[\"description\"])\n failed_testcases.append(testObj[\"description\"])\n\n if failed_testcases:\n error_message = \"FAILED_TESTCASES:\\n\"\n error_message += pprint.pformat(failed_testcases, width=400)\n logging.error(error_message)\n sys.exit(1)\n else:\n sys.exit(0)\n\n\ndef bctest(testDir, testObj, buildenv):\n \"\"\"Runs a single test, comparing output and RC to expected output and RC.\n\n Raises an error if input can't be read, executable fails, or output/RC\n are not as expected. 
Error is caught by bctester() and reported.\n \"\"\"\n return\n # # Get the exec names and arguments # TODO: (temp) disable functional tests\n # execprog = os.path.join(buildenv[\"BUILDDIR\"], \"src\", testObj[\"exec\"] + buildenv[\"EXEEXT\"])\n # execargs = testObj['args']\n # execrun = [execprog] + execargs\n #\n # # Read the input data (if there is any)\n # stdinCfg = None\n # inputData = None\n # if \"input\" in testObj:\n # filename = os.path.join(testDir, testObj[\"input\"])\n # inputData = open(filename, encoding=\"utf8\").read()\n # stdinCfg = subprocess.PIPE\n #\n # # Read the expected output data (if there is any)\n # outputFn = None\n # outputData = None\n # outputType = None\n # if \"output_cmp\" in testObj:\n # outputFn = testObj['output_cmp']\n # outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)\n # try:\n # outputData = open(os.path.join(testDir, outputFn), encoding=\"utf8\").read()\n # except:\n # logging.error(\"Output file \" + outputFn + \" can not be opened\")\n # raise\n # if not outputData:\n # logging.error(\"Output data missing for \" + outputFn)\n # raise Exception\n # if not outputType:\n # logging.error(\"Output file %s does not have a file extension\" % outputFn)\n # raise Exception\n #\n # # Run the test\n # proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n # try:\n # outs = proc.communicate(input=inputData)\n # except OSError:\n # logging.error(\"OSError, Failed to execute \" + execprog)\n # raise\n #\n # if outputData:\n # data_mismatch, formatting_mismatch = False, False\n # # Parse command output and expected output\n # try:\n # a_parsed = parse_output(outs[0], outputType)\n # except Exception as e:\n # logging.error('Error parsing command output as %s: %s' % (outputType, e))\n # raise\n # try:\n # b_parsed = parse_output(outputData, outputType)\n # except Exception as e:\n # logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))\n # raise\n # # Compare data\n # if a_parsed != b_parsed:\n # logging.error(\"Output data mismatch for \" + outputFn + \" (format \" + outputType + \")\")\n # data_mismatch = True\n # # Compare formatting\n # if outs[0] != outputData:\n # error_message = \"Output formatting mismatch for \" + outputFn + \":\\n\"\n # error_message += \"\".join(difflib.context_diff(outputData.splitlines(True),\n # outs[0].splitlines(True),\n # fromfile=outputFn,\n # tofile=\"returned\"))\n # logging.error(error_message)\n # formatting_mismatch = True\n #\n # assert not data_mismatch and not formatting_mismatch\n #\n # # Compare the return code to the expected return code\n # wantRC = 0\n # if \"return_code\" in testObj:\n # wantRC = testObj['return_code']\n # if proc.returncode != wantRC:\n # logging.error(\"Return code mismatch for \" + outputFn)\n # raise Exception\n #\n # if \"error_txt\" in testObj:\n # want_error = testObj[\"error_txt\"]\n # # Compare error text\n # # TODO: ideally, we'd compare the strings exactly and also assert\n # # That stderr is empty if no errors are expected. However, defi-tx\n # # emits DISPLAY errors when running as a windows application on\n # # linux through wine. 
Just assert that the expected error text appears\n # # somewhere in stderr.\n # if want_error not in outs[1]:\n # logging.error(\"Error mismatch:\\n\" + \"Expected: \" + want_error + \"\\nReceived: \" + outs[1].rstrip())\n # raise Exception\n\n\n#\n# def parse_output(a, fmt):\n# \"\"\"Parse the output according to specified format.\n#\n# Raise an error if the output can't be parsed.\"\"\"\n# if fmt == 'json': # json: compare parsed data\n# return json.loads(a)\n# elif fmt == 'hex': # hex: parse and compare binary data\n# return binascii.a2b_hex(a.strip())\n# else:\n# raise NotImplementedError(\"Don't know how to compare %s\" % fmt)\n\nif __name__ == \"__main__\":\n main()"}}},{"rowIdx":2032,"cells":{"id":{"kind":"number","value":2032,"string":"2,032"},"label":{"kind":"string","value":"group hashes"},"text":{"kind":"string","value":"import time\nimport uuid\n\nimport pytest\n\nfrom sentry.event_manager import _save_aggregate\nfrom sentry.eventstore.models import Event\nfrom sentry.grouping.result import CalculatedHashes\nfrom sentry.models import Group, GroupHash\nfrom sentry.testutils.pytest.fixtures import django_db_all\n\n\n@pytest.fixture\ndef fast_save(default_project, task_runner):\n def inner(last_frame):\n data = {\"timestamp\": time.time(), \"type\": \"error\"}\n evt = Event(\n default_project.id,\n uuid.uuid4().hex,\n data=data,\n )\n\n with task_runner():\n return _save_aggregate(\n evt,\n hashes=CalculatedHashes(\n hashes=[\"a\" * 32, \"b\" * 32],\n hierarchical_hashes=[\"c\" * 32, \"d\" * 32, \"e\" * 32, last_frame * 32],\n tree_labels=[\n [\n {\n \"function\": \"foo\",\n \"package\": \"\",\n \"is_sentinel\": False,\n \"is_prefix\": False,\n \"datapath\": \"\",\n }\n ],\n [\n {\n \"function\": \"bar\",\n \"package\": \"\",\n \"is_sentinel\": False,\n \"is_prefix\": False,\n \"datapath\": \"\",\n }\n ],\n [\n {\n \"function\": \"baz\",\n \"package\": \"\",\n \"is_sentinel\": False,\n \"is_prefix\": False,\n \"datapath\": \"\",\n }\n ],\n [\n {\n \"function\": \"bam\",\n \"package\": \"\",\n \"is_sentinel\": False,\n \"is_prefix\": False,\n \"datapath\": \"\",\n }\n ],\n ],\n ),\n release=None,\n metadata={},\n received_timestamp=0,\n level=10,\n culprit=\"\",\n )\n\n return inner\n\n\ndef METHOD_NAME(group_id):\n return {gh.hash for gh in GroupHash.objects.filter(group_id=group_id)}\n\n\ndef _assoc_hash(group, hash):\n gh = GroupHash.objects.get_or_create(project=group.project, hash=hash)[0]\n assert gh.group is None or gh.group.id != group.id\n gh.group = group\n gh.save()\n\n\n@django_db_all\ndef test_move_all_events(default_project, fast_save):\n group_info = fast_save(\"f\")\n\n assert group_info.is_new\n assert not group_info.is_regression\n\n new_group_info = fast_save(\"f\")\n assert not new_group_info.is_new\n assert not new_group_info.is_regression\n assert new_group_info.group.id == group_info.group.id\n\n _assoc_hash(group_info.group, \"a\" * 32)\n _assoc_hash(group_info.group, \"b\" * 32)\n\n assert METHOD_NAME(group_info.group.id) == {\"a\" * 32, \"b\" * 32, \"c\" * 32}\n assert Group.objects.get(id=new_group_info.group.id).title == \"foo\"\n\n # simulate split operation where all events of group are moved into a more specific hash\n GroupHash.objects.filter(group=group_info.group).delete()\n GroupHash.objects.create(project=default_project, hash=\"f\" * 32, group_id=group_info.group.id)\n\n new_group_info = fast_save(\"f\")\n assert not new_group_info.is_new\n assert not new_group_info.is_regression\n assert new_group_info.group.id == group_info.group.id\n\n assert 
{g.hash for g in GroupHash.objects.filter(group=group_info.group)} == {\n # one hierarchical hash associated\n # no flat hashes associated when sorting into split group!\n \"f\"\n * 32,\n }\n\n assert Group.objects.get(id=new_group_info.group.id).title == \"bam\"\n\n new_group_info = fast_save(\"g\")\n assert new_group_info.is_new\n assert not new_group_info.is_regression\n assert new_group_info.group.id != group_info.group.id\n\n assert METHOD_NAME(new_group_info.group.id) == {\"c\" * 32}\n assert Group.objects.get(id=new_group_info.group.id).title == \"foo\"\n\n\n@django_db_all\ndef test_partial_move(default_project, fast_save):\n group_info = fast_save(\"f\")\n assert group_info.is_new\n assert not group_info.is_regression\n\n new_group_info = fast_save(\"g\")\n assert not new_group_info.is_new\n assert not new_group_info.is_regression\n assert new_group_info.group.id == group_info.group.id\n\n assert METHOD_NAME(group_info.group.id) == {\"c\" * 32}\n\n # simulate split operation where event \"f\" of group is moved into a more specific hash\n group2 = Group.objects.create(project=default_project)\n f_hash = GroupHash.objects.create(project=default_project, hash=\"f\" * 32, group_id=group2.id)\n\n new_group_info = fast_save(\"f\")\n assert not new_group_info.is_new\n assert not new_group_info.is_regression\n assert new_group_info.group.id == group2.id\n\n assert METHOD_NAME(new_group_info.group.id) == {\n # one hierarchical hash associated\n # no flat hashes associated when sorting into split group!\n \"f\"\n * 32,\n }\n\n new_group_info = fast_save(\"g\")\n assert not new_group_info.is_new\n assert not new_group_info.is_regression\n assert new_group_info.group.id == group_info.group.id\n\n assert METHOD_NAME(new_group_info.group.id) == {\n \"c\" * 32,\n }\n\n f_hash.delete()\n\n new_group_info = fast_save(\"f\")\n assert not new_group_info.is_new\n assert not new_group_info.is_regression\n assert new_group_info.group.id == group_info.group.id"}}},{"rowIdx":2033,"cells":{"id":{"kind":"number","value":2033,"string":"2,033"},"label":{"kind":"string","value":"event"},"text":{"kind":"string","value":"#\n# This file is part of pretix (Community Edition).\n#\n# Copyright (C) 2014-2020 Raphael Michel and contributors\n# Copyright (C) 2020-2021 rami.io GmbH and contributors\n#\n# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General\n# Public License as published by the Free Software Foundation in version 3 of the License.\n#\n# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are\n# applicable granting you additional permissions and placing additional restrictions on your usage of this software.\n# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive\n# this file, see .\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied\n# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more\n# details.\n#\n# You should have received a copy of the GNU Affero General Public License along with this program. 
If not, see\n# .\n#\nfrom datetime import datetime, time\nfrom zoneinfo import ZoneInfo\n\nimport pytest\nfrom django_scopes import scope\n\nfrom pretix.base.models import Event, Organizer\nfrom pretix.base.reldate import RelativeDate, RelativeDateWrapper\n\nTOKYO = ZoneInfo('Asia/Tokyo')\nBERLIN = ZoneInfo('Europe/Berlin')\n\n\n@pytest.fixture\ndef METHOD_NAME():\n o = Organizer.objects.create(name='Dummy', slug='dummy')\n METHOD_NAME = Event.objects.create(\n organizer=o, name='Dummy', slug='dummy',\n date_from=datetime(2017, 12, 27, 5, 0, 0, tzinfo=TOKYO),\n presale_start=datetime(2017, 12, 1, 5, 0, 0, tzinfo=TOKYO),\n plugins='pretix.plugins.banktransfer'\n\n )\n METHOD_NAME.settings.timezone = \"Asia/Tokyo\"\n return METHOD_NAME\n\n\n@pytest.mark.django_db\ndef test_absolute_date(METHOD_NAME):\n d = datetime(2017, 12, 25, 5, 0, 0, tzinfo=TOKYO)\n rdw = RelativeDateWrapper(d)\n assert rdw.datetime(METHOD_NAME) == d\n assert rdw.to_string() == d.isoformat()\n\n\n@pytest.mark.django_db\ndef test_relative_date_without_time(METHOD_NAME):\n rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='date_from', minutes_before=None))\n assert rdw.datetime(METHOD_NAME).astimezone(TOKYO) == datetime(2017, 12, 26, 5, 0, 0, tzinfo=TOKYO)\n assert rdw.to_string() == 'RELDATE/1/-/date_from/'\n\n\n@pytest.mark.django_db\ndef test_relative_date_other_base_point(METHOD_NAME):\n with scope(organizer=METHOD_NAME.organizer):\n rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_start', minutes_before=None))\n assert rdw.datetime(METHOD_NAME) == datetime(2017, 11, 30, 5, 0, 0, tzinfo=TOKYO)\n assert rdw.to_string() == 'RELDATE/1/-/presale_start/'\n\n # presale_end is unset, defaults to date_from\n rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_end', minutes_before=None))\n assert rdw.datetime(METHOD_NAME) == datetime(2017, 12, 26, 5, 0, 0, tzinfo=TOKYO)\n assert rdw.to_string() == 'RELDATE/1/-/presale_end/'\n\n # subevent base\n se = METHOD_NAME.subevents.create(name=\"SE1\", date_from=datetime(2017, 11, 27, 5, 0, 0, tzinfo=TOKYO))\n rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='date_from', minutes_before=None))\n assert rdw.datetime(se) == datetime(2017, 11, 26, 5, 0, 0, tzinfo=TOKYO)\n\n # presale_start is unset on subevent, default to event\n rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_start', minutes_before=None))\n assert rdw.datetime(se) == datetime(2017, 11, 30, 5, 0, 0, tzinfo=TOKYO)\n\n # presale_end is unset on all, default to date_from of subevent\n rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_end', minutes_before=None))\n assert rdw.datetime(se) == datetime(2017, 11, 26, 5, 0, 0, tzinfo=TOKYO)\n\n\n@pytest.mark.django_db\ndef test_relative_date_in_minutes(METHOD_NAME):\n rdw = RelativeDateWrapper(RelativeDate(days_before=0, time=None, base_date_name='date_from', minutes_before=60))\n assert rdw.to_string() == 'RELDATE/minutes/60/date_from/'\n assert rdw.datetime(METHOD_NAME) == datetime(2017, 12, 27, 4, 0, 0, tzinfo=TOKYO)\n\n\n@pytest.mark.django_db\ndef test_relative_date_with_time(METHOD_NAME):\n rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=time(8, 5, 13), base_date_name='date_from', minutes_before=None))\n assert rdw.to_string() == 'RELDATE/1/08:05:13/date_from/'\n assert rdw.datetime(METHOD_NAME) == datetime(2017, 12, 26, 8, 5, 13, 
tzinfo=TOKYO)\n\n\n@pytest.mark.django_db\ndef test_relative_date_with_time_around_dst(METHOD_NAME):\n METHOD_NAME.settings.timezone = \"Europe/Berlin\"\n METHOD_NAME.date_from = datetime(2020, 3, 29, 18, 0, 0, tzinfo=BERLIN)\n\n rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=time(18, 0, 0), base_date_name='date_from', minutes_before=None))\n assert rdw.to_string() == 'RELDATE/1/18:00:00/date_from/'\n assert rdw.datetime(METHOD_NAME) == datetime(2020, 3, 28, 18, 0, 0, tzinfo=BERLIN)\n\n rdw = RelativeDateWrapper(RelativeDate(days_before=0, time=time(2, 30, 0), base_date_name='date_from', minutes_before=None))\n assert rdw.to_string() == 'RELDATE/0/02:30:00/date_from/'\n assert rdw.datetime(METHOD_NAME) == datetime(2020, 3, 29, 2, 30, 0, tzinfo=BERLIN)\n\n METHOD_NAME.date_from = datetime(2020, 10, 25, 18, 0, 0, tzinfo=BERLIN)\n\n rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=time(18, 0, 0), base_date_name='date_from', minutes_before=None))\n assert rdw.to_string() == 'RELDATE/1/18:00:00/date_from/'\n assert rdw.datetime(METHOD_NAME) == datetime(2020, 10, 24, 18, 0, 0, tzinfo=BERLIN)\n\n rdw = RelativeDateWrapper(RelativeDate(days_before=0, time=time(2, 30, 0), base_date_name='date_from', minutes_before=None))\n assert rdw.to_string() == 'RELDATE/0/02:30:00/date_from/'\n assert rdw.datetime(METHOD_NAME) == datetime(2020, 10, 25, 2, 30, 0, tzinfo=BERLIN)\n\n\ndef test_unserialize():\n d = datetime(2017, 12, 25, 10, 0, 0, tzinfo=TOKYO)\n rdw = RelativeDateWrapper.from_string(d.isoformat())\n assert rdw.data == d\n\n rdw = RelativeDateWrapper.from_string('RELDATE/1/-/date_from/')\n assert rdw.data == RelativeDate(days_before=1, time=None, base_date_name='date_from', minutes_before=None)\n\n rdw = RelativeDateWrapper.from_string('RELDATE/1/18:05:13/date_from/')\n assert rdw.data == RelativeDate(days_before=1, time=time(18, 5, 13), base_date_name='date_from', minutes_before=None)\n\n rdw = RelativeDateWrapper.from_string('RELDATE/minutes/60/date_from/')\n assert rdw.data == RelativeDate(days_before=0, time=None, base_date_name='date_from', minutes_before=60)"}}},{"rowIdx":2034,"cells":{"id":{"kind":"number","value":2034,"string":"2,034"},"label":{"kind":"string","value":"test set owner"},"text":{"kind":"string","value":"from django.test import TestCase\nfrom django.utils import timezone\n\nfrom ...categories.models import Category\nfrom ...users.test import create_test_user\nfrom ..models import Post, Thread, ThreadParticipant\nfrom ..participants import (\n add_participants,\n has_participants,\n make_participants_aware,\n set_owner,\n set_users_unread_private_threads_sync,\n)\n\n\nclass ParticipantsTests(TestCase):\n def setUp(self):\n datetime = timezone.now()\n\n self.category = Category.objects.all_categories()[:1][0]\n self.thread = Thread(\n category=self.category,\n started_on=datetime,\n starter_name=\"Tester\",\n starter_slug=\"tester\",\n last_post_on=datetime,\n last_poster_name=\"Tester\",\n last_poster_slug=\"tester\",\n )\n\n self.thread.set_title(\"Test thread\")\n self.thread.save()\n\n post = Post.objects.create(\n category=self.category,\n thread=self.thread,\n poster_name=\"Tester\",\n original=\"Hello! I am test message!\",\n parsed=\"
<p>Hello! I am test message!</p>
\",\n checksum=\"nope\",\n posted_on=datetime,\n updated_on=datetime,\n )\n\n self.thread.first_post = post\n self.thread.last_post = post\n self.thread.save()\n\n def test_has_participants(self):\n \"\"\"has_participants returns true if thread has participants\"\"\"\n users = [\n create_test_user(\"User\", \"user@example.com\"),\n create_test_user(\"Other_User\", \"otheruser@example.com\"),\n ]\n\n self.assertFalse(has_participants(self.thread))\n\n ThreadParticipant.objects.add_participants(self.thread, users)\n self.assertTrue(has_participants(self.thread))\n\n self.thread.threadparticipant_set.all().delete()\n self.assertFalse(has_participants(self.thread))\n\n def test_make_threads_participants_aware(self):\n \"\"\"\n make_participants_aware sets participants_list and participant\n annotations on list of threads\n \"\"\"\n user = create_test_user(\"User\", \"user@example.com\")\n other_user = create_test_user(\"Other_User\", \"otheruser@example.com\")\n\n self.assertFalse(hasattr(self.thread, \"participants_list\"))\n self.assertFalse(hasattr(self.thread, \"participant\"))\n\n make_participants_aware(user, [self.thread])\n\n self.assertFalse(hasattr(self.thread, \"participants_list\"))\n self.assertTrue(hasattr(self.thread, \"participant\"))\n self.assertIsNone(self.thread.participant)\n\n ThreadParticipant.objects.set_owner(self.thread, user)\n ThreadParticipant.objects.add_participants(self.thread, [other_user])\n\n make_participants_aware(user, [self.thread])\n\n self.assertFalse(hasattr(self.thread, \"participants_list\"))\n self.assertEqual(self.thread.participant.user, user)\n\n def test_make_thread_participants_aware(self):\n \"\"\"\n make_participants_aware sets participants_list and participant\n annotations on thread model\n \"\"\"\n user = create_test_user(\"User\", \"user@example.com\")\n other_user = create_test_user(\"Other_User\", \"otheruser@example.com\")\n\n self.assertFalse(hasattr(self.thread, \"participants_list\"))\n self.assertFalse(hasattr(self.thread, \"participant\"))\n\n make_participants_aware(user, self.thread)\n\n self.assertTrue(hasattr(self.thread, \"participants_list\"))\n self.assertTrue(hasattr(self.thread, \"participant\"))\n\n self.assertEqual(self.thread.participants_list, [])\n self.assertIsNone(self.thread.participant)\n\n ThreadParticipant.objects.set_owner(self.thread, user)\n ThreadParticipant.objects.add_participants(self.thread, [other_user])\n\n make_participants_aware(user, self.thread)\n\n self.assertEqual(self.thread.participant.user, user)\n for participant in self.thread.participants_list:\n if participant.user == user:\n break\n else:\n self.fail(\"thread.participants_list didn't contain user\")\n\n def METHOD_NAME(self):\n \"\"\"set_owner sets user as thread owner\"\"\"\n user = create_test_user(\"User\", \"user@example.com\")\n\n set_owner(self.thread, user)\n\n owner = self.thread.threadparticipant_set.get(is_owner=True)\n self.assertEqual(user, owner.user)\n\n def test_set_users_unread_private_threads_sync(self):\n \"\"\"\n set_users_unread_private_threads_sync sets sync_unread_private_threads\n flag on users provided to true\n \"\"\"\n users = [\n create_test_user(\"User\", \"user@example.com\"),\n create_test_user(\"Other_User\", \"otheruser@example.com\"),\n ]\n\n set_users_unread_private_threads_sync(users=users)\n for user in users:\n user.refresh_from_db()\n assert user.sync_unread_private_threads\n\n def test_set_participants_unread_private_threads_sync(self):\n \"\"\"\n set_users_unread_private_threads_sync sets 
sync_unread_private_threads\n flag on participants provided to true\n \"\"\"\n users = [\n create_test_user(\"User\", \"user@example.com\"),\n create_test_user(\"Other_User\", \"otheruser@example.com\"),\n ]\n\n participants = [ThreadParticipant(user=u) for u in users]\n\n set_users_unread_private_threads_sync(participants=participants)\n for user in users:\n user.refresh_from_db()\n assert user.sync_unread_private_threads\n\n def test_set_participants_users_unread_private_threads_sync(self):\n \"\"\"\n set_users_unread_private_threads_sync sets sync_unread_private_threads\n flag on users and participants provided to true\n \"\"\"\n users = [create_test_user(\"User\", \"user@example.com\")]\n participants = [ThreadParticipant(user=u) for u in users]\n users.append(create_test_user(\"Other_User\", \"otheruser@example.com\"))\n\n set_users_unread_private_threads_sync(users=users, participants=participants)\n for user in users:\n user.refresh_from_db()\n assert user.sync_unread_private_threads\n\n def test_set_users_unread_private_threads_sync_exclude_user(self):\n \"\"\"exclude_user kwarg works\"\"\"\n users = [\n create_test_user(\"User\", \"user@example.com\"),\n create_test_user(\"Other_User\", \"otheruser@example.com\"),\n ]\n\n set_users_unread_private_threads_sync(users=users, exclude_user=users[0])\n\n [i.refresh_from_db() for i in users]\n assert users[0].sync_unread_private_threads is False\n assert users[1].sync_unread_private_threads\n\n def test_set_users_unread_private_threads_sync_noop(self):\n \"\"\"excluding only user is noop\"\"\"\n user = create_test_user(\"User\", \"user@example.com\")\n\n with self.assertNumQueries(0):\n set_users_unread_private_threads_sync(users=[user], exclude_user=user)\n\n user.refresh_from_db()\n assert user.sync_unread_private_threads is False\n\n\ndef test_add_participants_triggers_notify_on_new_private_thread(\n mocker, user, other_user, private_thread\n):\n notify_on_new_private_thread_mock = mocker.patch(\n \"misago.threads.participants.notify_on_new_private_thread\"\n )\n\n add_participants(user, private_thread, [user, other_user])\n\n notify_on_new_private_thread_mock.delay.assert_called_once_with(\n user.id, private_thread.id, [other_user.id]\n )"}}},{"rowIdx":2035,"cells":{"id":{"kind":"number","value":2035,"string":"2,035"},"label":{"kind":"string","value":"delete host"},"text":{"kind":"string","value":"\"\"\"\nSupport for RFC 2136 dynamic DNS updates.\n\n:depends: - dnspython Python module\n:configuration: If you want to use TSIG authentication for the server, there\n are a couple of optional configuration parameters made available to\n support this (the keyname is only needed if the keyring contains more\n than one key)::\n\n keyfile: keyring file (default=None)\n keyname: key name in file (default=None)\n keyalgorithm: algorithm used to create the key\n (default='HMAC-MD5.SIG-ALG.REG.INT').\n Other possible values: hmac-sha1, hmac-sha224, hmac-sha256,\n hmac-sha384, hmac-sha512\n\n\n The keyring file needs to be in json format and the key name needs to end\n with an extra period in the file, similar to this:\n\n .. 
code-block:: json\n\n {\"keyname.\": \"keycontent\"}\n\"\"\"\n\nimport logging\n\nimport salt.utils.files\nimport salt.utils.json\n\nlog = logging.getLogger(__name__)\n\ntry:\n import dns.query\n import dns.tsigkeyring # pylint: disable=no-name-in-module\n import dns.update # pylint: disable=no-name-in-module\n\n dns_support = True\nexcept ImportError as e:\n dns_support = False\n\n\ndef __virtual__():\n \"\"\"\n Confirm dnspython is available.\n \"\"\"\n if dns_support:\n return \"ddns\"\n return (\n False,\n \"The ddns execution module cannot be loaded: dnspython not installed.\",\n )\n\n\ndef _config(name, key=None, **kwargs):\n \"\"\"\n Return a value for 'name' from command line args then config file options.\n Specify 'key' if the config file option is not the same as 'name'.\n \"\"\"\n if key is None:\n key = name\n if name in kwargs:\n value = kwargs[name]\n else:\n value = __salt__[\"config.option\"](\"ddns.{}\".format(key))\n if not value:\n value = None\n return value\n\n\ndef _get_keyring(keyfile):\n keyring = None\n if keyfile:\n with salt.utils.files.fopen(keyfile) as _f:\n keyring = dns.tsigkeyring.from_text(salt.utils.json.load(_f))\n return keyring\n\n\ndef add_host(\n zone,\n name,\n ttl,\n ip,\n nameserver=\"127.0.0.1\",\n replace=True,\n timeout=5,\n port=53,\n **kwargs\n):\n \"\"\"\n Add, replace, or update the A and PTR (reverse) records for a host.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt ns1 ddns.add_host example.com host1 60 10.1.1.1\n \"\"\"\n res = update(zone, name, ttl, \"A\", ip, nameserver, timeout, replace, port, **kwargs)\n if res is False:\n return False\n\n fqdn = \"{}.{}.\".format(name, zone)\n parts = ip.split(\".\")[::-1]\n popped = []\n\n # Iterate over possible reverse zones\n while len(parts) > 1:\n p = parts.pop(0)\n popped.append(p)\n zone = \"{}.{}\".format(\".\".join(parts), \"in-addr.arpa.\")\n name = \".\".join(popped)\n ptr = update(\n zone, name, ttl, \"PTR\", fqdn, nameserver, timeout, replace, port, **kwargs\n )\n if ptr:\n return True\n return res\n\n\ndef METHOD_NAME(zone, name, nameserver=\"127.0.0.1\", timeout=5, port=53, **kwargs):\n \"\"\"\n Delete the forward and reverse records for a host.\n\n Returns true if any records are deleted.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt ns1 ddns.delete_host example.com host1\n \"\"\"\n fqdn = \"{}.{}\".format(name, zone)\n request = dns.message.make_query(fqdn, \"A\")\n answer = dns.query.udp(request, nameserver, timeout, port)\n try:\n ips = [i.address for i in answer.answer[0].items]\n except IndexError:\n ips = []\n\n res = delete(\n zone, name, nameserver=nameserver, timeout=timeout, port=port, **kwargs\n )\n\n fqdn = fqdn + \".\"\n for ip in ips:\n parts = ip.split(\".\")[::-1]\n popped = []\n\n # Iterate over possible reverse zones\n while len(parts) > 1:\n p = parts.pop(0)\n popped.append(p)\n zone = \"{}.{}\".format(\".\".join(parts), \"in-addr.arpa.\")\n name = \".\".join(popped)\n ptr = delete(\n zone,\n name,\n \"PTR\",\n fqdn,\n nameserver=nameserver,\n timeout=timeout,\n port=port,\n **kwargs\n )\n if ptr:\n res = True\n return res\n\n\ndef update(\n zone,\n name,\n ttl,\n rdtype,\n data,\n nameserver=\"127.0.0.1\",\n timeout=5,\n replace=False,\n port=53,\n **kwargs\n):\n \"\"\"\n Add, replace, or update a DNS record.\n nameserver must be an IP address and the minion running this module\n must have update privileges on that server.\n If replace is true, first deletes all records for this name and type.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt ns1 ddns.update example.com host1 60 A 10.0.0.1\n \"\"\"\n name = str(name)\n\n if name[-1:] == \".\":\n fqdn = name\n else:\n fqdn = \"{}.{}\".format(name, zone)\n\n request = dns.message.make_query(fqdn, rdtype)\n answer = dns.query.udp(request, nameserver, timeout, port)\n\n rdtype = dns.rdatatype.from_text(rdtype)\n rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)\n\n keyring = _get_keyring(_config(\"keyfile\", **kwargs))\n keyname = _config(\"keyname\", **kwargs)\n keyalgorithm = _config(\"keyalgorithm\", **kwargs) or \"HMAC-MD5.SIG-ALG.REG.INT\"\n\n is_exist = False\n for rrset in answer.answer:\n if rdata in rrset.items:\n if ttl == rrset.ttl:\n if len(answer.answer) >= 1 or len(rrset.items) >= 1:\n is_exist = True\n break\n\n dns_update = dns.update.Update(\n zone, keyring=keyring, keyname=keyname, keyalgorithm=keyalgorithm\n )\n if replace:\n dns_update.replace(name, ttl, rdata)\n elif not is_exist:\n dns_update.add(name, ttl, rdata)\n else:\n return None\n answer = dns.query.udp(dns_update, nameserver, timeout, port)\n if answer.rcode() > 0:\n return False\n return True\n\n\ndef delete(\n zone,\n name,\n rdtype=None,\n data=None,\n nameserver=\"127.0.0.1\",\n timeout=5,\n port=53,\n **kwargs\n):\n \"\"\"\n Delete a DNS record.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt ns1 ddns.delete example.com host1 A\n \"\"\"\n name = str(name)\n\n if name[-1:] == \".\":\n fqdn = name\n else:\n fqdn = \"{}.{}\".format(name, zone)\n\n request = dns.message.make_query(fqdn, (rdtype or \"ANY\"))\n answer = dns.query.udp(request, nameserver, timeout, port)\n if not answer.answer:\n return None\n\n keyring = _get_keyring(_config(\"keyfile\", **kwargs))\n keyname = _config(\"keyname\", **kwargs)\n keyalgorithm = _config(\"keyalgorithm\", **kwargs) or \"HMAC-MD5.SIG-ALG.REG.INT\"\n\n dns_update = dns.update.Update(\n zone, keyring=keyring, keyname=keyname, keyalgorithm=keyalgorithm\n )\n\n if rdtype:\n rdtype = dns.rdatatype.from_text(rdtype)\n if data:\n rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)\n dns_update.delete(name, rdata)\n else:\n dns_update.delete(name, rdtype)\n else:\n dns_update.delete(name)\n\n answer = dns.query.udp(dns_update, nameserver, timeout, port)\n if answer.rcode() > 0:\n return False\n return True"}}},{"rowIdx":2036,"cells":{"id":{"kind":"number","value":2036,"string":"2,036"},"label":{"kind":"string","value":"repeats cmp"},"text":{"kind":"string","value":"\"\"\"\nComparison utilities for STIX pattern observation expressions.\n\"\"\"\nfrom stix2.equivalence.pattern.compare import generic_cmp, iter_lex_cmp\nfrom stix2.equivalence.pattern.compare.comparison import (\n comparison_expression_cmp, generic_constant_cmp,\n)\nfrom stix2.patterns import (\n AndObservationExpression, FollowedByObservationExpression,\n ObservationExpression, OrObservationExpression,\n QualifiedObservationExpression, RepeatQualifier, StartStopQualifier,\n WithinQualifier, _CompoundObservationExpression,\n)\n\n_OBSERVATION_EXPRESSION_TYPE_ORDER = (\n ObservationExpression, AndObservationExpression, OrObservationExpression,\n FollowedByObservationExpression, QualifiedObservationExpression,\n)\n\n\n_QUALIFIER_TYPE_ORDER = (\n RepeatQualifier, WithinQualifier, StartStopQualifier,\n)\n\n\ndef METHOD_NAME(qual1, qual2):\n \"\"\"\n Compare REPEATS qualifiers. This orders by repeat count.\n \"\"\"\n return generic_constant_cmp(qual1.times_to_repeat, qual2.times_to_repeat)\n\n\ndef within_cmp(qual1, qual2):\n \"\"\"\n Compare WITHIN qualifiers. 
This orders by number of seconds.\n \"\"\"\n return generic_constant_cmp(\n qual1.number_of_seconds, qual2.number_of_seconds,\n )\n\n\ndef startstop_cmp(qual1, qual2):\n \"\"\"\n Compare START/STOP qualifiers. This lexicographically orders by start time,\n then stop time.\n \"\"\"\n return iter_lex_cmp(\n (qual1.start_time, qual1.stop_time),\n (qual2.start_time, qual2.stop_time),\n generic_constant_cmp,\n )\n\n\n_QUALIFIER_COMPARATORS = {\n RepeatQualifier: METHOD_NAME,\n WithinQualifier: within_cmp,\n StartStopQualifier: startstop_cmp,\n}\n\n\ndef observation_expression_cmp(expr1, expr2):\n \"\"\"\n Compare two observation expression ASTs. This is sensitive to the order of\n the expressions' sub-components. To achieve an order-insensitive\n comparison, the sub-component ASTs must be ordered first.\n\n Args:\n expr1: The first observation expression\n expr2: The second observation expression\n\n Returns:\n <0, 0, or >0 depending on whether the first arg is less, equal or\n greater than the second\n \"\"\"\n type1 = type(expr1)\n type2 = type(expr2)\n\n type1_idx = _OBSERVATION_EXPRESSION_TYPE_ORDER.index(type1)\n type2_idx = _OBSERVATION_EXPRESSION_TYPE_ORDER.index(type2)\n\n if type1_idx != type2_idx:\n result = generic_cmp(type1_idx, type2_idx)\n\n # else, both exprs are of same type.\n\n # If they're simple, use contained comparison expression order\n elif type1 is ObservationExpression:\n result = comparison_expression_cmp(\n expr1.operand, expr2.operand,\n )\n\n elif isinstance(expr1, _CompoundObservationExpression):\n # Both compound, and of same type (and/or/followedby): sort according\n # to contents.\n result = iter_lex_cmp(\n expr1.operands, expr2.operands, observation_expression_cmp,\n )\n\n else: # QualifiedObservationExpression\n # Both qualified. 
Check qualifiers first; if they are the same,\n # use order of the qualified expressions.\n qual1_type = type(expr1.qualifier)\n qual2_type = type(expr2.qualifier)\n\n qual1_type_idx = _QUALIFIER_TYPE_ORDER.index(qual1_type)\n qual2_type_idx = _QUALIFIER_TYPE_ORDER.index(qual2_type)\n\n result = generic_cmp(qual1_type_idx, qual2_type_idx)\n\n if result == 0:\n # Same qualifier type; compare qualifier details\n qual_cmp = _QUALIFIER_COMPARATORS.get(qual1_type)\n if qual_cmp:\n result = qual_cmp(expr1.qualifier, expr2.qualifier)\n else:\n raise TypeError(\n \"Can't compare qualifier type: \" + qual1_type.__name__,\n )\n\n if result == 0:\n # Same qualifier type and details; use qualified expression order\n result = observation_expression_cmp(\n expr1.observation_expression, expr2.observation_expression,\n )\n\n return result"}}},{"rowIdx":2037,"cells":{"id":{"kind":"number","value":2037,"string":"2,037"},"label":{"kind":"string","value":"function returning generator"},"text":{"kind":"string","value":"# pylint: disable=too-few-public-methods,import-error, missing-docstring\n# pylint: disable=useless-super-delegation,wrong-import-position,invalid-name, wrong-import-order, condition-evals-to-constant\n\nif len('TEST'): # [use-implicit-booleaness-not-len]\n pass\n\nif not len('TEST'): # [use-implicit-booleaness-not-len]\n pass\n\nz = []\nif z and len(['T', 'E', 'S', 'T']): # [use-implicit-booleaness-not-len]\n pass\n\nif True or len('TEST'): # [use-implicit-booleaness-not-len]\n pass\n\nif len('TEST') == 0: # Should be fine\n pass\n\nif len('TEST') < 1: # Should be fine\n pass\n\nif len('TEST') <= 0: # Should be fine\n pass\n\nif 1 > len('TEST'): # Should be fine\n pass\n\nif 0 >= len('TEST'): # Should be fine\n pass\n\nif z and len('TEST') == 0: # Should be fine\n pass\n\nif 0 == len('TEST') < 10: # Should be fine\n pass\n\n# Should be fine\nif 0 < 1 <= len('TEST') < 10: # [comparison-of-constants]\n pass\n\nif 10 > len('TEST') != 0: # Should be fine\n pass\n\nif 10 > len('TEST') > 1 > 0: # Should be fine\n pass\n\nif 0 <= len('TEST') < 100: # Should be fine\n pass\n\nif z or 10 > len('TEST') != 0: # Should be fine\n pass\n\nif z:\n pass\nelif len('TEST'): # [use-implicit-booleaness-not-len]\n pass\n\nif z:\n pass\nelif not len('TEST'): # [use-implicit-booleaness-not-len]\n pass\n\nwhile len('TEST'): # [use-implicit-booleaness-not-len]\n pass\n\nwhile not len('TEST'): # [use-implicit-booleaness-not-len]\n pass\n\nwhile z and len('TEST'): # [use-implicit-booleaness-not-len]\n pass\n\nwhile not len('TEST') and z: # [use-implicit-booleaness-not-len]\n pass\n\nassert len('TEST') > 0 # Should be fine\n\nx = 1 if len('TEST') != 0 else 2 # Should be fine\n\nf_o_o = len('TEST') or 42 # Should be fine\n\na = x and len(x) # Should be fine\n\ndef some_func():\n return len('TEST') > 0 # Should be fine\n\ndef github_issue_1325():\n l = [1, 2, 3]\n length = len(l) if l else 0 # Should be fine\n return length\n\ndef github_issue_1331(*args):\n assert False, len(args) # Should be fine\n\ndef github_issue_1331_v2(*args):\n assert len(args), args # [use-implicit-booleaness-not-len]\n\ndef github_issue_1331_v3(*args):\n assert len(args) or z, args # [use-implicit-booleaness-not-len]\n\ndef github_issue_1331_v4(*args):\n assert z and len(args), args # [use-implicit-booleaness-not-len]\n\nb = bool(len(z)) # [use-implicit-booleaness-not-len]\nc = bool(len('TEST') or 42) # [use-implicit-booleaness-not-len]\n\ndef github_issue_1879():\n\n class ClassWithBool(list):\n def __bool__(self):\n return True\n\n class 
ClassWithoutBool(list):\n pass\n\n class ChildClassWithBool(ClassWithBool):\n pass\n\n class ChildClassWithoutBool(ClassWithoutBool):\n pass\n\n assert len(ClassWithBool())\n assert len(ChildClassWithBool())\n assert len(ClassWithoutBool()) # [use-implicit-booleaness-not-len]\n assert len(ChildClassWithoutBool()) # [use-implicit-booleaness-not-len]\n assert len(range(0)) # [use-implicit-booleaness-not-len]\n assert len([t + 1 for t in []]) # [use-implicit-booleaness-not-len]\n assert len(u + 1 for u in []) # [use-implicit-booleaness-not-len]\n assert len({\"1\":(v + 1) for v in {}}) # [use-implicit-booleaness-not-len]\n assert len(set((w + 1) for w in set())) # [use-implicit-booleaness-not-len]\n\n # pylint: disable=import-outside-toplevel\n import numpy\n numpy_array = numpy.array([0])\n if len(numpy_array) > 0:\n print('numpy_array')\n if len(numpy_array):\n print('numpy_array')\n if numpy_array:\n print('b')\n\n import pandas as pd\n pandas_df = pd.DataFrame()\n if len(pandas_df):\n print(\"this works, but pylint tells me not to use len() without comparison\")\n if len(pandas_df) > 0:\n print(\"this works and pylint likes it, but it's not the solution intended by PEP-8\")\n if pandas_df:\n print(\"this does not work (truth value of dataframe is ambiguous)\")\n\n def function_returning_list(r):\n if r==1:\n return [1]\n return [2]\n\n def function_returning_int(r):\n if r==1:\n return 1\n return 2\n\n def METHOD_NAME(r):\n for i in [r, 1, 2, 3]:\n yield i\n\n def function_returning_comprehension(r):\n return [x+1 for x in [r, 1, 2, 3]]\n\n def function_returning_function(r):\n return METHOD_NAME(r)\n\n assert len(function_returning_list(z)) # [use-implicit-booleaness-not-len]\n assert len(function_returning_int(z))\n # This should raise a use-implicit-booleaness-not-len once astroid can infer it\n # See https://github.com/pylint-dev/pylint/pull/3821#issuecomment-743771514\n assert len(METHOD_NAME(z))\n assert len(function_returning_comprehension(z))\n assert len(function_returning_function(z))\n\n\ndef github_issue_4215():\n # Test undefined variables\n # https://github.com/pylint-dev/pylint/issues/4215\n if len(undefined_var): # [undefined-variable]\n pass\n if len(undefined_var2[0]): # [undefined-variable]\n pass\n\n# pylint: disable=len-as-condition\n\nif len('TEST'):\n pass"}}},{"rowIdx":2038,"cells":{"id":{"kind":"number","value":2038,"string":"2,038"},"label":{"kind":"string","value":"parse datetime"},"text":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2022 Satpy developers\n#\n# This file is part of satpy.\n#\n# satpy is free software: you can redistribute it and/or modify it under the\n# terms of the GNU General Public License as published by the Free Software\n# Foundation, either version 3 of the License, or (at your option) any later\n# version.\n#\n# satpy is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# satpy. If not, see .\n\n\"\"\"Reader for files produced by ESA's Ocean Color CCI project.\n\nThis reader currently supports the lat/lon gridded products and does not yet support the\nproducts on a sinusoidal grid. 
The products on each of the composite periods (1, 5 and 8 day plus monthly)\nare supported and both the merged product files (OC_PRODUCTS) and single product (RRS, CHLOR_A, IOP, K_490) are\nsupported.\n\"\"\"\nimport logging\nfrom datetime import datetime\n\nimport dask.array as da\nimport numpy as np\nfrom pyresample import geometry\n\nfrom satpy.readers.netcdf_utils import NetCDF4FileHandler\n\nlogger = logging.getLogger(__name__)\n\n\nclass OCCCIFileHandler(NetCDF4FileHandler):\n \"\"\"File handler for Ocean Color CCI netCDF files.\"\"\"\n\n @staticmethod\n def METHOD_NAME(datestr):\n \"\"\"Parse datetime.\"\"\"\n return datetime.strptime(datestr, \"%Y%m%d%H%MZ\")\n\n @property\n def start_time(self):\n \"\"\"Get the start time.\"\"\"\n return self.METHOD_NAME(self['/attr/time_coverage_start'])\n\n @property\n def end_time(self):\n \"\"\"Get the end time.\"\"\"\n return self.METHOD_NAME(self['/attr/time_coverage_end'])\n\n @property\n def composite_period(self):\n \"\"\"Determine composite period from filename information.\"\"\"\n comp1 = self.filename_info['composite_period_1']\n comp2 = self.filename_info['composite_period_2']\n if comp2 == 'MONTHLY' and comp1 == \"1M\":\n return 'monthly'\n elif comp1 == '1D':\n return 'daily'\n elif comp1 == '5D':\n return '5-day'\n elif comp1 == '8D':\n return '8-day'\n else:\n raise ValueError(f\"Unknown data compositing period: {comp1}_{comp2}\")\n\n def _update_attrs(self, dataset, dataset_info):\n \"\"\"Update dataset attributes.\"\"\"\n dataset.attrs.update(self[dataset_info['nc_key']].attrs)\n dataset.attrs.update(dataset_info)\n dataset.attrs['sensor'] = 'merged'\n dataset.attrs['composite_period'] = self.composite_period\n # remove attributes from original file which don't apply anymore\n dataset.attrs.pop(\"nc_key\")\n\n def get_dataset(self, dataset_id, ds_info):\n \"\"\"Get dataset.\"\"\"\n dataset = da.squeeze(self[ds_info['nc_key']])\n if '_FillValue' in dataset.attrs:\n dataset.data = da.where(dataset.data == dataset.attrs['_FillValue'], np.nan, dataset.data)\n self._update_attrs(dataset, ds_info)\n if 'lat' in dataset.dims:\n dataset = dataset.rename({'lat': 'y'})\n if 'lon' in dataset.dims:\n dataset = dataset.rename({'lon': 'x'})\n return dataset\n\n def get_area_def(self, dsid):\n \"\"\"Get the area definition based on information in file.\n\n There is no area definition in the file itself, so we have to compute it\n from the metadata, which specifies the area extent and pixel resolution.\n \"\"\"\n proj_param = 'EPSG:4326'\n\n lon_res = float(self['/attr/geospatial_lon_resolution'])\n lat_res = float(self['/attr/geospatial_lat_resolution'])\n\n min_lon = self['/attr/geospatial_lon_min']\n max_lon = self['/attr/geospatial_lon_max']\n min_lat = self['/attr/geospatial_lat_min']\n max_lat = self['/attr/geospatial_lat_max']\n\n area_extent = (min_lon, min_lat, max_lon, max_lat)\n lon_size = np.round((max_lon - min_lon) / lon_res).astype(int)\n lat_size = np.round((max_lat - min_lat) / lat_res).astype(int)\n\n area = geometry.AreaDefinition('gridded_occci',\n 'Full globe gridded area',\n 'longlat',\n proj_param,\n lon_size,\n lat_size,\n area_extent)\n return area"}}},{"rowIdx":2039,"cells":{"id":{"kind":"number","value":2039,"string":"2,039"},"label":{"kind":"string","value":"handle"},"text":{"kind":"string","value":"\"\"\"\nThis command:\n\n * deletes all prescribing data (both original data and extracts created by the matrixstore build) from:\n * the filesystem\n * BigQuery\n * Cloud Storage\n * resets the import pipeline so that 
the import may be re-run with correct data\n\"\"\"\n\nimport json\nimport os\n\nimport networkx as nx\nfrom django.conf import settings\nfrom django.core.management import BaseCommand\nfrom frontend.models import ImportLog\nfrom gcutils.bigquery import Client as BQClient\nfrom gcutils.bigquery import NotFound\nfrom gcutils.storage import Client as StorageClient\nfrom pipeline.models import TaskLog\nfrom pipeline.runner import dump_import_records, load_import_records\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\"year\")\n parser.add_argument(\"month\")\n\n def METHOD_NAME(self, year, month, **kwargs):\n verify_year_month(year, month)\n delete_import_record(year, month)\n mark_task_logs_as_failed(year, month)\n delete_fetch_and_import_task_log(year, month)\n delete_import_logs(year, month)\n delete_prescribing_file_on_filesystem(year, month)\n delete_prescribing_file_in_storage(year, month)\n delete_temporary_prescribing_bq_table(year, month)\n remove_records_from_bq_table(year, month)\n delete_backup_from_storage(year, month)\n delete_matrixstore_bq_table(year, month)\n delete_matrixstore_storage_files(year, month)\n delete_matrixstore_download(year, month)\n\n\ndef verify_year_month(year, month):\n print(\"verify_year_month\")\n log = ImportLog.objects.latest_in_category(\"prescribing\")\n assert log.current_at.year == year\n assert log.current_at.month == int(month)\n\n\ndef delete_import_record(year, month):\n print(\"delete_import_record\")\n import_records = load_import_records()\n logs = import_records[\"prescribing\"]\n new_logs = [\n r for r in logs if f\"prescribing_v2/{year}_{month}\" not in r[\"imported_file\"]\n ]\n assert len(logs) == len(new_logs) + 1\n import_records[\"prescribing\"] = new_logs\n dump_import_records(import_records)\n\n\ndef mark_task_logs_as_failed(year, month):\n print(\"mark_task_logs_as_failed\")\n with open(settings.PIPELINE_METADATA_DIR + \"/tasks.json\") as f:\n tasks = json.load(f)\n\n graph = nx.DiGraph()\n for task_name, task_def in tasks.items():\n for dependency_name in task_def.get(\"dependencies\", []):\n graph.add_edge(dependency_name, task_name)\n\n convert_task_log = TaskLog.objects.get(\n task_name=\"convert_hscic_prescribing\",\n year=year,\n month=month,\n status=TaskLog.SUCCESSFUL,\n )\n\n for task_name in nx.descendants(graph, \"convert_hscic_prescribing\"):\n task_log = TaskLog.objects.get(\n task_name=task_name, year=year, month=month, status=TaskLog.SUCCESSFUL\n )\n assert task_log.started_at > convert_task_log.started_at\n task_log.status = TaskLog.FAILED\n task_log.save()\n\n convert_task_log.status = TaskLog.FAILED\n convert_task_log.save()\n\n\ndef delete_fetch_and_import_task_log(year, month):\n print(\"delete_fetch_and_import_task_log\")\n TaskLog.objects.get(task_name=\"fetch_and_import\", year=year, month=month).delete()\n\n\ndef delete_import_logs(year, month):\n print(\"delete_import_logs\")\n ImportLog.objects.get(\n category=\"prescribing\", current_at=f\"{year}-{month}-01\"\n ).delete()\n ImportLog.objects.get(\n category=\"dashboard_data\", current_at=f\"{year}-{month}-01\"\n ).delete()\n\n\ndef delete_prescribing_file_on_filesystem(year, month):\n print(\"delete_prescribing_file_on_filesystem\")\n path = os.path.join(\n settings.PIPELINE_DATA_BASEDIR,\n \"prescribing_v2\",\n f\"{year}_{month}\",\n f\"epd_{year}{month}.csv\",\n )\n os.remove(path)\n\n\ndef delete_prescribing_file_in_storage(year, month):\n print(\"delete_prescribing_file_in_storage\")\n 
_delete_file_from_storage(\"hscic/prescribing_v2/2021_10\")\n\n\ndef delete_temporary_prescribing_bq_table(year, month):\n print(\"delete_temporary_prescribing_bq_table\")\n try:\n _delete_table_from_bq(\"tmp_eu\", f\"raw_prescribing_data_{year}_{month}\")\n except NotFound:\n # This is ok, as the table might already have been deleted\n pass\n\n\ndef remove_records_from_bq_table(year, month):\n print(\"remove_records_from_bq_table\")\n client = BQClient(\"hscic\")\n sql = (\n f\"DELETE FROM ebmdatalab.hscic.prescribing_v2 WHERE month = '{year}-{month}-01'\"\n )\n client.query(sql)\n\n\ndef delete_backup_from_storage(year, month):\n print(\"delete_backup_from_storage\")\n _delete_file_from_storage(\"backup/prescribing_v2/2021_10\")\n\n\ndef delete_matrixstore_bq_table(year, month):\n print(\"delete_matrixstore_bq_table\")\n _delete_table_from_bq(\"prescribing_export\", f\"prescribing_{year}_{month}\")\n\n\ndef delete_matrixstore_storage_files(year, month):\n print(\"delete_matrixstore_storage_files\")\n _delete_file_from_storage(f\"prescribing_exports/prescribing_{year}_{month}_*\")\n\n\ndef delete_matrixstore_download(year, month):\n print(\"delete_matrixstore_download\")\n path = os.path.join(\n settings.PIPELINE_DATA_BASEDIR,\n \"matrixstore_import\",\n f\"{year}-{month}-01_prescribing.csv.gz\",\n )\n os.remove(path)\n\n\ndef _delete_file_from_storage(path):\n client = StorageClient()\n bucket = client.get_bucket()\n for blob in bucket.list_blobs(prefix=path):\n blob.delete()\n\n\ndef _delete_table_from_bq(dataset_name, table_name):\n client = BQClient(dataset_name)\n client.delete_table(table_name)"}}},{"rowIdx":2040,"cells":{"id":{"kind":"number","value":2040,"string":"2,040"},"label":{"kind":"string","value":"test run specs"},"text":{"kind":"string","value":"from unittest import mock\n\nimport pytest\n\nimport string\nimport dbt.exceptions\nimport dbt.graph.selector as graph_selector\nimport dbt.graph.cli as graph_cli\nfrom dbt.node_types import NodeType\n\nimport networkx as nx\n\nfrom dbt import flags\n\nfrom argparse import Namespace\nfrom dbt.contracts.project import UserConfig\n\nflags.set_from_args(Namespace(), UserConfig())\n\n\ndef _get_graph():\n integer_graph = nx.balanced_tree(2, 2, nx.DiGraph())\n\n package_mapping = {\n i: \"m.\" + (\"X\" if i % 2 == 0 else \"Y\") + \".\" + letter\n for (i, letter) in enumerate(string.ascii_lowercase)\n }\n\n # Edges: [(X.a, Y.b), (X.a, X.c), (Y.b, Y.d), (Y.b, X.e), (X.c, Y.f), (X.c, X.g)]\n return graph_selector.Graph(nx.relabel_nodes(integer_graph, package_mapping))\n\n\ndef _get_manifest(graph):\n nodes = {}\n for unique_id in graph:\n fqn = unique_id.split(\".\")\n node = mock.MagicMock(\n unique_id=unique_id,\n fqn=fqn,\n package_name=fqn[0],\n tags=[],\n resource_type=NodeType.Model,\n empty=False,\n config=mock.MagicMock(enabled=True),\n is_versioned=False,\n )\n nodes[unique_id] = node\n\n nodes[\"m.X.a\"].tags = [\"abc\"]\n nodes[\"m.Y.b\"].tags = [\"abc\", \"bcef\"]\n nodes[\"m.X.c\"].tags = [\"abc\", \"bcef\"]\n nodes[\"m.Y.d\"].tags = []\n nodes[\"m.X.e\"].tags = [\"efg\", \"bcef\"]\n nodes[\"m.Y.f\"].tags = [\"efg\", \"bcef\"]\n nodes[\"m.X.g\"].tags = [\"efg\"]\n return mock.MagicMock(nodes=nodes)\n\n\n@pytest.fixture\ndef graph():\n return graph_selector.Graph(_get_graph())\n\n\n@pytest.fixture\ndef manifest(graph):\n return _get_manifest(graph)\n\n\ndef id_macro(arg):\n if isinstance(arg, str):\n return arg\n try:\n return \"_\".join(arg)\n except TypeError:\n return arg\n\n\nrun_specs = [\n # include by fqn\n ([\"X.a\"], 
[], {\"m.X.a\"}),\n # include by tag\n ([\"tag:abc\"], [], {\"m.X.a\", \"m.Y.b\", \"m.X.c\"}),\n # exclude by tag\n ([\"*\"], [\"tag:abc\"], {\"m.Y.d\", \"m.X.e\", \"m.Y.f\", \"m.X.g\"}),\n # tag + fqn\n ([\"tag:abc\", \"a\"], [], {\"m.X.a\", \"m.Y.b\", \"m.X.c\"}),\n ([\"tag:abc\", \"d\"], [], {\"m.X.a\", \"m.Y.b\", \"m.X.c\", \"m.Y.d\"}),\n # multiple node selection across packages\n ([\"X.a\", \"b\"], [], {\"m.X.a\", \"m.Y.b\"}),\n ([\"X.a+\"], [\"b\"], {\"m.X.a\", \"m.X.c\", \"m.Y.d\", \"m.X.e\", \"m.Y.f\", \"m.X.g\"}),\n # children\n ([\"X.c+\"], [], {\"m.X.c\", \"m.Y.f\", \"m.X.g\"}),\n ([\"X.a+1\"], [], {\"m.X.a\", \"m.Y.b\", \"m.X.c\"}),\n ([\"X.a+\"], [\"tag:efg\"], {\"m.X.a\", \"m.Y.b\", \"m.X.c\", \"m.Y.d\"}),\n # parents\n ([\"+Y.f\"], [], {\"m.X.c\", \"m.Y.f\", \"m.X.a\"}),\n ([\"1+Y.f\"], [], {\"m.X.c\", \"m.Y.f\"}),\n # childrens parents\n ([\"@X.c\"], [], {\"m.X.a\", \"m.X.c\", \"m.Y.f\", \"m.X.g\"}),\n # multiple selection/exclusion\n ([\"tag:abc\", \"tag:bcef\"], [], {\"m.X.a\", \"m.Y.b\", \"m.X.c\", \"m.X.e\", \"m.Y.f\"}),\n ([\"tag:abc\", \"tag:bcef\"], [\"tag:efg\"], {\"m.X.a\", \"m.Y.b\", \"m.X.c\"}),\n ([\"tag:abc\", \"tag:bcef\"], [\"tag:efg\", \"a\"], {\"m.Y.b\", \"m.X.c\"}),\n # intersections\n ([\"a,a\"], [], {\"m.X.a\"}),\n ([\"+c,c+\"], [], {\"m.X.c\"}),\n ([\"a,b\"], [], set()),\n ([\"tag:abc,tag:bcef\"], [], {\"m.Y.b\", \"m.X.c\"}),\n ([\"*,tag:abc,a\"], [], {\"m.X.a\"}),\n ([\"a,tag:abc,*\"], [], {\"m.X.a\"}),\n ([\"tag:abc,tag:bcef\"], [\"c\"], {\"m.Y.b\"}),\n ([\"tag:bcef,tag:efg\"], [\"tag:bcef,@b\"], {\"m.Y.f\"}),\n ([\"tag:bcef,tag:efg\"], [\"tag:bcef,@a\"], set()),\n ([\"*,@a,+b\"], [\"*,tag:abc,tag:bcef\"], {\"m.X.a\"}),\n ([\"tag:bcef,tag:efg\", \"*,tag:abc\"], [], {\"m.X.a\", \"m.Y.b\", \"m.X.c\", \"m.X.e\", \"m.Y.f\"}),\n ([\"tag:bcef,tag:efg\", \"*,tag:abc\"], [\"e\"], {\"m.X.a\", \"m.Y.b\", \"m.X.c\", \"m.Y.f\"}),\n ([\"tag:bcef,tag:efg\", \"*,tag:abc\"], [\"e\"], {\"m.X.a\", \"m.Y.b\", \"m.X.c\", \"m.Y.f\"}),\n ([\"tag:bcef,tag:efg\", \"*,tag:abc\"], [\"e\", \"f\"], {\"m.X.a\", \"m.Y.b\", \"m.X.c\"}),\n ([\"tag:bcef,tag:efg\", \"*,tag:abc\"], [\"tag:abc,tag:bcef\"], {\"m.X.a\", \"m.X.e\", \"m.Y.f\"}),\n ([\"tag:bcef,tag:efg\", \"*,tag:abc\"], [\"tag:abc,tag:bcef\", \"tag:abc,a\"], {\"m.X.e\", \"m.Y.f\"}),\n]\n\n\n@pytest.mark.parametrize(\"include,exclude,expected\", run_specs, ids=id_macro)\ndef METHOD_NAME(include, exclude, expected):\n graph = _get_graph()\n manifest = _get_manifest(graph)\n selector = graph_selector.NodeSelector(graph, manifest)\n # TODO: The \"eager\" string below needs to be replaced with programatic access\n # to the default value for the indirect selection parameter in\n # dbt.cli.params.indirect_selection\n #\n # Doing that is actually a little tricky, so I'm punting it to a new ticket GH #6397\n spec = graph_cli.parse_difference(include, exclude, \"eager\")\n selected, _ = selector.select_nodes(spec)\n\n assert selected == expected\n\n\nparam_specs = [\n (\"a\", False, None, False, None, \"fqn\", \"a\", False),\n (\"+a\", True, None, False, None, \"fqn\", \"a\", False),\n (\"256+a\", True, 256, False, None, \"fqn\", \"a\", False),\n (\"a+\", False, None, True, None, \"fqn\", \"a\", False),\n (\"a+256\", False, None, True, 256, \"fqn\", \"a\", False),\n (\"+a+\", True, None, True, None, \"fqn\", \"a\", False),\n (\"16+a+32\", True, 16, True, 32, \"fqn\", \"a\", False),\n (\"@a\", False, None, False, None, \"fqn\", \"a\", True),\n (\"a.b\", False, None, False, None, \"fqn\", \"a.b\", False),\n (\"+a.b\", True, None, 
False, None, \"fqn\", \"a.b\", False),\n (\"256+a.b\", True, 256, False, None, \"fqn\", \"a.b\", False),\n (\"a.b+\", False, None, True, None, \"fqn\", \"a.b\", False),\n (\"a.b+256\", False, None, True, 256, \"fqn\", \"a.b\", False),\n (\"+a.b+\", True, None, True, None, \"fqn\", \"a.b\", False),\n (\"16+a.b+32\", True, 16, True, 32, \"fqn\", \"a.b\", False),\n (\"@a.b\", False, None, False, None, \"fqn\", \"a.b\", True),\n (\"a.b.*\", False, None, False, None, \"fqn\", \"a.b.*\", False),\n (\"+a.b.*\", True, None, False, None, \"fqn\", \"a.b.*\", False),\n (\"256+a.b.*\", True, 256, False, None, \"fqn\", \"a.b.*\", False),\n (\"a.b.*+\", False, None, True, None, \"fqn\", \"a.b.*\", False),\n (\"a.b.*+256\", False, None, True, 256, \"fqn\", \"a.b.*\", False),\n (\"+a.b.*+\", True, None, True, None, \"fqn\", \"a.b.*\", False),\n (\"16+a.b.*+32\", True, 16, True, 32, \"fqn\", \"a.b.*\", False),\n (\"@a.b.*\", False, None, False, None, \"fqn\", \"a.b.*\", True),\n (\"tag:a\", False, None, False, None, \"tag\", \"a\", False),\n (\"+tag:a\", True, None, False, None, \"tag\", \"a\", False),\n (\"256+tag:a\", True, 256, False, None, \"tag\", \"a\", False),\n (\"tag:a+\", False, None, True, None, \"tag\", \"a\", False),\n (\"tag:a+256\", False, None, True, 256, \"tag\", \"a\", False),\n (\"+tag:a+\", True, None, True, None, \"tag\", \"a\", False),\n (\"16+tag:a+32\", True, 16, True, 32, \"tag\", \"a\", False),\n (\"@tag:a\", False, None, False, None, \"tag\", \"a\", True),\n (\"source:a\", False, None, False, None, \"source\", \"a\", False),\n (\"source:a+\", False, None, True, None, \"source\", \"a\", False),\n (\"source:a+1\", False, None, True, 1, \"source\", \"a\", False),\n (\"source:a+32\", False, None, True, 32, \"source\", \"a\", False),\n (\"@source:a\", False, None, False, None, \"source\", \"a\", True),\n]\n\n\n@pytest.mark.parametrize(\n \"spec,parents,parents_depth,children,children_depth,filter_type,filter_value,childrens_parents\",\n param_specs,\n ids=id_macro,\n)\ndef test_parse_specs(\n spec,\n parents,\n parents_depth,\n children,\n children_depth,\n filter_type,\n filter_value,\n childrens_parents,\n):\n parsed = graph_selector.SelectionCriteria.from_single_spec(spec)\n assert parsed.parents == parents\n assert parsed.parents_depth == parents_depth\n assert parsed.children == children\n assert parsed.children_depth == children_depth\n assert parsed.method == filter_type\n assert parsed.value == filter_value\n assert parsed.childrens_parents == childrens_parents\n\n\ninvalid_specs = [\n \"@a+\",\n \"@a.b+\",\n \"@a.b*+\",\n \"@tag:a+\",\n \"@source:a+\",\n]\n\n\n@pytest.mark.parametrize(\"invalid\", invalid_specs, ids=lambda k: str(k))\ndef test_invalid_specs(invalid):\n with pytest.raises(dbt.exceptions.DbtRuntimeError):\n graph_selector.SelectionCriteria.from_single_spec(invalid)"}}},{"rowIdx":2041,"cells":{"id":{"kind":"number","value":2041,"string":"2,041"},"label":{"kind":"string","value":"compute exact"},"text":{"kind":"string","value":"\"\"\"\nCopyright (c) 2018-2023 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing 
permissions and\nlimitations under the License.\n\"\"\"\n\nimport re\nfrom collections import Counter\nimport string\nimport numpy\n\nfrom ..representation import QuestionAnsweringAnnotation, QuestionAnsweringPrediction\nfrom ..representation import QuestionAnsweringEmbeddingAnnotation, QuestionAnsweringEmbeddingPrediction\nfrom ..representation import QuestionAnsweringBiDAFAnnotation\nfrom .metric import PerImageEvaluationMetric, FullDatasetEvaluationMetric\nfrom ..config import NumberField\n\n\ndef normalize_answer(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n\n def remove_articles(text):\n regex = re.compile(r\"\\b(a|an|the)\\b\", re.UNICODE)\n return re.sub(regex, \" \", text)\n\n def white_space_fix(text):\n return \" \".join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return \"\".join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\n\ndef get_tokens(s):\n if not s:\n return []\n return normalize_answer(s).split()\n\n\nclass ScoreF1(PerImageEvaluationMetric):\n __provider__ = 'f1'\n\n annotation_types = (QuestionAnsweringAnnotation,)\n prediction_types = (QuestionAnsweringPrediction,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.per_question_results = {}\n\n def update(self, annotation, prediction):\n gold_answers = [answer[\"text\"] for answer in annotation.orig_answer_text if normalize_answer(answer[\"text\"])]\n if not gold_answers:\n gold_answers = ['']\n prediction_answer = prediction.tokens[0] if prediction.tokens else ''\n max_f1_score = max(self.compute_f1(a, prediction_answer) for a in gold_answers)\n current_max_f1_score = self.per_question_results.get(annotation.question_id, 0)\n self.per_question_results[annotation.question_id] = max(max_f1_score, current_max_f1_score)\n return max_f1_score\n\n @staticmethod\n def compute_f1(a_gold, a_pred):\n gold_toks = get_tokens(a_gold)\n pred_toks = get_tokens(a_pred)\n common = Counter(gold_toks) & Counter(pred_toks)\n num_same = sum(common.values())\n if len(gold_toks) == 0 or len(pred_toks) == 0:\n # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\n return int(gold_toks == pred_toks)\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(pred_toks)\n recall = 1.0 * num_same / len(gold_toks)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\n def evaluate(self, annotations, predictions):\n return sum(self.per_question_results.values()) / len(self.per_question_results)\n\n def reset(self):\n del self.per_question_results\n self.per_question_results = {}\n\n\nclass ExactMatchScore(PerImageEvaluationMetric):\n __provider__ = 'exact_match'\n\n annotation_types = (QuestionAnsweringAnnotation, QuestionAnsweringBiDAFAnnotation, )\n prediction_types = (QuestionAnsweringPrediction, )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.per_question_results = {}\n\n def update(self, annotation, prediction):\n gold_answers = [answer[\"text\"] for answer in annotation.orig_answer_text if normalize_answer(answer[\"text\"])]\n if not gold_answers:\n gold_answers = ['']\n pred_answer = prediction.tokens[0] if prediction.tokens else ''\n max_exact_match = max(self.METHOD_NAME(a_gold, pred_answer) for a_gold in gold_answers)\n self.per_question_results[annotation.question_id] = max(\n max_exact_match, self.per_question_results.get(annotation.question_id, 0)\n 
)\n return max_exact_match\n\n @staticmethod\n def METHOD_NAME(a_gold, a_pred):\n return int(normalize_answer(a_gold) == normalize_answer(a_pred))\n\n def evaluate(self, annotations, predictions):\n return sum(self.per_question_results.values()) / len(self.per_question_results)\n\n def reset(self):\n del self.per_question_results\n self.per_question_results = {}\n\n\nclass QuestionAnsweringEmbeddingAccuracy(FullDatasetEvaluationMetric):\n\n __provider__ = 'qa_embedding_accuracy'\n annotation_types = (QuestionAnsweringEmbeddingAnnotation,)\n prediction_types = (QuestionAnsweringEmbeddingPrediction,)\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'top_k': NumberField(\n value_type=int, min_value=1, max_value=1000, default=5, optional=True,\n description='Specifies the number of closest context embeddings to check.'\n ),\n })\n return parameters\n\n def configure(self):\n self.top_k = self.get_value_from_config('top_k')\n\n def evaluate(self, annotations, predictions):\n\n ap_pairs = list(zip(annotations, predictions))\n\n #check data alignment\n assert all(\n a.identifier is p.identifier\n if not isinstance(p.identifier, tuple)\n else p.identifier.values\n for a, p in ap_pairs), \"annotations and predictions are not aligned\"\n\n q_pairs = [(a, p) for a, p in ap_pairs if a.context_pos_indetifier is not None]\n c_pairs = [(a, p) for a, p in ap_pairs if a.context_pos_indetifier is None]\n\n c_data_identifiers = [a.identifier for a, p in c_pairs]\n c_vecs = numpy.array([p.embedding for a, p in c_pairs])\n\n # calc distances from each question to all contexts and check if top_k has true positives\n true_pos = 0\n for q_a, q_p in q_pairs:\n\n #calc distance between question embedding with all context embeddings\n d = c_vecs - q_p.embedding[None, :]\n dist = numpy.linalg.norm(d, ord=2, axis=1)\n index = dist.argsort()\n\n #check that right context in the list of top_k\n c_pos_index = c_data_identifiers.index(q_a.context_pos_indetifier)\n if c_pos_index in index[:self.top_k]:\n true_pos += 1\n\n return [true_pos/len(q_pairs)] if q_pairs else 0"}}},{"rowIdx":2042,"cells":{"id":{"kind":"number","value":2042,"string":"2,042"},"label":{"kind":"string","value":"grad"},"text":{"kind":"string","value":"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021, 2023.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Tests for the Gradient Descent optimizer.\"\"\"\n\nfrom test.python.algorithms import QiskitAlgorithmsTestCase\nimport numpy as np\nfrom qiskit.algorithms.optimizers import GradientDescent, GradientDescentState\nfrom qiskit.algorithms.optimizers.steppable_optimizer import TellData, AskData\nfrom qiskit.circuit.library import PauliTwoDesign\nfrom qiskit.opflow import I, Z, StateFn\n\n\nclass TestGradientDescent(QiskitAlgorithmsTestCase):\n \"\"\"Tests for the gradient descent optimizer.\"\"\"\n\n def setUp(self):\n super().setUp()\n np.random.seed(12)\n self.initial_point = np.array([1, 1, 1, 1, 0])\n\n def objective(self, x):\n \"\"\"Objective Function for the tests\"\"\"\n return (np.linalg.norm(x) - 1) ** 2\n\n def METHOD_NAME(self, x):\n \"\"\"Gradient of the objective function\"\"\"\n return 2 * (np.linalg.norm(x) - 1) * x / np.linalg.norm(x)\n\n def test_pauli_two_design(self):\n \"\"\"Test standard gradient descent on the Pauli two-design example.\"\"\"\n circuit = PauliTwoDesign(3, reps=3, seed=2)\n parameters = list(circuit.parameters)\n with self.assertWarns(DeprecationWarning):\n obs = Z ^ Z ^ I\n expr = ~StateFn(obs) @ StateFn(circuit)\n\n initial_point = np.array(\n [\n 0.1822308,\n -0.27254251,\n 0.83684425,\n 0.86153976,\n -0.7111668,\n 0.82766631,\n 0.97867993,\n 0.46136964,\n 2.27079901,\n 0.13382699,\n 0.29589915,\n 0.64883193,\n ]\n )\n\n def objective_pauli(x):\n return expr.bind_parameters(dict(zip(parameters, x))).eval().real\n\n optimizer = GradientDescent(maxiter=100, learning_rate=0.1, perturbation=0.1)\n\n with self.assertWarns(DeprecationWarning):\n result = optimizer.minimize(objective_pauli, x0=initial_point)\n self.assertLess(result.fun, -0.95) # final loss\n self.assertEqual(result.nfev, 1300) # function evaluations\n\n def test_callback(self):\n \"\"\"Test the callback.\"\"\"\n\n history = []\n\n def callback(*args):\n history.append(args)\n\n optimizer = GradientDescent(maxiter=1, callback=callback)\n\n _ = optimizer.minimize(self.objective, np.array([1, -1]))\n\n self.assertEqual(len(history), 1)\n self.assertIsInstance(history[0][0], int) # nfevs\n self.assertIsInstance(history[0][1], np.ndarray) # parameters\n self.assertIsInstance(history[0][2], float) # function value\n self.assertIsInstance(history[0][3], float) # norm of the gradient\n\n def test_minimize(self):\n \"\"\"Test setting the learning rate as iterator and minimizing the funciton.\"\"\"\n\n def learning_rate():\n power = 0.6\n constant_coeff = 0.1\n\n def powerlaw():\n n = 0\n while True:\n yield constant_coeff * (n**power)\n n += 1\n\n return powerlaw()\n\n optimizer = GradientDescent(maxiter=20, learning_rate=learning_rate)\n result = optimizer.minimize(self.objective, self.initial_point, self.METHOD_NAME)\n\n self.assertLess(result.fun, 1e-5)\n\n def test_no_start(self):\n \"\"\"Tests that making a step without having started the optimizer raises an error.\"\"\"\n optimizer = GradientDescent()\n with self.assertRaises(AttributeError):\n optimizer.step()\n\n def test_start(self):\n \"\"\"Tests if the start method initializes the state properly.\"\"\"\n optimizer = GradientDescent()\n self.assertIsNone(optimizer.state)\n 
self.assertIsNone(optimizer.perturbation)\n optimizer.start(x0=self.initial_point, fun=self.objective)\n\n test_state = GradientDescentState(\n x=self.initial_point,\n fun=self.objective,\n jac=None,\n nfev=0,\n njev=0,\n nit=0,\n learning_rate=1,\n stepsize=None,\n )\n\n self.assertEqual(test_state, optimizer.state)\n\n def test_ask(self):\n \"\"\"Test the ask method.\"\"\"\n optimizer = GradientDescent()\n optimizer.start(fun=self.objective, x0=self.initial_point)\n\n ask_data = optimizer.ask()\n np.testing.assert_equal(ask_data.x_jac, self.initial_point)\n self.assertIsNone(ask_data.x_fun)\n\n def test_evaluate(self):\n \"\"\"Test the evaluate method.\"\"\"\n optimizer = GradientDescent(perturbation=1e-10)\n optimizer.start(fun=self.objective, x0=self.initial_point)\n ask_data = AskData(x_jac=self.initial_point)\n tell_data = optimizer.evaluate(ask_data=ask_data)\n np.testing.assert_almost_equal(tell_data.eval_jac, self.METHOD_NAME(self.initial_point), decimal=2)\n\n def test_tell(self):\n \"\"\"Test the tell method.\"\"\"\n optimizer = GradientDescent(learning_rate=1.0)\n optimizer.start(fun=self.objective, x0=self.initial_point)\n ask_data = AskData(x_jac=self.initial_point)\n tell_data = TellData(eval_jac=self.initial_point)\n optimizer.tell(ask_data=ask_data, tell_data=tell_data)\n np.testing.assert_equal(optimizer.state.x, np.zeros(optimizer.state.x.shape))\n\n def test_continue_condition(self):\n \"\"\"Test if the continue condition is working properly.\"\"\"\n optimizer = GradientDescent(tol=1)\n optimizer.start(fun=self.objective, x0=self.initial_point)\n self.assertTrue(optimizer.continue_condition())\n optimizer.state.stepsize = 0.1\n self.assertFalse(optimizer.continue_condition())\n optimizer.state.stepsize = 10\n optimizer.state.nit = 1000\n self.assertFalse(optimizer.continue_condition())\n\n def test_step(self):\n \"\"\"Tests if performing one step yields the desired result.\"\"\"\n optimizer = GradientDescent(learning_rate=1.0)\n optimizer.start(fun=self.objective, jac=self.METHOD_NAME, x0=self.initial_point)\n optimizer.step()\n np.testing.assert_almost_equal(\n optimizer.state.x, self.initial_point - self.METHOD_NAME(self.initial_point), 6\n )\n\n def test_wrong_dimension_gradient(self):\n \"\"\"Tests if an error is raised when a gradient of the wrong dimension is passed.\"\"\"\n\n optimizer = GradientDescent(learning_rate=1.0)\n optimizer.start(fun=self.objective, x0=self.initial_point)\n ask_data = AskData(x_jac=self.initial_point)\n tell_data = TellData(eval_jac=np.array([1.0, 5]))\n with self.assertRaises(ValueError):\n optimizer.tell(ask_data=ask_data, tell_data=tell_data)\n\n tell_data = TellData(eval_jac=np.array(1))\n with self.assertRaises(ValueError):\n optimizer.tell(ask_data=ask_data, tell_data=tell_data)"}}},{"rowIdx":2043,"cells":{"id":{"kind":"number","value":2043,"string":"2,043"},"label":{"kind":"string","value":"separate true followers"},"text":{"kind":"string","value":"import json\nimport re\nfrom html.parser import HTMLParser\nfrom io import StringIO\n\nfrom hedera.supported_languages import SUPPORTED_LANGUAGES\nfrom lemmatization.lemmatizer import Lemmatizer\n\n\nclass EditedTextHtmlParser(HTMLParser):\n\n def __init__(self, token_lemma_dict=None, lang=None):\n self.current_tag = None\n self.current_attrs = {}\n self.current_data = \"\"\n self.lemmatized_text_data = []\n self.token_lemma_dict = token_lemma_dict\n self.lemmatizer = Lemmatizer(lang)\n self.service = SUPPORTED_LANGUAGES[lang].service\n self.initial = \"\"\n self.unique_text = False\n 
return super().__init__()\n\n def handle_starttag(self, tag, attrs):\n if tag == \"span\":\n self.current_tag = \"span\"\n \"\"\"\n Note: the fed in data could be two different types from a tuple of (key, dict) or (key, bool)\n handle_endtag() will require a key:value pair containing either of the structure below:\n [('data-token', '{\"glossed\": \"glossed-automatic\", \"initial\": \"\", \"lemma_id\": 1372, \"resolved\": \"resolved-automatic\", \"gloss_ids\": [84128, 68154], \"word_normalized\": \"Arma\"}')]\n [('follower', 'true')]\n \"\"\"\n key, value = attrs[0]\n if key in \"follower\":\n self.current_attrs = {key: value}\n else:\n self.current_attrs = json.loads(value)\n\n def handle_endtag(self, tag):\n if \"follower\" in self.current_attrs:\n self.METHOD_NAME(self.current_data)\n #Note: sometimes the current_tag/self.current_attrs will be empty/None when there is a newline/break\n # len() checks if empty string so we dont append blank words\n elif self.current_data is not None and self.current_tag is not None and len(self.current_data):\n self.lemmatized_text_data.append(\n {\n **self.current_attrs,\n \"word\": self.current_data,\n \"following\": \"\",\n }\n )\n self.current_tag = None\n self.current_attrs = {}\n self.current_data = \"\"\n\n def handle_data(self, data):\n # used to modify data by the service(e.g latin underscores)\n formatted_text_data = self.service.apply_text_rule(self.unique_text, data)\n if type(formatted_text_data) is dict:\n data = formatted_text_data[\"data\"]\n self.unique_text = formatted_text_data[\"unique_text\"]\n if (\"follower\" in self.current_attrs):\n self.current_data = data\n else:\n try:\n if (\n (self.current_tag is None) or\n (self.current_tag == \"span\" and self.current_attrs == {}) or\n (self.current_attrs[\"lemma_id\"] not in self.token_lemma_dict[data])\n ):\n self.lemmatize_chunk(data)\n else:\n self.current_data = data\n except KeyError:\n if self.service.check_text(data):\n self.unique_text = data\n if not self.unique_text:\n self.lemmatize_chunk(data)\n\n def METHOD_NAME(self, follower):\n \"\"\"\n Takes the contents of a span where 'follower' is true.\n Splits any 'follower' characters from alpha numeric characters.\n Sets the 'following' attr on the previous data point with true followers\n and sends new alpha numeric string to be lemmatized.\n Returns None\n \"\"\"\n followers = []\n text = []\n for idx, ch in enumerate(follower):\n if ch.isalnum():\n text = follower[idx:]\n break\n followers.append(ch)\n\n if len(self.lemmatized_text_data) > 0:\n self.lemmatized_text_data[-1][\"following\"] += \"\".join(followers)\n else:\n # this will only occur if the text begins with a \"follower\"\n self.lemmatized_text_data.append(\n {\n \"word\": \"\",\n \"lemma_id\": None,\n \"resolved\": True,\n \"word_normalized\": \"\",\n \"following\": \"\".join(followers)\n }\n )\n if (len(text) > 0):\n self.lemmatize_chunk(\"\".join(text))\n\n def lemmatize_chunk(self, chunk):\n \"\"\"\n Takes an unrecognized chunk of text.\n Sends 'chunk' to be lemmatized, then extends the data with the returned content.\n Checks if chunk does not contain return and newline \"\\r\\n\" - only add tokens if it the chunk is not a return/newline\n In case there is an newline at the beginning of the text(\"initial\"), the newline char will be added to the previous text \"following\" key:value pair\n **Fixes problem with empty tokens**\n **Fixes problem with latin underscores**\n Returns None\n \"\"\"\n self.current_data = None\n new_data = 
self.lemmatizer.lemmatize(chunk)\n # regex checks if '\\r\\n' is the only char used in the chunk\n contains_only_newline = bool(re.match(r\"^[\\r\\n]+$\", chunk))\n if not contains_only_newline:\n self.process_initial_data(new_data)\n self.lemmatized_text_data.extend(new_data)\n if contains_only_newline and len(self.lemmatized_text_data):\n token_lemma_dict_keys = list(self.token_lemma_dict.keys())\n prev_lemma_id = self.lemmatized_text_data[-1][\"lemma_id\"]\n following = self.lemmatized_text_data[-1][\"following\"]\n #Note: Added check if we have reached the end of the data array because theres a bug where new lines are added after each edit\n if len(token_lemma_dict_keys) and prev_lemma_id not in self.token_lemma_dict[token_lemma_dict_keys[-1]]:\n self.lemmatized_text_data[-1][\"following\"] = f\"{following}{chunk}\"\n else:\n self.process_initial_data(new_data)\n self.lemmatized_text_data.extend(new_data)\n #TODO EDGE CASE: Newlines/breaks that may happen at the very beginning of the text\n\n def process_initial_data(self, new_data):\n # if statement will add newlines to \"following\" to previous text in lemmatized_text_data\n if len(new_data) and new_data[0][\"initial\"] and len(self.lemmatized_text_data):\n following = self.lemmatized_text_data[-1][\"following\"]\n self.lemmatized_text_data[-1][\"following\"] = f\"{following}{new_data[0]['initial']}\"\n\n\nclass TagStripper(HTMLParser):\n\n def __init__(self):\n super().__init__()\n self.reset()\n self.strict = False\n self.convert_charrefs = True\n self.text = StringIO()\n\n def handle_data(self, d):\n self.text.write(d)\n\n def get_data(self):\n return self.text.getvalue()"}}},{"rowIdx":2044,"cells":{"id":{"kind":"number","value":2044,"string":"2,044"},"label":{"kind":"string","value":"awk rule1"},"text":{"kind":"string","value":"# Leo colorizer control file for awk mode.\n# This file is in the public domain.\n\n# Properties for awk mode.\nproperties = {\n \"indentCloseBrackets\": \"}\",\n \"indentOpenBrackets\": \"{\",\n \"lineComment\": \"#\",\n \"lineUpClosingBracket\": \"true\",\n \"wordBreakChars\": \",+-=<>/?^&*\",\n}\n\n# Attributes dict for awk_main ruleset.\nawk_main_attributes_dict = {\n \"default\": \"null\",\n \"digit_re\": \"\",\n \"escape\": \"\\\\\",\n \"highlight_digits\": \"true\",\n \"ignore_case\": \"false\",\n \"no_word_sep\": \"\",\n}\n\n# Dictionary of attributes dictionaries for awk mode.\nattributesDictDict = {\n \"awk_main\": awk_main_attributes_dict,\n}\n\n# Keywords dict for awk_main ruleset.\nawk_main_keywords_dict = {\n \"$0\": \"keyword3\",\n \"ARGC\": \"keyword3\",\n \"ARGIND\": \"keyword3\",\n \"ARGV\": \"keyword3\",\n \"BEGIN\": \"keyword3\",\n \"CONVFMT\": \"keyword3\",\n \"END\": \"keyword3\",\n \"ENVIRON\": \"keyword3\",\n \"ERRNO\": \"keyword3\",\n \"FIELDSWIDTH\": \"keyword3\",\n \"FILENAME\": \"keyword3\",\n \"FNR\": \"keyword3\",\n \"FS\": \"keyword3\",\n \"IGNORECASE\": \"keyword3\",\n \"NF\": \"keyword3\",\n \"NR\": \"keyword3\",\n \"OFMT\": \"keyword3\",\n \"OFS\": \"keyword3\",\n \"ORS\": \"keyword3\",\n \"RLENGTH\": \"keyword3\",\n \"RS\": \"keyword3\",\n \"RSTART\": \"keyword3\",\n \"RT\": \"keyword3\",\n \"SUBSEP\": \"keyword3\",\n \"atan2\": \"keyword2\",\n \"break\": \"keyword1\",\n \"close\": \"keyword1\",\n \"continue\": \"keyword1\",\n \"cos\": \"keyword2\",\n \"delete\": \"keyword1\",\n \"do\": \"keyword1\",\n \"else\": \"keyword1\",\n \"exit\": \"keyword1\",\n \"exp\": \"keyword2\",\n \"fflush\": \"keyword1\",\n \"for\": \"keyword1\",\n \"function\": \"keyword1\",\n \"gensub\": 
\"keyword2\",\n \"getline\": \"keyword2\",\n \"gsub\": \"keyword2\",\n \"huge\": \"keyword1\",\n \"if\": \"keyword1\",\n \"in\": \"keyword1\",\n \"index\": \"keyword2\",\n \"int\": \"keyword2\",\n \"length\": \"keyword2\",\n \"log\": \"keyword2\",\n \"match\": \"keyword2\",\n \"next\": \"keyword1\",\n \"nextfile\": \"keyword1\",\n \"print\": \"keyword1\",\n \"printf\": \"keyword1\",\n \"rand\": \"keyword2\",\n \"return\": \"keyword1\",\n \"sin\": \"keyword2\",\n \"split\": \"keyword2\",\n \"sprintf\": \"keyword2\",\n \"sqrt\": \"keyword2\",\n \"srand\": \"keyword2\",\n \"sub\": \"keyword2\",\n \"substr\": \"keyword2\",\n \"system\": \"keyword2\",\n \"tolower\": \"keyword2\",\n \"toupper\": \"keyword2\",\n \"while\": \"keyword1\",\n}\n\n# Dictionary of keywords dictionaries for awk mode.\nkeywordsDictDict = {\n \"awk_main\": awk_main_keywords_dict,\n}\n\n# Rules for awk_main ruleset.\n\ndef awk_rule0(colorer, s, i):\n return colorer.match_span(s, i, kind=\"literal1\", begin=\"\\\"\", end=\"\\\"\",\n no_line_break=True)\n\ndef METHOD_NAME(colorer, s, i):\n return colorer.match_span(s, i, kind=\"literal1\", begin=\"'\", end=\"'\",\n no_line_break=True)\n\ndef awk_rule2(colorer, s, i):\n return colorer.match_eol_span(s, i, kind=\"comment1\", seq=\"#\")\n\ndef awk_rule3(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"=\")\n\ndef awk_rule4(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"!\")\n\ndef awk_rule5(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\">=\")\n\ndef awk_rule6(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"<=\")\n\ndef awk_rule7(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"+\")\n\ndef awk_rule8(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"-\")\n\ndef awk_rule9(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"/\")\n\ndef awk_rule10(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"*\")\n\ndef awk_rule11(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\">\")\n\ndef awk_rule12(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"<\")\n\ndef awk_rule13(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"%\")\n\ndef awk_rule14(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"&\")\n\ndef awk_rule15(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"|\")\n\ndef awk_rule16(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"^\")\n\ndef awk_rule17(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"~\")\n\ndef awk_rule18(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"}\")\n\ndef awk_rule19(colorer, s, i):\n return colorer.match_plain_seq(s, i, kind=\"operator\", seq=\"{\")\n\ndef awk_rule20(colorer, s, i):\n return colorer.match_mark_previous(s, i, kind=\"label\", pattern=\":\",\n at_whitespace_end=True,\n exclude_match=True)\n\ndef awk_rule21(colorer, s, i):\n return colorer.match_keywords(s, i)\n\n# Rules dict for awk_main ruleset.\nrulesDict1 = {\n \"!\": [awk_rule4,],\n \"\\\"\": [awk_rule0,],\n \"#\": [awk_rule2,],\n \"$\": [awk_rule21,],\n \"%\": [awk_rule13,],\n \"&\": [awk_rule14,],\n \"'\": [METHOD_NAME,],\n \"*\": [awk_rule10,],\n \"+\": [awk_rule7,],\n \"-\": [awk_rule8,],\n \"/\": 
[awk_rule9,],\n \"0\": [awk_rule21,],\n \"1\": [awk_rule21,],\n \"2\": [awk_rule21,],\n \"3\": [awk_rule21,],\n \"4\": [awk_rule21,],\n \"5\": [awk_rule21,],\n \"6\": [awk_rule21,],\n \"7\": [awk_rule21,],\n \"8\": [awk_rule21,],\n \"9\": [awk_rule21,],\n \":\": [awk_rule20,],\n \"<\": [awk_rule6, awk_rule12,],\n \"=\": [awk_rule3,],\n \">\": [awk_rule5, awk_rule11,],\n \"@\": [awk_rule21,],\n \"A\": [awk_rule21,],\n \"B\": [awk_rule21,],\n \"C\": [awk_rule21,],\n \"D\": [awk_rule21,],\n \"E\": [awk_rule21,],\n \"F\": [awk_rule21,],\n \"G\": [awk_rule21,],\n \"H\": [awk_rule21,],\n \"I\": [awk_rule21,],\n \"J\": [awk_rule21,],\n \"K\": [awk_rule21,],\n \"L\": [awk_rule21,],\n \"M\": [awk_rule21,],\n \"N\": [awk_rule21,],\n \"O\": [awk_rule21,],\n \"P\": [awk_rule21,],\n \"Q\": [awk_rule21,],\n \"R\": [awk_rule21,],\n \"S\": [awk_rule21,],\n \"T\": [awk_rule21,],\n \"U\": [awk_rule21,],\n \"V\": [awk_rule21,],\n \"W\": [awk_rule21,],\n \"X\": [awk_rule21,],\n \"Y\": [awk_rule21,],\n \"Z\": [awk_rule21,],\n \"^\": [awk_rule16,],\n \"a\": [awk_rule21,],\n \"b\": [awk_rule21,],\n \"c\": [awk_rule21,],\n \"d\": [awk_rule21,],\n \"e\": [awk_rule21,],\n \"f\": [awk_rule21,],\n \"g\": [awk_rule21,],\n \"h\": [awk_rule21,],\n \"i\": [awk_rule21,],\n \"j\": [awk_rule21,],\n \"k\": [awk_rule21,],\n \"l\": [awk_rule21,],\n \"m\": [awk_rule21,],\n \"n\": [awk_rule21,],\n \"o\": [awk_rule21,],\n \"p\": [awk_rule21,],\n \"q\": [awk_rule21,],\n \"r\": [awk_rule21,],\n \"s\": [awk_rule21,],\n \"t\": [awk_rule21,],\n \"u\": [awk_rule21,],\n \"v\": [awk_rule21,],\n \"w\": [awk_rule21,],\n \"x\": [awk_rule21,],\n \"y\": [awk_rule21,],\n \"z\": [awk_rule21,],\n \"{\": [awk_rule19,],\n \"|\": [awk_rule15,],\n \"}\": [awk_rule18,],\n \"~\": [awk_rule17,],\n}\n\n# x.rulesDictDict for awk mode.\nrulesDictDict = {\n \"awk_main\": rulesDict1,\n}\n\n# Import dict for awk mode.\nimportDict = {}"}}},{"rowIdx":2045,"cells":{"id":{"kind":"number","value":2045,"string":"2,045"},"label":{"kind":"string","value":"test consider not charging chargepoint in loadmanagement"},"text":{"kind":"string","value":"from typing import List, Tuple\nfrom unittest.mock import Mock\n\nimport pytest\n\nfrom control import data\nfrom control.algorithm import common\nfrom control.chargepoint.chargepoint import Chargepoint\nfrom control.ev import Ev\nfrom control.counter import Counter\nfrom control.counter_all import CounterAll\n\n\n@pytest.fixture(autouse=True)\ndef cp() -> None:\n data.data_init(Mock())\n data.data.cp_data = {\"cp0\": Chargepoint(0, None)}\n\n\n@pytest.mark.parametrize(\"set_current, expected_current\",\n [pytest.param(6, 0),\n pytest.param(0, 0)])\ndef test_reset_current(set_current: int, expected_current: int):\n # setup\n data.data.cp_data[\"cp0\"].data.set.current = set_current\n\n # execution\n common.reset_current()\n\n # evaluation\n assert data.data.cp_data[\"cp0\"].data.set.current == expected_current\n\n\n@pytest.mark.parametrize(\n \"diff, required_currents, expected_set_current, expected_diffs\",\n [\n pytest.param(2, [10, 0, 0], 8, [2, 0, 0], id=\"set diff one phase\"),\n pytest.param(2, [12]*3, 8, [2]*3, id=\"set diff three phases\"),\n pytest.param(8, [8]*3, 8, [8]*3, id=\"set min current three phases\"),\n pytest.param(0, [8]*3, 8, [0]*3, id=\"min current is already set, three phases\"),\n ])\ndef test_set_current_counterdiff(diff: float,\n required_currents: List[float],\n expected_set_current: float,\n expected_diffs: List[float],\n monkeypatch):\n # setup\n cp = Chargepoint(4, None)\n ev = Ev(0)\n 
ev.data.control_parameter.required_currents = required_currents\n cp.data.set.charging_ev_data = ev\n cp.data.set.current = 6\n get_counters_to_check_mock = Mock(return_value=[\"cp0\", \"cp6\"])\n monkeypatch.setattr(CounterAll, \"get_counters_to_check\", get_counters_to_check_mock)\n data.data.counter_data = {\"cp0\": Mock(spec=Counter), \"cp6\": Mock(spec=Counter)}\n\n # evaluation\n common.set_current_counterdiff(diff, 8, cp)\n\n # assertion\n assert cp.data.set.current == expected_set_current\n if diff != 0:\n assert data.data._counter_data['cp0'].update_values_left.call_args_list[0][0][0] == expected_diffs\n assert data.data._counter_data['cp6'].update_values_left.call_args_list[0][0][0] == expected_diffs\n\n\n@pytest.mark.parametrize(\n \"required_currents, expected_mins_counts\",\n [\n ([10, 0, 0], ([6, 0, 0], [1, 0, 0])),\n ([12]*3, ([6]*3, [1]*3))\n ])\ndef test_get_min_current(required_currents: List[float], expected_mins_counts: Tuple[List[float], List[int]]):\n # setup\n cp = Chargepoint(4, None)\n ev = Ev(0)\n ev.data.control_parameter.required_currents = required_currents\n cp.data.set.charging_ev_data = ev\n\n # evaluation\n mins_counts = common.get_min_current(cp)\n\n # assertion\n assert mins_counts == expected_mins_counts\n\n\n@pytest.mark.parametrize(\n \"set_current, diff, expected_current\",\n [\n pytest.param(0, 2, 8, id=\"min current is set, no current has been set on this iteration\"),\n pytest.param(6, 2, 6, id=\"min current is set, current has been set on this iteration\"),\n pytest.param(7, 2, 7, id=\"new current is higher, current has been set on this iteration\"),\n pytest.param(9, 2, 8, id=\"new current is lower, current has been set on this iteration\"),\n ])\ndef test_get_current_to_set(set_current: float, diff: float, expected_current: float):\n # setup & evaluation\n current = common.get_current_to_set(set_current, diff, 6)\n\n # assertion\n assert current == expected_current\n\n\n@pytest.mark.parametrize(\n \"counts, available_currents, missing_currents, expected_current\",\n [\n pytest.param([2]*3, [12, 15, 16], [5]*3, 6),\n pytest.param([2]*3, [1]*3, [2]*3, 0.5),\n pytest.param([2]*3, [0]*3, [2]*3, 0),\n ])\ndef test_available_currents_for_cp(counts: List[int],\n available_currents: List[float],\n missing_currents: List[float],\n expected_current: float):\n # setup\n cp = Chargepoint(4, None)\n ev = Ev(0)\n ev.data.control_parameter.required_currents = [16]*3\n ev.data.control_parameter.required_current = 16\n cp.data.set.charging_ev_data = ev\n cp.data.set.target_current = 10\n\n # evaluation\n current = common.available_current_for_cp(cp, counts, available_currents, missing_currents)\n\n # assertion\n assert current == expected_current\n\n\n@pytest.mark.parametrize(\n \"required_currents_1, required_currents_2, expected_currents\",\n [\n pytest.param([6, 10, 15], [20]*3, ([14, 18, 23], [2]*3)),\n pytest.param([6, 10, 15], [6, 0, 0], ([0, 4, 9], [2, 1, 1])),\n ])\ndef test_get_missing_currents_left(required_currents_1: List[float],\n required_currents_2: List[float],\n expected_currents: List[float]):\n # setup\n def setup_cp(num: int, required_currents) -> Chargepoint:\n ev = Ev(0)\n cp = Chargepoint(num, None)\n ev.data.control_parameter.required_currents = required_currents\n cp.data.set.charging_ev_data = ev\n return cp\n\n # evaluation\n currents = common.get_missing_currents_left(\n [setup_cp(1, required_currents_1), setup_cp(2, required_currents_2)])\n\n # assertion\n assert currents == expected_currents\n\n\n@pytest.mark.parametrize(\n 
\"reserve_for_not_charging, get_currents, expected_considered\",\n [\n pytest.param(True, [0]*3, False, id=\"reserve_for_not_charging active\"),\n pytest.param(True, [6]*3, False, id=\"reserve_for_not_charging active\"),\n pytest.param(False, [0]*3, True, id=\"not charging\"),\n pytest.param(False, [6]*3, False, id=\"charging\"),\n ])\ndef METHOD_NAME(reserve_for_not_charging: bool,\n get_currents: List[float],\n expected_considered: bool):\n # setup\n cp = Chargepoint(4, None)\n cp.data.get.currents = get_currents\n data.data.counter_all_data.data.config.reserve_for_not_charging = reserve_for_not_charging\n\n # evaluation\n considered = common.consider_not_charging_chargepoint_in_loadmanagement(cp)\n\n # assertion\n assert considered == expected_considered"}}},{"rowIdx":2046,"cells":{"id":{"kind":"number","value":2046,"string":"2,046"},"label":{"kind":"string","value":"cindex"},"text":{"kind":"string","value":"\"\"\"\nClasses and methods to interface with files storing rate data.\n\"\"\"\n\nimport os\nimport re\n\nfrom scipy.constants import physical_constants\n\nfrom pynucastro.nucdata.binding_table import BindingTable\nfrom pynucastro.nucdata.elements import PeriodicTable\nfrom pynucastro.nucdata.mass_table import MassTable\nfrom pynucastro.nucdata.partition_function import PartitionFunctionCollection\nfrom pynucastro.nucdata.spin_table import SpinTable\n\n_pynucastro_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n_pynucastro_rates_dir = os.path.join(_pynucastro_dir, 'library')\n_pynucastro_tabular_dir = os.path.join(_pynucastro_rates_dir, 'tabular')\n\n#set the atomic mass unit constant in MeV\nm_u, _, _ = physical_constants['atomic mass constant energy equivalent in MeV']\n\n#read the mass excess table once and store it at the module-level\n_mass_table = MassTable()\n\n#read the spin table once and store it at the module-level\n_spin_table = SpinTable(reliable=True)\n\n# read the binding energy table once and store it at the module-level\n_binding_table = BindingTable()\n\n# read the partition function table once and store it at the module-level\n_pcollection = PartitionFunctionCollection(use_high_temperatures=True, use_set='frdm')\n\n\nclass UnsupportedNucleus(Exception):\n pass\n\n\nclass Nucleus:\n \"\"\"\n a nucleus that participates in a reaction -- we store it in a\n class to hold its properties, define a sorting, and give it a\n pretty printing string.\n\n :var Z: atomic number\n :var N: neutron number\n :var A: atomic mass\n :var nucbind: nuclear binding energy (MeV / nucleon)\n :var short_spec_name: nucleus abbreviation (e.g. \"he4\")\n :var caps_name: capitalized short species name (e.g. \"He4\")\n :var el: element name (e.g. 
\"he\")\n :var pretty: LaTeX formatted version of the nucleus name\n :var A_nuc: Nuclear Mass in amu\n\n \"\"\"\n _cache = {}\n\n def __init__(self, name, dummy=False):\n name = name.lower()\n self.raw = name\n\n # a dummy nucleus is one that we can use where a nucleus is needed\n # but it is not considered to be part of the network\n self.dummy = dummy\n\n # element symbol and atomic weight\n if name == \"p\":\n self.el = \"h\"\n self.A = 1\n self.short_spec_name = \"h1\"\n self.caps_name = \"p\"\n elif name == \"d\":\n self.el = \"h\"\n self.A = 2\n self.short_spec_name = \"h2\"\n self.caps_name = \"H2\"\n elif name == \"t\":\n self.el = \"h\"\n self.A = 3\n self.short_spec_name = \"h3\"\n self.caps_name = \"H3\"\n elif name == \"a\":\n #this is a convenience, enabling the use of a commonly-used alias:\n # He4 --> \\alpha --> \"a\" , e.g. c12(a,g)o16\n self.el = \"he\"\n self.A = 4\n self.short_spec_name = \"he4\"\n self.raw = \"he4\"\n self.caps_name = \"He4\"\n elif name == \"n\":\n self.el = \"n\"\n self.A = 1\n self.Z = 0\n self.N = 1\n self.short_spec_name = \"n\"\n self.spec_name = \"neutron\"\n self.pretty = fr\"\\mathrm{{{self.el}}}\"\n self.caps_name = \"n\"\n elif name.strip() in (\"al-6\", \"al*6\"):\n raise UnsupportedNucleus()\n else:\n e = re.match(r\"([a-zA-Z]*)(\\d*)\", name)\n self.el = e.group(1).title() # chemical symbol\n assert self.el\n self.A = int(e.group(2))\n assert self.A >= 0\n self.short_spec_name = name\n self.caps_name = name.capitalize()\n\n # use lowercase element abbreviation regardless the case of the input\n self.el = self.el.lower()\n\n # atomic number comes from periodic table\n if name != \"n\":\n i = PeriodicTable.lookup_abbreviation(self.el)\n self.Z = i.Z\n assert isinstance(self.Z, int)\n assert self.Z >= 0\n self.N = self.A - self.Z\n assert isinstance(self.N, int)\n assert self.N >= 0\n\n # long name\n self.spec_name = f'{i.name}-{self.A}'\n\n # latex formatted style\n self.pretty = fr\"{{}}^{{{self.A}}}\\mathrm{{{self.el.capitalize()}}}\"\n\n # set the number of spin states\n try:\n self.spin_states = _spin_table.get_spin_states(a=self.A, z=self.Z)\n except NotImplementedError:\n self.spin_states = None\n\n # set a partition function object to every nucleus\n try:\n self.partition_function = _pcollection.get_partition_function(self.short_spec_name)\n except ValueError:\n self.partition_function = None\n\n try:\n self.nucbind = _binding_table.get_binding_energy(n=self.N, z=self.Z)\n except NotImplementedError:\n # the binding energy table doesn't know about this nucleus\n self.nucbind = None\n\n # Now we will define the Nuclear Mass,\n try:\n self.A_nuc = float(self.A) + _mass_table.get_mass_diff(a=self.A, z=self.Z) / m_u\n except NotImplementedError:\n self.A_nuc = None\n\n @classmethod\n def from_cache(cls, name, dummy=False):\n key = (name.lower(), dummy)\n if key not in cls._cache:\n cls._cache[key] = Nucleus(name, dummy)\n return cls._cache[key]\n\n def __repr__(self):\n return self.raw\n\n def __hash__(self):\n return hash((self.Z, self.A))\n\n def c(self):\n \"\"\"return the capitalized-style name\"\"\"\n return self.caps_name\n\n def METHOD_NAME(self):\n \"\"\"return the name for C++ indexing\"\"\"\n return self.short_spec_name.capitalize()\n\n def __eq__(self, other):\n if isinstance(other, Nucleus):\n return self.el == other.el and \\\n self.Z == other.Z and self.A == other.A\n if isinstance(other, tuple):\n return (self.Z, self.A) == other\n return NotImplemented\n\n def __lt__(self, other):\n if not self.Z == other.Z:\n return 
self.Z < other.Z\n return self.A < other.A\n\n\ndef get_nuclei_in_range(zmin, zmax, amin, amax):\n \"\"\"given a range of Z = [zmin, zmax], and A = [amin, amax],\n return a list of Nucleus objects for all nuclei in this range\"\"\"\n\n nuc_list = []\n assert zmax >= zmin, \"zmax must be >= zmin\"\n assert amax >= amin, \"amax must be >= amin\"\n\n for z in range(zmin, zmax+1):\n element = PeriodicTable.lookup_Z(z)\n for a in range(amin, amax+1):\n name = f\"{element.abbreviation}{a}\"\n nuc_list.append(Nucleus(name))\n\n return nuc_list"}}},{"rowIdx":2047,"cells":{"id":{"kind":"number","value":2047,"string":"2,047"},"label":{"kind":"string","value":"get test params"},"text":{"kind":"string","value":"\"\"\"Optional passthrough transformer.\"\"\"\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n__author__ = [\"aiwalter\", \"fkiraly\"]\n__all__ = [\"OptionalPassthrough\"]\n\nfrom sktime.transformations._delegate import _DelegatedTransformer\nfrom sktime.transformations.compose._common import CORE_MTYPES\nfrom sktime.transformations.compose._id import Id\n\n\nclass OptionalPassthrough(_DelegatedTransformer):\n \"\"\"Wrap an existing transformer to tune whether to include it in a pipeline.\n\n Allows tuning the implicit hyperparameter whether or not to use a\n particular transformer inside a pipeline (e.g. TransformedTargetForecaster)\n or not. This is achieved by the hyperparameter `passthrough`\n which can be added to a tuning grid then (see example).\n\n Parameters\n ----------\n transformer : Estimator\n scikit-learn-like or sktime-like transformer to fit and apply to series.\n this is a \"blueprint\" transformer, state does not change when `fit` is called\n passthrough : bool, default=False\n Whether to apply the given transformer or to just\n passthrough the data (identity transformation). If, True the transformer\n is not applied and the OptionalPassthrough uses the identity\n transformation.\n\n Attributes\n ----------\n transformer_: transformer,\n this clone is fitted when `fit` is called and provides `transform` and inverse\n if passthrough = False, a clone of `transformer`passed\n if passthrough = True, the identity transformer `Id`\n\n Examples\n --------\n >>> from sktime.datasets import load_airline\n >>> from sktime.forecasting.naive import NaiveForecaster\n >>> from sktime.transformations.compose import OptionalPassthrough\n >>> from sktime.transformations.series.detrend import Deseasonalizer\n >>> from sktime.transformations.series.adapt import TabularToSeriesAdaptor\n >>> from sktime.forecasting.compose import TransformedTargetForecaster\n >>> from sktime.forecasting.model_selection import (\n ... ForecastingGridSearchCV,\n ... SlidingWindowSplitter)\n >>> from sklearn.preprocessing import StandardScaler\n >>> # create pipeline\n >>> pipe = TransformedTargetForecaster(steps=[\n ... (\"deseasonalizer\", OptionalPassthrough(Deseasonalizer())),\n ... (\"scaler\", OptionalPassthrough(TabularToSeriesAdaptor(StandardScaler()))),\n ... (\"forecaster\", NaiveForecaster())]) # doctest: +SKIP\n >>> # putting it all together in a grid search\n >>> cv = SlidingWindowSplitter(\n ... initial_window=60,\n ... window_length=24,\n ... start_with_window=True,\n ... step_length=48) # doctest: +SKIP\n >>> param_grid = {\n ... \"deseasonalizer__passthrough\" : [True, False],\n ... \"scaler__transformer__transformer__with_mean\": [True, False],\n ... \"scaler__passthrough\" : [True, False],\n ... 
\"forecaster__strategy\": [\"drift\", \"mean\", \"last\"]} # doctest: +SKIP\n >>> gscv = ForecastingGridSearchCV(\n ... forecaster=pipe,\n ... param_grid=param_grid,\n ... cv=cv,\n ... n_jobs=-1) # doctest: +SKIP\n >>> gscv_fitted = gscv.fit(load_airline()) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"scitype:transform-input\": \"Series\",\n # what is the scitype of X: Series, or Panel\n \"scitype:transform-output\": \"Series\",\n # what scitype is returned: Primitives, Series, Panel\n \"scitype:instancewise\": True, # is this an instance-wise transform?\n \"X_inner_mtype\": CORE_MTYPES,\n # which mtypes do _fit/_predict support for X?\n \"y_inner_mtype\": \"None\", # which mtypes do _fit/_predict support for y?\n \"univariate-only\": False,\n \"fit_is_empty\": False,\n \"capability:inverse_transform\": True,\n }\n\n def __init__(self, transformer, passthrough=False):\n self.transformer = transformer\n self.passthrough = passthrough\n\n super().__init__()\n\n # should be all tags, but not fit_is_empty\n # (_fit should not be skipped)\n tags_to_clone = [\n \"scitype:transform-input\",\n \"scitype:transform-output\",\n \"scitype:instancewise\",\n \"y_inner_mtype\",\n \"capability:inverse_transform\",\n \"handles-missing-data\",\n \"X-y-must-have-same-index\",\n \"transform-returns-same-time-index\",\n \"skip-inverse-transform\",\n ]\n self.clone_tags(transformer, tag_names=tags_to_clone)\n\n if passthrough:\n self.transformer_ = Id()\n else:\n self.transformer_ = transformer.clone()\n\n # attribute for _DelegatedTransformer, which then delegates\n # all non-overridden methods are same as of getattr(self, _delegate_name)\n # see further details in _DelegatedTransformer docstring\n _delegate_name = \"transformer_\"\n\n @classmethod\n def METHOD_NAME(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n\n\n Returns\n -------\n params : dict or list of dict, default = {}\n Parameters to create testing instances of the class\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`\n \"\"\"\n from sktime.transformations.series.boxcox import BoxCoxTransformer\n\n return {\"transformer\": BoxCoxTransformer(), \"passthrough\": False}"}}},{"rowIdx":2048,"cells":{"id":{"kind":"number","value":2048,"string":"2,048"},"label":{"kind":"string","value":"name"},"text":{"kind":"string","value":"# coding=utf-8\n# *** WARNING: this file was generated by pulumi. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nimport copy\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom .. import _utilities\nfrom . 
import outputs\n\n__all__ = [\n 'GetSqlPoolVulnerabilityAssessmentRuleBaselineResult',\n 'AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult',\n 'get_sql_pool_vulnerability_assessment_rule_baseline',\n 'get_sql_pool_vulnerability_assessment_rule_baseline_output',\n]\n\n@pulumi.output_type\nclass GetSqlPoolVulnerabilityAssessmentRuleBaselineResult:\n \"\"\"\n A Sql pool vulnerability assessment rule baseline.\n \"\"\"\n def __init__(__self__, baseline_results=None, id=None, METHOD_NAME=None, type=None):\n if baseline_results and not isinstance(baseline_results, list):\n raise TypeError(\"Expected argument 'baseline_results' to be a list\")\n pulumi.set(__self__, \"baseline_results\", baseline_results)\n if id and not isinstance(id, str):\n raise TypeError(\"Expected argument 'id' to be a str\")\n pulumi.set(__self__, \"id\", id)\n if METHOD_NAME and not isinstance(METHOD_NAME, str):\n raise TypeError(\"Expected argument 'name' to be a str\")\n pulumi.set(__self__, \"name\", METHOD_NAME)\n if type and not isinstance(type, str):\n raise TypeError(\"Expected argument 'type' to be a str\")\n pulumi.set(__self__, \"type\", type)\n\n @property\n @pulumi.getter(METHOD_NAME=\"baselineResults\")\n def baseline_results(self) -> Sequence['outputs.SqlPoolVulnerabilityAssessmentRuleBaselineItemResponse']:\n \"\"\"\n The rule baseline result\n \"\"\"\n return pulumi.get(self, \"baseline_results\")\n\n @property\n @pulumi.getter\n def id(self) -> str:\n \"\"\"\n Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}\n \"\"\"\n return pulumi.get(self, \"id\")\n\n @property\n @pulumi.getter\n def METHOD_NAME(self) -> str:\n \"\"\"\n The name of the resource\n \"\"\"\n return pulumi.get(self, \"name\")\n\n @property\n @pulumi.getter\n def type(self) -> str:\n \"\"\"\n The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or \"Microsoft.Storage/storageAccounts\"\n \"\"\"\n return pulumi.get(self, \"type\")\n\n\nclass AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult(GetSqlPoolVulnerabilityAssessmentRuleBaselineResult):\n # pylint: disable=using-constant-test\n def __await__(self):\n if False:\n yield self\n return GetSqlPoolVulnerabilityAssessmentRuleBaselineResult(\n baseline_results=self.baseline_results,\n id=self.id,\n METHOD_NAME=self.METHOD_NAME,\n type=self.type)\n\n\ndef get_sql_pool_vulnerability_assessment_rule_baseline(baseline_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n rule_id: Optional[str] = None,\n sql_pool_name: Optional[str] = None,\n vulnerability_assessment_name: Optional[str] = None,\n workspace_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult:\n \"\"\"\n Gets a SqlPool's vulnerability assessment rule baseline.\n Azure REST API version: 2021-06-01.\n\n\n :param str baseline_name: The name of the vulnerability assessment rule baseline (default implies a baseline on a Sql pool level rule and master for server level rule).\n :param str resource_group_name: The name of the resource group. 
The name is case insensitive.\n :param str rule_id: The vulnerability assessment rule ID.\n :param str sql_pool_name: SQL pool name\n :param str vulnerability_assessment_name: The name of the vulnerability assessment.\n :param str workspace_name: The name of the workspace.\n \"\"\"\n __args__ = dict()\n __args__['baselineName'] = baseline_name\n __args__['resourceGroupName'] = resource_group_name\n __args__['ruleId'] = rule_id\n __args__['sqlPoolName'] = sql_pool_name\n __args__['vulnerabilityAssessmentName'] = vulnerability_assessment_name\n __args__['workspaceName'] = workspace_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:synapse:getSqlPoolVulnerabilityAssessmentRuleBaseline', __args__, opts=opts, typ=GetSqlPoolVulnerabilityAssessmentRuleBaselineResult).value\n\n return AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult(\n baseline_results=pulumi.get(__ret__, 'baseline_results'),\n id=pulumi.get(__ret__, 'id'),\n METHOD_NAME=pulumi.get(__ret__, 'name'),\n type=pulumi.get(__ret__, 'type'))\n\n\n@_utilities.lift_output_func(get_sql_pool_vulnerability_assessment_rule_baseline)\ndef get_sql_pool_vulnerability_assessment_rule_baseline_output(baseline_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n rule_id: Optional[pulumi.Input[str]] = None,\n sql_pool_name: Optional[pulumi.Input[str]] = None,\n vulnerability_assessment_name: Optional[pulumi.Input[str]] = None,\n workspace_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSqlPoolVulnerabilityAssessmentRuleBaselineResult]:\n \"\"\"\n Gets a SqlPool's vulnerability assessment rule baseline.\n Azure REST API version: 2021-06-01.\n\n\n :param str baseline_name: The name of the vulnerability assessment rule baseline (default implies a baseline on a Sql pool level rule and master for server level rule).\n :param str resource_group_name: The name of the resource group. The name is case insensitive.\n :param str rule_id: The vulnerability assessment rule ID.\n :param str sql_pool_name: SQL pool name\n :param str vulnerability_assessment_name: The name of the vulnerability assessment.\n :param str workspace_name: The name of the workspace.\n \"\"\"\n ..."}}},{"rowIdx":2049,"cells":{"id":{"kind":"number","value":2049,"string":"2,049"},"label":{"kind":"string","value":"norm"},"text":{"kind":"string","value":"# Test the Unicode versions of normal file functions\n# open, os.open, os.stat. os.listdir, os.rename, os.remove, os.mkdir, os.chdir, os.rmdir\nimport sys, os, unittest\nfrom unicodedata import normalize\nfrom test import test_support\n\nfilenames = [\n '1_abc',\n u'2_ascii',\n u'3_Gr\\xfc\\xdf-Gott',\n u'4_\\u0393\\u03b5\\u03b9\\u03ac-\\u03c3\\u03b1\\u03c2',\n u'5_\\u0417\\u0434\\u0440\\u0430\\u0432\\u0441\\u0442\\u0432\\u0443\\u0439\\u0442\\u0435',\n u'6_\\u306b\\u307d\\u3093',\n u'7_\\u05d4\\u05e9\\u05e7\\u05e6\\u05e5\\u05e1',\n u'8_\\u66e8\\u66e9\\u66eb',\n u'9_\\u66e8\\u05e9\\u3093\\u0434\\u0393\\xdf',\n # Specific code points: fn, NFC(fn) and NFKC(fn) all differents\n u'10_\\u1fee\\u1ffd',\n ]\n\n# Mac OS X decomposes Unicode names, using Normal Form D.\n# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html\n# \"However, most volume formats do not follow the exact specification for\n# these normal forms. 
For example, HFS Plus uses a variant of Normal Form D\n# in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through\n# U+2FAFF are not decomposed.\"\nif sys.platform != 'darwin':\n filenames.extend([\n # Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all differents\n u'11_\\u0385\\u03d3\\u03d4',\n u'12_\\u00a8\\u0301\\u03d2\\u0301\\u03d2\\u0308', # == NFD(u'\\u0385\\u03d3\\u03d4')\n u'13_\\u0020\\u0308\\u0301\\u038e\\u03ab', # == NFKC(u'\\u0385\\u03d3\\u03d4')\n u'14_\\u1e9b\\u1fc1\\u1fcd\\u1fce\\u1fcf\\u1fdd\\u1fde\\u1fdf\\u1fed',\n\n # Specific code points: fn, NFC(fn) and NFKC(fn) all differents\n u'15_\\u1fee\\u1ffd\\ufad1',\n u'16_\\u2000\\u2000\\u2000A',\n u'17_\\u2001\\u2001\\u2001A',\n u'18_\\u2003\\u2003\\u2003A', # == NFC(u'\\u2001\\u2001\\u2001A')\n u'19_\\u0020\\u0020\\u0020A', # u'\\u0020' == u' ' == NFKC(u'\\u2000') ==\n # NFKC(u'\\u2001') == NFKC(u'\\u2003')\n])\n\n\n# Is it Unicode-friendly?\nif not os.path.supports_unicode_filenames:\n fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()\n try:\n for name in filenames:\n name.encode(fsencoding)\n except UnicodeEncodeError:\n raise unittest.SkipTest(\"only NT+ and systems with \"\n \"Unicode-friendly filesystem encoding\")\n\n\n# Destroy directory dirname and all files under it, to one level.\ndef deltree(dirname):\n # Don't hide legitimate errors: if one of these suckers exists, it's\n # an error if we can't remove it.\n if os.path.exists(dirname):\n # must pass unicode to os.listdir() so we get back unicode results.\n for fname in os.listdir(unicode(dirname)):\n os.unlink(os.path.join(dirname, fname))\n os.rmdir(dirname)\n\n\nclass UnicodeFileTests(unittest.TestCase):\n files = set(filenames)\n normal_form = None\n\n def setUp(self):\n try:\n os.mkdir(test_support.TESTFN)\n except OSError:\n pass\n files = set()\n for name in self.files:\n name = os.path.join(test_support.TESTFN, self.METHOD_NAME(name))\n with open(name, 'w') as f:\n f.write((name+'\\n').encode(\"utf-8\"))\n os.stat(name)\n files.add(name)\n self.files = files\n\n def tearDown(self):\n deltree(test_support.TESTFN)\n\n def METHOD_NAME(self, s):\n if self.normal_form and isinstance(s, unicode):\n return normalize(self.normal_form, s)\n return s\n\n def _apply_failure(self, fn, filename, expected_exception,\n check_fn_in_exception = True):\n with self.assertRaises(expected_exception) as c:\n fn(filename)\n exc_filename = c.exception.filename\n # the \"filename\" exception attribute may be encoded\n if isinstance(exc_filename, str):\n filename = filename.encode(sys.getfilesystemencoding())\n if check_fn_in_exception:\n self.assertEqual(exc_filename, filename, \"Function '%s(%r) failed \"\n \"with bad filename in the exception: %r\" %\n (fn.__name__, filename, exc_filename))\n\n def test_failures(self):\n # Pass non-existing Unicode filenames all over the place.\n for name in self.files:\n name = \"not_\" + name\n self._apply_failure(open, name, IOError)\n self._apply_failure(os.stat, name, OSError)\n self._apply_failure(os.chdir, name, OSError)\n self._apply_failure(os.rmdir, name, OSError)\n self._apply_failure(os.remove, name, OSError)\n # listdir may append a wildcard to the filename, so dont check\n self._apply_failure(os.listdir, name, OSError, False)\n\n def test_open(self):\n for name in self.files:\n f = open(name, 'w')\n f.write((name+'\\n').encode(\"utf-8\"))\n f.close()\n os.stat(name)\n\n # Skip the test on darwin, because darwin does normalize the filename to\n # NFD (a variant of Unicode NFD form). 
Normalize the filename to NFC, NFKC,\n # NFKD in Python is useless, because darwin will normalize it later and so\n # open(), os.stat(), etc. don't raise any exception.\n @unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')\n def test_normalize(self):\n files = set(f for f in self.files if isinstance(f, unicode))\n others = set()\n for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']):\n others |= set(normalize(nf, file) for file in files)\n others -= files\n for name in others:\n self._apply_failure(open, name, IOError)\n self._apply_failure(os.stat, name, OSError)\n self._apply_failure(os.chdir, name, OSError)\n self._apply_failure(os.rmdir, name, OSError)\n self._apply_failure(os.remove, name, OSError)\n # listdir may append a wildcard to the filename, so dont check\n self._apply_failure(os.listdir, name, OSError, False)\n\n # Skip the test on darwin, because darwin uses a normalization different\n # than Python NFD normalization: filenames are different even if we use\n # Python NFD normalization.\n @unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')\n def test_listdir(self):\n sf0 = set(self.files)\n f1 = os.listdir(test_support.TESTFN)\n f2 = os.listdir(unicode(test_support.TESTFN,\n sys.getfilesystemencoding()))\n sf2 = set(os.path.join(unicode(test_support.TESTFN), f) for f in f2)\n self.assertEqual(sf0, sf2)\n self.assertEqual(len(f1), len(f2))\n\n def test_rename(self):\n for name in self.files:\n os.rename(name, \"tmp\")\n os.rename(\"tmp\", name)\n\n def test_directory(self):\n dirname = os.path.join(test_support.TESTFN,\n u'Gr\\xfc\\xdf-\\u66e8\\u66e9\\u66eb')\n filename = u'\\xdf-\\u66e8\\u66e9\\u66eb'\n oldwd = os.getcwd()\n os.mkdir(dirname)\n os.chdir(dirname)\n try:\n with open(filename, 'w') as f:\n f.write((filename + '\\n').encode(\"utf-8\"))\n os.access(filename,os.R_OK)\n os.remove(filename)\n finally:\n os.chdir(oldwd)\n os.rmdir(dirname)\n\n\nclass UnicodeNFCFileTests(UnicodeFileTests):\n normal_form = 'NFC'\n\n\nclass UnicodeNFDFileTests(UnicodeFileTests):\n normal_form = 'NFD'\n\n\nclass UnicodeNFKCFileTests(UnicodeFileTests):\n normal_form = 'NFKC'\n\n\nclass UnicodeNFKDFileTests(UnicodeFileTests):\n normal_form = 'NFKD'\n\n\ndef test_main():\n try:\n test_support.run_unittest(\n UnicodeFileTests,\n UnicodeNFCFileTests,\n UnicodeNFDFileTests,\n UnicodeNFKCFileTests,\n UnicodeNFKDFileTests,\n )\n finally:\n deltree(test_support.TESTFN)\n\n\nif __name__ == \"__main__\":\n test_main()"}}},{"rowIdx":2050,"cells":{"id":{"kind":"number","value":2050,"string":"2,050"},"label":{"kind":"string","value":"patch internal"},"text":{"kind":"string","value":"# engine params\nfrom typing import Any, Callable, Dict, Optional, Sequence, Union, cast\n\nfrom .argument_config import get_argument_config_value\nfrom .config_file_config import get_config_dict_from_config_file\nfrom .default_config import get_default_config_value\nfrom .environment_config import get_environment_config_value\nfrom .keys import ALL_KEYS, KEYS, ConfigDict\nfrom .system_config import get_system_config_value\n\n\ndef chain_getters(\n getters: Sequence[Callable[[str], Optional[str]]],\n key: str,\n default_return: Optional[str] = None,\n) -> Optional[str]:\n for getter in getters:\n result = getter(key)\n if result is not None:\n return result\n return default_return\n\n\ndef lazy_get_config_value(\n key: str, default_return: Optional[str] = None\n) -> Optional[Union[str, Dict[str, Dict[str, str]]]]:\n \"\"\"\n Get the config value for a key in the following precedence\n 
    Otherwise return default_return
    """

    if key not in ALL_KEYS:
        # For sections which can't be overridden via envvars/arguments,
        # we only use default values
        return chain_getters([get_default_config_value], key, default_return)

    return chain_getters(
        [
            get_argument_config_value,
            get_environment_config_value,
            get_system_config_value,
            get_default_config_value,
        ],
        key,
        default_return,
    )


def update_config_dict_from_arguments(config_dict: ConfigDict) -> ConfigDict:
    """
    Given an existing config_dict, update after reading sys.argv
    and overwriting any keys.

    Return updated copy of config_dict.
    """
    argument_config_dict = {
        k: get_argument_config_value(k, None)
        for k in KEYS
        if get_argument_config_value(k) is not None
    }
    new_config_dict = patch_config(config_dict, cast(ConfigDict, argument_config_dict))
    return new_config_dict


def update_config_dict_from_env_vars(config_dict: ConfigDict) -> ConfigDict:
    """
    Given an existing config_dict, update after reading os.environ
    and overwriting any keys.

    Return updated copy of config_dict.
    """

    argument_config_dict = {
        k: get_environment_config_value(k, None)
        for k in KEYS
        if get_environment_config_value(k) is not None
    }
    new_config_dict = patch_config(config_dict, cast(ConfigDict, argument_config_dict))

    return new_config_dict


def update_config_dict_from_file(config_dict: ConfigDict, sg_config_file: str) -> ConfigDict:
    """
    Given an existing config_dict, update after reading sg_config_file
    and overwriting any keys according to the rules in config_file_config

    Return updated copy of config_dict.
    """

    config_file_dict = get_config_dict_from_config_file(sg_config_file)
    new_config_dict = patch_config(config_dict, config_file_dict)

    return new_config_dict


def create_config_dict() -> ConfigDict:
    """
    Create and return a dict of all known config values
    """
    initial_dict = {k: lazy_get_config_value(k) for k in ALL_KEYS}
    config_dict = cast(ConfigDict, {k: v for k, v in initial_dict.items() if v is not None})
    try:
        sg_config_file = get_singleton(config_dict, "SG_CONFIG_FILE")
        config_dict = update_config_dict_from_file(config_dict, sg_config_file)
    except KeyError:
        pass
    config_dict = update_config_dict_from_env_vars(config_dict)
    config_dict = update_config_dict_from_arguments(config_dict)

    return config_dict


def patch_config(config: ConfigDict, patch: ConfigDict) -> ConfigDict:
    """
    Recursively updates a nested configuration dictionary:

    patch_config(
        {"key_1": "value_1",
         "dict_1": {"key_1": "value_1"}},
        {"key_1": "value_2",
         "dict_1": {"key_2": "value_2"}}) == \
        {"key_1": "value_2",
         "dict_1": {"key_1": "value_1", "key_2": "value_2"}}

    :param config: Config dictionary
    :param patch: Dictionary with the patch
    :return: New patched dictionary
    """

    def METHOD_NAME(left: Dict[str, Any], right: Dict[str, Any]) -> Dict[str, Any]:
        result = left.copy()
        for key, value in right.items():
            if key in left and isinstance(left[key], dict) and isinstance(value, dict):
                result[key] = METHOD_NAME(left[key], value)
            else:
                result[key] = value
        return result

    return METHOD_NAME(config, patch)


def get_singleton(config: ConfigDict, item: str) -> str:
    """Return a singleton (not a section) variable from the config."""
    return str(config[item])


def get_all_in_section(config: ConfigDict, section: str) -> Dict[str, Union[str, Dict[str, str]]]:
    """
    Get all subsections from a config (e.g. config["data_sources"])
    """
    result: Dict[str, Union[str, Dict[str, str]]] = cast(
        Dict[str, Union[str, Dict[str, str]]], config.get(section, {})
    )
    assert isinstance(result, dict)
    return result


def get_all_in_subsection(config: ConfigDict, section: str, subsection: str) -> Dict[str, str]:
    section_dict = get_all_in_section(config, section)
    subsection_dict: Dict[str, str] = cast(Dict[str, str], section_dict.get(subsection, {}))
    assert isinstance(subsection_dict, dict)
    return subsection_dict


def get_from_subsection(config: ConfigDict, section: str, subsection: str, item: str) -> str:
    """Return a singleton variable from a subsection of the config,
    e.g. config["remotes"]["data.splitgraph.com"]["SG_ENGINE_HOST"]"""
    subsection_dict = get_all_in_subsection(config, section, subsection)
    return subsection_dict[item]


def get_from_section(config: ConfigDict, section: str, item: str) -> str:
    section_dict = get_all_in_section(config, section)
    assert isinstance(section_dict, dict)
    return cast(str, section_dict[item])


def set_in_subsection(
    config: ConfigDict, section: str, subsection: str, item: str, value: str
) -> None:
    """Set a singleton variable in a subsection of the config,
    e.g. config["remotes"]["data.splitgraph.com"]["SG_ENGINE_HOST"]"""
    subsection_dict = get_all_in_subsection(config, section, subsection)
    subsection_dict[item] = value

---- row 2051 | label: test resampling to numpy img 1 ----

import numpy as np
import unittest

from grass.gunittest.case import TestCase
from grass.gunittest.main import test

from grass.pygrass.raster import raster2numpy_img
from grass.pygrass.gis.region import Region
from grass.script.core import tempfile

has_PyQt4 = False
try:
    from PyQt4.QtCore import *
    from PyQt4.QtGui import *

    has_PyQt4 = True
except:
    pass


class RasterRowImgTestCase(TestCase):
    name = "RasterRowImgTestCase_map"

    @classmethod
    def setUpClass(cls):
        """Create test raster map and region"""
        cls.use_temp_region()
        cls.runModule("g.region", n=60, s=0, e=40, w=0, res=0.1)
        cls.runModule(
            "r.mapcalc",
            expression="%s = if(row() >= 10 && row() <= 60, null(), row() + (10.0 * col()))"
            % (cls.name),
            overwrite=True,
        )
        cls.runModule("r.colors", map=cls.name, color="elevation")

    @classmethod
    def tearDownClass(cls):
        """Remove the generated raster map, if it exists"""
        cls.runModule("g.remove", flags="f", type="raster", name=cls.name)
        cls.del_temp_region()

    @unittest.skipIf(has_PyQt4 is False, "Require PyQt4")
    def test_resampling_to_QImg_1(self):
        region = Region()
        region.from_rast(self.name)
        region.cols = 320
        region.rows = 240
        region.adjust()

        tmpfile = tempfile(False)
        tmpfile = tmpfile + ".png"

        a = raster2numpy_img(self.name, region)

        image = QImage(a.data, region.cols, region.rows, QImage.Format_ARGB32)
        # image.save("data/a.png")
        image.save(tmpfile)
        self.assertFilesEqualMd5(tmpfile, "data/a.png")

    @unittest.skipIf(has_PyQt4 is False, "Require PyQt4")
    def test_resampling_to_QImg_2(self):
        region = Region()
        region.from_rast(self.name)
        region.cols = 640
        region.rows = 480
        region.adjust()

        tmpfile = tempfile(False)
        tmpfile = tmpfile + ".png"

        # With array as argument
        array = np.ndarray((region.rows * region.cols * 4), np.uint8)

        raster2numpy_img(rastname=self.name, region=region, color="ARGB", array=array)

        image = QImage(array.data, region.cols, region.rows, QImage.Format_ARGB32)
        # image.save("data/b.png")
        image.save(tmpfile)
        self.assertFilesEqualMd5(tmpfile, "data/b.png")

    @unittest.skipIf(has_PyQt4 is False, "Require PyQt4")
    def test_resampling_to_QImg_large(self):
        region = Region()
        region.from_rast(self.name)
        region.cols = 4000
        region.rows = 3000
        region.adjust()

        tmpfile = tempfile(False)
        tmpfile = tmpfile + ".png"

        # With array as argument
        array = np.ndarray((region.rows * region.cols * 4), np.uint8)

        raster2numpy_img(rastname=self.name, region=region, color="ARGB", array=array)

        image = QImage(array.data, region.cols, region.rows, QImage.Format_ARGB32)
        # image.save("data/c.png")
        image.save(tmpfile)
        self.assertFilesEqualMd5(tmpfile, "data/c.png")

    @unittest.skipIf(has_PyQt4 is False, "Require PyQt4")
    def test_resampling_to_QImg_3(self):
        region = Region()
        region.from_rast(self.name)
        region.cols = 400
        region.rows = 300
        region.adjust()

        tmpfile = tempfile(False)
        tmpfile = tmpfile + ".png"

        # With array as argument
        array = np.ndarray((region.rows * region.cols * 4), np.uint8)

        raster2numpy_img(rastname=self.name, region=region, color="RGB", array=array)

        image = QImage(array.data, region.cols, region.rows, QImage.Format_RGB32)
        # image.save("data/d.png")
        image.save(tmpfile)
        self.assertFilesEqualMd5(tmpfile, "data/d.png")

    @unittest.skipIf(has_PyQt4 is False, "Require PyQt4")
    def test_resampling_to_QImg_4(self):
        region = Region()
        region.from_rast(self.name)
        region.cols = 400
        region.rows = 300
        region.adjust()

        tmpfile = tempfile(False)
        tmpfile = tmpfile + ".png"

        array = raster2numpy_img(rastname=self.name, region=region, color="RGB")

        image = QImage(array.data, region.cols, region.rows, QImage.Format_RGB32)
        # image.save("data/e.png")
        image.save(tmpfile)
        self.assertFilesEqualMd5(tmpfile, "data/e.png")

    def METHOD_NAME(self):
        region = Region()
        region.ewres = 10
        region.nsres = 10
        region.adjust(rows=True, cols=True)

        a = raster2numpy_img(self.name, region)

        self.assertEqual(len(a), region.rows * region.cols * 4)

    def test_resampling_to_numpy_img_2(self):
        region = Region()
        region.ewres = 1
        region.nsres = 1
        region.adjust(rows=True, cols=True)

        a = raster2numpy_img(self.name, region)

        self.assertEqual(len(a), region.rows * region.cols * 4)

    def test_resampling_to_numpy_img_3(self):
        region = Region()
        region.ewres = 0.4
        region.nsres = 0.4
        region.adjust(rows=True, cols=True)

        a = raster2numpy_img(self.name, region, color="GRAY1")

        self.assertEqual(len(a), region.rows * region.cols * 1)

    def test_resampling_to_numpy_img_4(self):
        region = Region()
        region.ewres = 0.1
        region.nsres = 0.1
        region.adjust(rows=True, cols=True)

        a = raster2numpy_img(self.name, region, color="GRAY2")

        self.assertEqual(len(a), region.rows * region.cols * 1)


if __name__ == "__main__":
    test()

---- row 2052 | label: on server init complete ----

# -*- coding: utf-8 -*-
""" *==LICENSE==*

CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

Additional permissions under GNU GPL version 3 section 7

If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.

You can contact Cyan Worlds, Inc. by email legal@cyan.com
 or by snail mail at:
      Cyan Worlds, Inc.
      14617 N Newport Hwy
      Mead, WA 99021

 *==LICENSE==* """
"""
Module: Neighborhood02.py
Age: Neighborhood02
Date: January 2004
event manager hooks for Neighborhood02, aka Kirel
"""

from Plasma import *
from PlasmaTypes import *
from PlasmaKITypes import *
import time

class Neighborhood02(ptResponder):

    def __init__(self):
        ptResponder.__init__(self)
        self.id = 5700
        self.version = 1

    def OnFirstUpdate(self):
        pass
    def OnNotify(self,state,id,events):
        pass

    def METHOD_NAME(self):
        self.UpdateRecentVisitors()

    def UpdateRecentVisitors(self):
        try:
            AmCCR = ptCCRMgr().getLevel()
        except:
            AmCCR = 0
        if not AmCCR:
            # add player to recent players list
            deviceNode = None
            deviceInbox = None
            playerlist = None

            # find the device
            avault = ptAgeVault()
            adevicesfolder = avault.getAgeDevicesFolder()
            adevices = adevicesfolder.getChildNodeRefList()
            for device in adevices:
                device = device.getChild()
                devicetn = device.upcastToTextNoteNode()
                if devicetn and devicetn.getTitle() == "D'ni Imager Right":
                    deviceNode = devicetn
                    break

            # if we have the device then find the inbox
            if deviceNode:
                inboxes = deviceNode.getChildNodeRefList()
                for inbox in inboxes:
                    inbox = inbox.getChild()
                    inboxfolder = inbox.upcastToFolderNode()
                    if inboxfolder:
                        deviceInbox = inboxfolder
                        break

            # if we have the inbox then look for the heek score note
            if deviceInbox:
                items = deviceInbox.getChildNodeRefList()
                for item in items:
                    item = item.getChild()
                    itemtn = item.upcastToTextNoteNode()
                    if itemtn:
                        if itemtn.getTitle() == "Visitors, Visiteurs, Besucher":
                            playerlist = itemtn
                            break
                        elif itemtn.getTitle() == "Most Recent Visitors":
                            itemtn.setTitle("Visitors, Visiteurs, Besucher")
                            playerlist = itemtn
                            break

            # if we have the text note then update it, otherwise create it
            if playerlist:
                currenttime = time.gmtime(PtGetDniTime())
                currenttimestr = time.strftime("%m/%d/%Y %I:%M %p", currenttime)
                playername = 
PtGetLocalPlayer().getPlayerName()\n thetext = playerlist.getText()\n if (thetext.count(\"\\n\") + 1) > 15:\n thetext = thetext[:thetext.rfind(\"\\n\")]\n thetext = currenttimestr + (\" \" * (30 - len(currenttimestr))) + playername + \"\\n\" + thetext\n playerlist.setText(thetext)\n playerlist.save()\n else:\n currenttime = time.gmtime(PtGetDniTime())\n currenttimestr = time.strftime(\"%m/%d/%Y %I:%M %p\", currenttime)\n playername = PtGetLocalPlayer().getPlayerName()\n thetext = currenttimestr + (\" \" * (30 - len(currenttimestr))) + playername\n \n playerlist = ptVaultTextNoteNode(0)\n playerlist.setTitle(\"Visitors, Visiteurs, Besucher\")\n playerlist.setText(thetext)\n deviceInbox.addNode(playerlist"}}},{"rowIdx":2053,"cells":{"id":{"kind":"number","value":2053,"string":"2,053"},"label":{"kind":"string","value":"add rule"},"text":{"kind":"string","value":"import re\nfrom collections.abc import Mapping\n\nELEMENT_SELECTOR = re.compile(r\"^([\\w-]+)$\")\nELEMENT_WITH_ATTR_SELECTOR = re.compile(r\"^([\\w-]+)\\[([\\w-]+)\\]$\")\nELEMENT_WITH_ATTR_EXACT_SINGLE_QUOTE_SELECTOR = re.compile(\n r\"^([\\w-]+)\\[([\\w-]+)='(.*)'\\]$\"\n)\nELEMENT_WITH_ATTR_EXACT_DOUBLE_QUOTE_SELECTOR = re.compile(\n r'^([\\w-]+)\\[([\\w-]+)=\"(.*)\"\\]$'\n)\nELEMENT_WITH_ATTR_EXACT_UNQUOTED_SELECTOR = re.compile(\n r\"^([\\w-]+)\\[([\\w-]+)=([\\w-]+)\\]$\"\n)\n\n\nclass HTMLRuleset:\n \"\"\"\n Maintains a set of rules for matching HTML elements.\n Each rule defines a mapping from a CSS-like selector to an arbitrary result object.\n\n The following forms of rule are currently supported:\n 'a' = matches any element\n 'a[href]' = matches any element with an 'href' attribute\n 'a[linktype=\"page\"]' = matches any element with a 'linktype' attribute equal to 'page'\n \"\"\"\n\n def __init__(self, rules=None):\n # mapping of element name to a sorted list of (precedence, attr_check, result) tuples\n # where attr_check is a callable that takes an attr dict and returns True if they match\n self.element_rules = {}\n\n if rules:\n self.add_rules(rules)\n\n def add_rules(self, rules):\n # accepts either a dict of {selector: result}, or a list of (selector, result) tuples\n if isinstance(rules, Mapping):\n rules = rules.items()\n\n for selector, result in rules:\n self.METHOD_NAME(selector, result)\n\n def _add_element_rule(self, name, result):\n # add a rule that matches on any element with name `name`\n rules = self.element_rules.setdefault(name, [])\n # element-only rules have priority 2 (lower)\n rules.append((2, (lambda attrs: True), result))\n # sort list on priority\n rules.sort(key=lambda t: t[0])\n\n def _add_element_with_attr_rule(self, name, attr, result):\n # add a rule that matches any element with name `name` which has the attribute `attr`\n rules = self.element_rules.setdefault(name, [])\n # element-and-attr rules have priority 1 (higher)\n rules.append((1, (lambda attrs: attr in attrs), result))\n # sort list on priority\n rules.sort(key=lambda t: t[0])\n\n def _add_element_with_attr_exact_rule(self, name, attr, value, result):\n # add a rule that matches any element with name `name` which has an\n # attribute `attr` equal to `value`\n rules = self.element_rules.setdefault(name, [])\n # element-and-attr rules have priority 1 (higher)\n rules.append(\n (1, (lambda attrs: attr in attrs and attrs[attr] == value), result)\n )\n # sort list on priority\n rules.sort(key=lambda t: t[0])\n\n def METHOD_NAME(self, selector, result):\n match = ELEMENT_SELECTOR.match(selector)\n if match:\n name = match.group(1)\n 
self._add_element_rule(name, result)\n return\n\n match = ELEMENT_WITH_ATTR_SELECTOR.match(selector)\n if match:\n name, attr = match.groups()\n self._add_element_with_attr_rule(name, attr, result)\n return\n\n for regex in (\n ELEMENT_WITH_ATTR_EXACT_SINGLE_QUOTE_SELECTOR,\n ELEMENT_WITH_ATTR_EXACT_DOUBLE_QUOTE_SELECTOR,\n ELEMENT_WITH_ATTR_EXACT_UNQUOTED_SELECTOR,\n ):\n match = regex.match(selector)\n if match:\n name, attr, value = match.groups()\n self._add_element_with_attr_exact_rule(name, attr, value, result)\n return\n\n def match(self, name, attrs):\n \"\"\"\n Look for a rule matching an HTML element with the given name and attribute dict,\n and return the corresponding result object. If no rule matches, return None.\n If multiple rules match, the one chosen is undetermined.\n \"\"\"\n try:\n rules_to_test = self.element_rules[name]\n except KeyError:\n return None\n\n for precedence, attr_check, result in rules_to_test:\n if attr_check(attrs):\n return result"}}},{"rowIdx":2054,"cells":{"id":{"kind":"number","value":2054,"string":"2,054"},"label":{"kind":"string","value":"test no owner"},"text":{"kind":"string","value":"# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom twisted.trial import unittest\n\nfrom buildbot.test.util.config import ConfigErrorsMixin\nfrom buildbot.www.authz import roles\n\n\nclass RolesFromGroups(unittest.TestCase):\n\n def setUp(self):\n self.roles = roles.RolesFromGroups(\"buildbot-\")\n\n def test_noGroups(self):\n ret = self.roles.getRolesFromUser({\"username\": 'homer'})\n self.assertEqual(ret, [])\n\n def test_noBuildbotGroups(self):\n ret = self.roles.getRolesFromUser({\n \"username\": \"homer\",\n \"groups\": [\"employee\"]\n })\n self.assertEqual(ret, [])\n\n def test_someBuildbotGroups(self):\n ret = self.roles.getRolesFromUser({\n \"username\": \"homer\",\n \"groups\": [\"employee\", \"buildbot-maintainer\", \"buildbot-admin\"]\n })\n self.assertEqual(ret, [\"maintainer\", \"admin\"])\n\n\nclass RolesFromEmails(unittest.TestCase):\n\n def setUp(self):\n self.roles = roles.RolesFromEmails(\n employee=[\"homer@plant.com\", \"burns@plant.com\"], boss=[\"burns@plant.com\"])\n\n def test_noUser(self):\n ret = self.roles.getRolesFromUser({\n \"username\": 'lisa',\n \"email\": 'lisa@school.com'\n })\n self.assertEqual(ret, [])\n\n def test_User1(self):\n ret = self.roles.getRolesFromUser({\n \"username\": 'homer',\n \"email\": 'homer@plant.com'\n })\n self.assertEqual(ret, [\"employee\"])\n\n def test_User2(self):\n ret = self.roles.getRolesFromUser({\n \"username\": 'burns',\n \"email\": 'burns@plant.com'\n })\n self.assertEqual(sorted(ret), [\"boss\", \"employee\"])\n\n\nclass RolesFromOwner(unittest.TestCase):\n\n def setUp(self):\n self.roles = roles.RolesFromOwner(\"ownerofbuild\")\n\n def METHOD_NAME(self):\n ret = self.roles.getRolesFromUser({\n \"username\": 'lisa',\n \"email\": 'lisa@school.com'\n }, None)\n self.assertEqual(ret, [])\n\n def 
test_notOwner(self):\n ret = self.roles.getRolesFromUser({\n \"username\": 'lisa',\n \"email\": 'lisa@school.com'\n }, \"homer@plant.com\")\n self.assertEqual(ret, [])\n\n def test_owner(self):\n ret = self.roles.getRolesFromUser({\n \"username\": 'homer',\n \"email\": 'homer@plant.com'\n }, \"homer@plant.com\")\n self.assertEqual(ret, [\"ownerofbuild\"])\n\n\nclass RolesFromUsername(unittest.TestCase, ConfigErrorsMixin):\n\n def setUp(self):\n self.roles = roles.RolesFromUsername(roles=[\"admins\"], usernames=[\"Admin\"])\n self.roles2 = roles.RolesFromUsername(\n roles=[\"developers\", \"integrators\"], usernames=[\"Alice\", \"Bob\"])\n\n def test_anonymous(self):\n ret = self.roles.getRolesFromUser({\"anonymous\": True})\n self.assertEqual(ret, [])\n\n def test_normalUser(self):\n ret = self.roles.getRolesFromUser({\"username\": 'Alice'})\n self.assertEqual(ret, [])\n\n def test_admin(self):\n ret = self.roles.getRolesFromUser({\"username\": 'Admin'})\n self.assertEqual(ret, [\"admins\"])\n\n def test_multipleGroups(self):\n ret = self.roles2.getRolesFromUser({\"username\": 'Bob'})\n self.assertEqual(ret, [\"developers\", \"integrators\"])\n\n def test_badUsernames(self):\n with self.assertRaisesConfigError('Usernames cannot be None'):\n roles.RolesFromUsername(roles=[], usernames=[None])"}}},{"rowIdx":2055,"cells":{"id":{"kind":"number","value":2055,"string":"2,055"},"label":{"kind":"string","value":"add datum for arg"},"text":{"kind":"string","value":"\"\"\"\nEndpointsHelper\n---------------\n\nThis is support for session endpoints, which are a flagged feature for mobile that also form the basis of smart\nlinks in web apps.\n\nEndpoints define specific locations in the application using a stack, so they rely on similar logic to end of form\nnavigation. 
The complexity of generating endpoints is all delegated to ``WorkflowHelper``.\n\"\"\"\nfrom corehq.apps.app_manager.suite_xml.contributors import PostProcessor\nfrom corehq.apps.app_manager.suite_xml.post_process.workflow import (\n CommandId,\n WorkflowDatumMeta,\n WorkflowHelper,\n prepend_parent_frame_children,\n)\nfrom corehq.apps.app_manager.suite_xml.xml_models import (\n Argument,\n PushFrame,\n SessionEndpoint,\n Stack,\n StackDatum,\n StackInstanceDatum,\n)\nfrom corehq.util.timer import time_method\n\n\nclass EndpointsHelper(PostProcessor):\n \"\"\"\n Generates \"Session Endpoints\" - user-defined labels for forms or modules.\n They end up as entries in the suite file that declare stack operations\n necessary to navigate to the form or module, as well as what arguments (eg:\n case IDs) must be provided to get there.\n \"\"\"\n\n @time_method()\n def update_suite(self):\n for module in self.modules:\n if module.session_endpoint_id:\n self.suite.endpoints.append(self._make_session_endpoint(module.session_endpoint_id, module))\n if module.case_list_session_endpoint_id:\n self.suite.endpoints.append(self._make_session_endpoint(\n module.case_list_session_endpoint_id, module, None, False))\n if module.module_type != \"shadow\":\n for form in module.get_suite_forms():\n if form.session_endpoint_id:\n self.suite.endpoints.append(self._make_session_endpoint(\n form.session_endpoint_id, module, form))\n elif module.session_endpoint_id:\n for form in module.get_suite_forms():\n endpoint = next(\n (m for m in module.form_session_endpoints if m.form_id == form.unique_id), None)\n if endpoint:\n self.suite.endpoints.append(self._make_session_endpoint(\n endpoint.session_endpoint_id, module, form))\n\n def _make_session_endpoint(self, endpoint_id, module, form=None, should_add_last_selection_datum=True):\n stack = Stack()\n children = self.get_frame_children(module, form)\n argument_ids = self.get_argument_ids(children, form, should_add_last_selection_datum)\n\n # Add a claim request for each endpoint argument.\n # This assumes that all arguments are case ids.\n non_computed_arguments = [\n child for child in children\n if isinstance(child, WorkflowDatumMeta) and child.requires_selection\n and (should_add_last_selection_datum or child != children[-1])\n ]\n for arg in non_computed_arguments:\n self._add_claim_frame(stack, arg, endpoint_id)\n\n # Add a frame to navigate to the endpoint\n frame = PushFrame()\n stack.add_frame(frame)\n for child in children:\n if isinstance(child, CommandId):\n frame.add_command(child.to_command())\n elif child.id in argument_ids:\n self.METHOD_NAME(frame, child)\n\n def get_child(child_id):\n for child in children:\n if child.id == child_id:\n return child\n\n arguments = []\n for arg_id in argument_ids:\n child = get_child(arg_id)\n if child.is_instance:\n arguments.append(Argument(\n id=arg_id,\n instance_id=arg_id,\n instance_src=\"jr://instance/selected-entities\",\n ))\n else:\n arguments.append(Argument(id=arg_id))\n\n return SessionEndpoint(\n id=endpoint_id,\n arguments=arguments,\n stack=stack,\n )\n\n def get_argument_ids(self, frame_children, form=None, should_add_last_selection_datum=True):\n\n def should_include(child, add_selection_datum):\n if not isinstance(child, WorkflowDatumMeta):\n return False\n if child.requires_selection and add_selection_datum:\n return True\n if form:\n return child.id in (form.function_datum_endpoints or [])\n return False\n\n return [\n child.id for child in frame_children\n if should_include(child, 
should_add_last_selection_datum or child != frame_children[-1])\n ]\n\n def _add_claim_frame(self, stack, arg, endpoint_id):\n frame = PushFrame()\n stack.add_frame(frame)\n self.METHOD_NAME(frame, arg)\n frame.add_command(f\"'claim_command.{endpoint_id}.{arg.id}'\")\n\n def METHOD_NAME(self, frame, child):\n datum = StackInstanceDatum(id=child.id, value=f\"${child.id}\") if child.is_instance \\\n else StackDatum(id=child.id, value=f\"${child.id}\")\n\n frame.add_datum(datum)\n\n def get_frame_children(self, module, form):\n helper = WorkflowHelper(self.suite, self.app, self.app.get_modules())\n frame_children = helper.get_frame_children(module, form)\n if module.root_module_id:\n frame_children = prepend_parent_frame_children(helper, frame_children, module.root_module)\n return frame_children"}}},{"rowIdx":2056,"cells":{"id":{"kind":"number","value":2056,"string":"2,056"},"label":{"kind":"string","value":"calc time"},"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nTravel time calculations.\n\"\"\"\nfrom .helper_classes import TauModelError\nfrom .seismic_phase import SeismicPhase\nfrom .utils import parse_phase_list\nfrom . import _DEFAULT_VALUES\n\n\nclass TauPTime(object):\n \"\"\"\n Calculate travel times for different branches using linear interpolation\n between known slowness samples.\n \"\"\"\n def __init__(self, model, phase_list, depth, degrees, receiver_depth=0.0,\n ray_param_tol=_DEFAULT_VALUES[\"default_time_ray_param_tol\"]\n ):\n self.source_depth = depth\n self.receiver_depth = receiver_depth\n self.degrees = degrees\n self.arrivals = []\n self.phases = []\n # Names of phases to be used, e.g. PKIKP\n self.phase_names = parse_phase_list(phase_list)\n\n # A standard and a depth corrected model. Both are needed.\n self.model = model\n self.depth_corrected_model = self.model\n self.ray_param_tol = ray_param_tol\n\n def run(self):\n \"\"\"\n Do all the calculations and print the output if told to. 
The resulting\n arrival times will be in self.arrivals.\n \"\"\"\n self.depth_correct(self.source_depth, self.receiver_depth)\n self.calculate(self.degrees)\n\n def depth_correct(self, depth, receiver_depth=None):\n \"\"\"\n Corrects the TauModel for the given source depth (if not already\n corrected).\n \"\"\"\n if receiver_depth is None:\n receiver_depth = self.receiver_depth\n if self.depth_corrected_model is None or \\\n self.depth_corrected_model.source_depth != depth:\n self.depth_corrected_model = self.model.depth_correct(depth)\n self.arrivals = []\n if receiver_depth != depth:\n # If already split on receiver depth this does nothing.\n self.depth_corrected_model = \\\n self.depth_corrected_model.split_branch(receiver_depth)\n self.arrivals = []\n self.source_depth = depth\n self.receiver_depth = receiver_depth\n\n def recalc_phases(self):\n \"\"\"\n Recalculates the given phases using a possibly new or changed tau\n model.\n \"\"\"\n new_phases = []\n for temp_phase_name in self.phase_names:\n for phase_num, seismic_phase in enumerate(self.phases):\n pass\n # if seismic_phase.name == temp_phase_name:\n # self.phases.pop(phase_num)\n # if (seismic_phase.source_depth == self.source_depth and\n # seismic_phase.tau_model ==\n # self.depth_corrected_model):\n # # OK so copy to new_phases:\n # new_phases.append(seismic_phase)\n # break\n # Executed, if break is NOT called.\n else:\n # Didn't find it precomputed, so recalculate:\n try:\n seismic_phase = SeismicPhase(temp_phase_name,\n self.depth_corrected_model,\n self.receiver_depth)\n new_phases.append(seismic_phase)\n except TauModelError:\n print(\"Error with this phase, skipping it: \" +\n str(temp_phase_name))\n self.phases = new_phases\n\n def calculate(self, degrees):\n \"\"\"\n Calculate the arrival times.\n \"\"\"\n self.depth_correct(self.source_depth, self.receiver_depth)\n # Called before, but depth_correct might have changed the phases.\n self.recalc_phases()\n self.METHOD_NAME(degrees)\n\n def METHOD_NAME(self, degrees):\n \"\"\"\n Calls the calc_time method of SeismicPhase to calculate arrival\n times for every phase, each sorted by time.\n \"\"\"\n self.degrees = degrees\n self.arrivals = []\n for phase in self.phases:\n self.arrivals += phase.METHOD_NAME(degrees, self.ray_param_tol)\n # Sort them.\n self.arrivals = sorted(self.arrivals,\n key=lambda arrivals: arrivals.time)"}}},{"rowIdx":2057,"cells":{"id":{"kind":"number","value":2057,"string":"2,057"},"label":{"kind":"string","value":"global var"},"text":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Visualize Relay IR in AST text-form.\"\"\"\n\nfrom collections import deque\nfrom typing import (\n Dict,\n Union,\n Tuple,\n List,\n)\nimport tvm\nfrom tvm import relay\nfrom .interface import (\n DefaultVizParser,\n Plotter,\n VizEdge,\n VizGraph,\n VizNode,\n VizParser,\n)\n\n\nclass TermVizParser(VizParser):\n \"\"\"`TermVizParser` parse nodes and edges for `TermPlotter`.\"\"\"\n\n def __init__(self):\n self._default_parser = DefaultVizParser()\n\n def get_node_edges(\n self,\n node: relay.Expr,\n relay_param: Dict[str, tvm.runtime.NDArray],\n node_to_id: Dict[relay.Expr, str],\n ) -> Tuple[Union[VizNode, None], List[VizEdge]]:\n \"\"\"Parse a node and edges from a relay.Expr.\"\"\"\n if isinstance(node, relay.Call):\n return self._call(node, node_to_id)\n if isinstance(node, relay.Let):\n return self._let(node, node_to_id)\n if isinstance(node, relay.GlobalVar):\n return self.METHOD_NAME(node, node_to_id)\n if isinstance(node, relay.If):\n return self._if(node, node_to_id)\n if isinstance(node, tvm.ir.Op):\n return self._op(node, node_to_id)\n if isinstance(node, relay.Function):\n return self._function(node, node_to_id)\n\n # Leverage logics from default parser.\n return self._default_parser.get_node_edges(node, relay_param, node_to_id)\n\n def _call(self, node, node_to_id):\n node_id = node_to_id[node]\n viz_node = VizNode(node_id, \"Call\", \"\")\n viz_edges = [VizEdge(node_to_id[node.op], node_id)]\n for arg in node.args:\n arg_id = node_to_id[arg]\n viz_edges.append(VizEdge(arg_id, node_id))\n return viz_node, viz_edges\n\n def _let(self, node, node_to_id):\n node_id = node_to_id[node]\n viz_node = VizNode(node_id, \"Let\", \"(var, val, body)\")\n viz_edges = [\n VizEdge(node_to_id[node.var], node_id),\n VizEdge(node_to_id[node.value], node_id),\n VizEdge(node_to_id[node.body], node_id),\n ]\n return viz_node, viz_edges\n\n def METHOD_NAME(self, node, node_to_id):\n node_id = node_to_id[node]\n viz_node = VizNode(node_id, \"GlobalVar\", node.name_hint)\n viz_edges = []\n return viz_node, viz_edges\n\n def _if(self, node, node_to_id):\n node_id = node_to_id[node]\n viz_node = VizNode(node_id, \"If\", \"(cond, true, false)\")\n viz_edges = [\n VizEdge(node_to_id[node.cond], node_id),\n VizEdge(node_to_id[node.true_branch], node_id),\n VizEdge(node_to_id[node.false_branch], node_id),\n ]\n return viz_node, viz_edges\n\n def _op(self, node, node_to_id):\n node_id = node_to_id[node]\n op_name = node.name\n viz_node = VizNode(node_id, op_name, \"\")\n viz_edges = []\n return viz_node, viz_edges\n\n def _function(self, node, node_to_id):\n node_id = node_to_id[node]\n viz_node = VizNode(node_id, \"Func\", str(node.params))\n viz_edges = [VizEdge(node_to_id[node.body], node_id)]\n return viz_node, viz_edges\n\n\nclass TermNode:\n \"\"\"TermNode is aimed to generate text more suitable for terminal visualization.\"\"\"\n\n def __init__(self, viz_node: VizNode):\n self.type = viz_node.type_name\n # We don't want too many lines in a terminal.\n self.other_info = viz_node.detail.replace(\"\\n\", \", \")\n\n\nclass TermGraph(VizGraph):\n \"\"\"Terminal graph for a relay IR Module\n\n Parameters\n ----------\n name: str\n name of this graph.\n \"\"\"\n\n def __init__(self, name: str):\n self._name = name\n # A graph in adjacency list form.\n # The key is source node, and the value is a list of destination nodes.\n self._graph = {}\n # a hash table for quick searching.\n 
self._id_to_term_node = {}\n # node_id in reversed post order\n # That mean, root is the first node.\n self._node_id_rpo = deque()\n\n def node(self, viz_node: VizNode) -> None:\n \"\"\"Add a node to the underlying graph.\n Nodes in a Relay IR Module are expected to be added in the post-order.\n\n Parameters\n ----------\n viz_node : VizNode\n A `VizNode` instance.\n \"\"\"\n\n self._node_id_rpo.appendleft(viz_node.identity)\n\n if viz_node.identity not in self._graph:\n # Add the node into the graph.\n self._graph[viz_node.identity] = []\n\n # Create TermNode from VizNode\n node = TermNode(viz_node)\n self._id_to_term_node[viz_node.identity] = node\n\n def edge(self, viz_edge: VizEdge) -> None:\n \"\"\"Add an edge to the terminal graph.\n\n Parameters\n ----------\n viz_edge : VizEdge\n A `VizEdge` instance.\n \"\"\"\n # Take CallNode as an example, instead of \"arguments point to CallNode\",\n # we want \"CallNode points to arguments\" in ast-dump form.\n #\n # The direction of edge is typically controlled by the implemented VizParser.\n # Reverse start/end here simply because we leverage default parser implementation.\n if viz_edge.end in self._graph:\n self._graph[viz_edge.end].append(viz_edge.start)\n else:\n self._graph[viz_edge.end] = [viz_edge.start]\n\n def render(self) -> str:\n \"\"\"Draw a terminal graph\n\n Returns\n -------\n rv1: str\n text representing a graph.\n \"\"\"\n lines = []\n seen_node = set()\n\n def gen_line(indent, n_id):\n if (indent, n_id) in seen_node:\n return\n seen_node.add((indent, n_id))\n\n conn_symbol = [\"|--\", \"`--\"]\n last = len(self._graph[n_id]) - 1\n for i, next_n_id in enumerate(self._graph[n_id]):\n node = self._id_to_term_node[next_n_id]\n lines.append(\n f\"{indent}{conn_symbol[1 if i==last else 0]}{node.type} {node.other_info}\"\n )\n next_indent = indent\n # increase indent for the next level.\n next_indent += \" \" if (i == last) else \"| \"\n gen_line(next_indent, next_n_id)\n\n first_node_id = self._node_id_rpo[0]\n first_node = self._id_to_term_node[first_node_id]\n lines.append(f\"@{self._name}({first_node.other_info})\")\n gen_line(\"\", first_node_id)\n\n return \"\\n\".join(lines)\n\n\nclass TermPlotter(Plotter):\n \"\"\"Terminal plotter\"\"\"\n\n def __init__(self):\n self._name_to_graph = {}\n\n def create_graph(self, name):\n self._name_to_graph[name] = TermGraph(name)\n return self._name_to_graph[name]\n\n def render(self, filename):\n \"\"\"If filename is None, print to stdio. 
Otherwise, write to the filename.\"\"\"\n lines = []\n for name in self._name_to_graph:\n text_graph = self._name_to_graph[name].render()\n lines.append(text_graph)\n if filename is None:\n print(\"\\n\".join(lines))\n else:\n with open(filename, \"w\") as out_file:\n out_file.write(\"\\n\".join(lines))"}}},{"rowIdx":2058,"cells":{"id":{"kind":"number","value":2058,"string":"2,058"},"label":{"kind":"string","value":"test delta reached pending"},"text":{"kind":"string","value":"import itertools\nfrom datetime import timedelta\n\nimport pytest\nfrom django.utils import timezone\nfrom freezegun import freeze_time\n\nfrom sentry.models import Group, GroupSnooze\nfrom sentry.testutils.cases import PerformanceIssueTestCase, SnubaTestCase, TestCase\nfrom sentry.testutils.helpers.datetime import before_now, iso_format\nfrom sentry.testutils.performance_issues.store_transaction import PerfIssueTransactionTestMixin\nfrom sentry.testutils.silo import region_silo_test\nfrom sentry.utils.samples import load_data\nfrom tests.sentry.issues.test_utils import SearchIssueTestMixin\n\n\n@region_silo_test(stable=True)\nclass GroupSnoozeTest(\n TestCase,\n SnubaTestCase,\n PerfIssueTransactionTestMixin,\n SearchIssueTestMixin,\n PerformanceIssueTestCase,\n):\n sequence = itertools.count() # generates unique values, class scope doesn't matter\n\n def setUp(self):\n super().setUp()\n self.project = self.create_project()\n self.group.times_seen_pending = 0\n\n def test_until_not_reached(self):\n snooze = GroupSnooze.objects.create(\n group=self.group, until=timezone.now() + timedelta(days=1)\n )\n assert snooze.is_valid()\n\n def test_until_reached(self):\n snooze = GroupSnooze.objects.create(\n group=self.group, until=timezone.now() - timedelta(days=1)\n )\n assert not snooze.is_valid()\n\n def test_mismatched_group(self):\n snooze = GroupSnooze.objects.create(group=self.group)\n with pytest.raises(ValueError):\n snooze.is_valid(self.create_group())\n\n def test_delta_not_reached(self):\n snooze = GroupSnooze.objects.create(group=self.group, count=100, state={\"times_seen\": 0})\n assert snooze.is_valid()\n\n def test_delta_reached(self):\n snooze = GroupSnooze.objects.create(group=self.group, count=100, state={\"times_seen\": 0})\n self.group.update(times_seen=100)\n assert not snooze.is_valid()\n\n def METHOD_NAME(self):\n snooze = GroupSnooze.objects.create(group=self.group, count=100, state={\"times_seen\": 0})\n self.group.update(times_seen=90)\n assert snooze.is_valid(use_pending_data=True)\n\n self.group.times_seen_pending = 10\n assert not snooze.is_valid(use_pending_data=True)\n\n def test_user_delta_not_reached(self):\n snooze = GroupSnooze.objects.create(\n group=self.group, user_count=100, state={\"users_seen\": 0}\n )\n assert snooze.is_valid(test_rates=True)\n\n @freeze_time()\n def test_user_delta_reached(self):\n for i in range(0, 100):\n self.store_event(\n data={\n \"user\": {\"id\": i},\n \"timestamp\": iso_format(before_now(seconds=1)),\n \"fingerprint\": [\"group1\"],\n },\n project_id=self.project.id,\n )\n\n group = list(Group.objects.all())[-1]\n snooze = GroupSnooze.objects.create(group=group, user_count=100, state={\"users_seen\": 0})\n assert not snooze.is_valid(test_rates=True)\n\n @freeze_time()\n def test_user_rate_reached(self):\n \"\"\"Test that ignoring an error issue until it's hit by 10 users in an hour works.\"\"\"\n for i in range(5):\n group = self.store_event(\n data={\n \"fingerprint\": [\"group1\"],\n \"timestamp\": iso_format(before_now(minutes=5 + i)),\n \"tags\": 
{\"sentry:user\": i},\n },\n project_id=self.project.id,\n ).group\n\n snooze = GroupSnooze.objects.create(group=group, user_count=5, user_window=60)\n assert not snooze.is_valid(test_rates=True)\n\n @freeze_time()\n def test_user_rate_reached_perf_issues(self):\n \"\"\"Test that ignoring a performance issue until it's hit by 10 users in an hour works.\"\"\"\n for i in range(0, 10):\n event_data = load_data(\n \"transaction-n-plus-one\",\n timestamp=before_now(minutes=10),\n )\n event_data[\"user\"][\"id\"] = str(i)\n event = self.create_performance_issue(event_data=event_data)\n perf_group = event.group\n snooze = GroupSnooze.objects.create(group=perf_group, user_count=10, user_window=60)\n assert not snooze.is_valid(test_rates=True)\n\n @freeze_time()\n def test_user_rate_not_reached(self):\n snooze = GroupSnooze.objects.create(group=self.group, user_count=100, user_window=60)\n assert snooze.is_valid(test_rates=True)\n\n @freeze_time()\n def test_user_rate_without_test(self):\n snooze = GroupSnooze.objects.create(group=self.group, count=100, window=60)\n assert snooze.is_valid(test_rates=False)\n\n @freeze_time()\n def test_rate_not_reached(self):\n snooze = GroupSnooze.objects.create(group=self.group, count=100, window=60)\n assert snooze.is_valid(test_rates=True)\n\n @freeze_time()\n def test_rate_reached(self):\n \"\"\"Test when an error issue is ignored until it happens 5 times in a day\"\"\"\n for i in range(5):\n group = self.store_event(\n data={\n \"fingerprint\": [\"group1\"],\n \"timestamp\": iso_format(before_now(minutes=5 + i)),\n },\n project_id=self.project.id,\n ).group\n snooze = GroupSnooze.objects.create(group=group, count=5, window=24 * 60)\n assert not snooze.is_valid(test_rates=True)\n\n @freeze_time()\n def test_rate_reached_perf_issue(self):\n \"\"\"Test when a performance issue is ignored until it happens 10 times in a day\"\"\"\n for i in range(0, 10):\n event = self.create_performance_issue()\n snooze = GroupSnooze.objects.create(group=event.group, count=10, window=24 * 60)\n assert not snooze.is_valid(test_rates=True)\n\n @freeze_time()\n def test_rate_without_test(self):\n snooze = GroupSnooze.objects.create(group=self.group, count=100, window=60)\n assert snooze.is_valid(test_rates=False)\n\n @freeze_time()\n def test_user_rate_reached_generic_issues(self):\n \"\"\"Test that ignoring a generic issue until it's hit by 10 users in an hour works.\"\"\"\n for i in range(0, 10):\n event, occurrence, group_info = self.store_search_issue(\n project_id=self.project.id,\n user_id=i,\n fingerprints=[\"test_user_rate_reached_generic_issues-group\"],\n environment=None,\n )\n assert group_info is not None\n generic_group = group_info.group\n assert generic_group is not None\n snooze = GroupSnooze.objects.create(group=generic_group, user_count=10, user_window=60)\n assert not snooze.is_valid(test_rates=True)\n\n @freeze_time()\n def test_rate_reached_generic_issue(self):\n \"\"\"Test when a generic issue is ignored until it happens 10 times in a day\"\"\"\n for i in range(0, 10):\n event, occurrence, group_info = self.store_search_issue(\n project_id=self.project.id,\n user_id=3, # pin the user_id here to verify the number of events impacts the snooze\n fingerprints=[\"test_rate_reached_generic_issue-group\"],\n environment=None,\n )\n assert group_info is not None\n generic_group = group_info.group\n assert generic_group is not None\n snooze = GroupSnooze.objects.create(group=generic_group, count=10, window=24 * 60)\n assert not 
snooze.is_valid(test_rates=True)"}}},{"rowIdx":2059,"cells":{"id":{"kind":"number","value":2059,"string":"2,059"},"label":{"kind":"string","value":"validate tar archive"},"text":{"kind":"string","value":"import errno\nimport json\nimport os\nimport shutil\nimport subprocess\nimport requests\n\nDEFAULT_MODEL_PATH = \"model_archiver/tests/integ_tests/resources/regular_model\"\nDEFAULT_HANDLER = \"service:handle\"\nDEFAULT_RUNTIME = \"python\"\nDEFAULT_MODEL_NAME = \"model\"\nDEFAULT_EXPORT_PATH = \"/tmp/model\"\nMANIFEST_FILE = \"MAR-INF/MANIFEST.json\"\n\n\ndef update_tests(test):\n test[\"modelName\"] = test.get(\"modelName\", DEFAULT_MODEL_NAME)\n test[\"modelPath\"] = test.get(\"modelPath\", DEFAULT_MODEL_PATH)\n test[\"handler\"] = test.get(\"handler\", DEFAULT_HANDLER)\n test[\"runtime\"] = test.get(\"runtime\", DEFAULT_RUNTIME)\n test[\"exportPath\"] = test.get(\"exportPath\", DEFAULT_EXPORT_PATH)\n test[\"archiveFormat\"] = test.get(\"archiveFormat\", \"default\")\n return test\n\n\ndef create_file_path(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef delete_file_path(path):\n try:\n if os.path.isfile(path):\n os.remove(path)\n if os.path.isdir(path):\n shutil.rmtree(path)\n except OSError:\n pass\n\n\ndef run_test(test, cmd):\n it = test.get(\"iterations\") if test.get(\"iterations\") is not None else 1\n for i in range(it):\n try:\n subprocess.check_call(cmd, shell=True)\n except subprocess.CalledProcessError as exc:\n if test.get(\"expectError\") is not True:\n assert 0, \"{}\".format(exc.output)\n else:\n return 0\n return 1\n\n\ndef validate_archive_exists(test):\n fmt = test.get(\"archiveFormat\")\n if fmt == \"tgz\":\n assert os.path.isfile(os.path.join(test.get(\"exportPath\"), test.get(\"modelName\")+\".tar.gz\"))\n elif fmt == \"no-archive\":\n assert os.path.isdir(os.path.join(test.get(\"exportPath\"), test.get(\"modelName\")))\n else:\n assert os.path.isfile(os.path.join(test.get(\"exportPath\"), test.get(\"modelName\")+\".mar\"))\n\n\ndef validate_manifest_file(manifest, test):\n \"\"\"\n Validate the MANIFEST file\n :param manifest:\n :param test:\n :return:\n \"\"\"\n assert manifest.get(\"runtime\") == test.get(\"runtime\")\n assert manifest.get(\"model\").get(\"modelName\") == test.get(\"modelName\")\n assert manifest.get(\"model\").get(\"handler\") == test.get(\"handler\")\n\n\ndef validate_files(file_list, prefix, regular):\n assert os.path.join(prefix, MANIFEST_FILE) in file_list\n assert os.path.join(prefix, \"service.py\") in file_list\n\n if regular:\n assert os.path.join(prefix, \"dummy-artifacts.txt\") in file_list\n assert os.path.join(prefix, \"dir/1.py\") in file_list\n else:\n assert os.path.join(prefix, \"model.onnx\") in file_list\n\n\ndef METHOD_NAME(test_cfg):\n import tarfile\n file_name = os.path.join(test_cfg.get(\"exportPath\"), test_cfg.get(\"modelName\") + \".tar.gz\")\n f = tarfile.open(file_name, \"r:gz\")\n manifest = json.loads(f.extractfile(os.path.join(test_cfg.get(\"modelName\"), MANIFEST_FILE)).read())\n validate_manifest_file(manifest, test_cfg)\n validate_files(f.getnames(), test_cfg.get(\"modelName\"), \"regular_model\" in test_cfg.get(\"modelPath\"))\n\n\ndef validate_noarchive_archive(test):\n file_name = os.path.join(test.get(\"exportPath\"), test.get(\"modelName\"), MANIFEST_FILE)\n manifest = json.loads(open(file_name).read())\n validate_manifest_file(manifest, test)\n\n\ndef validate_mar_archive(test):\n import zipfile\n file_name = 
os.path.join(test.get(\"exportPath\"), test.get(\"modelName\") + \".mar\")\n zf = zipfile.ZipFile(file_name, \"r\")\n manifest = json.loads(zf.open(MANIFEST_FILE).read())\n validate_manifest_file(manifest, test)\n\n\ndef validate_archive_content(test):\n fmt = test.get(\"archiveFormat\")\n if fmt == \"tgz\":\n METHOD_NAME(test)\n if fmt == \"no-archive\":\n validate_noarchive_archive(test)\n if fmt == \"default\":\n validate_mar_archive(test)\n\n\ndef validate(test):\n validate_archive_exists(test)\n validate_archive_content(test)\n\n\ndef test_model_archiver():\n \n f = open(\"model_archiver/tests/integ_tests/configuration.json\", \"r\")\n tests = json.loads(f.read())\n for t in tests:\n try:\n delete_file_path(t.get(\"exportPath\"))\n create_file_path(t.get(\"exportPath\"))\n t = update_tests(t)\n cmd = \"model-archiver \" \\\n \"--model-name {} \" \\\n \"--model-path {} \" \\\n \"--handler {} \" \\\n \"--runtime {} \" \\\n \"--export-path {} \" \\\n \"--archive-format {}\".format(t.get(\"modelName\"),\n t.get(\"modelPath\"),\n t.get(\"handler\"),\n t.get(\"runtime\"),\n t.get(\"exportPath\"),\n t.get(\"archiveFormat\"))\n if t.get(\"force\"):\n cmd += \" -f\"\n\n # TODO: Add tests to check for \"convert\" functionality\n if run_test(t, cmd):\n validate(t)\n finally:\n delete_file_path(t.get(\"exportPath\"))\n\n\nif __name__ == \"__main__\":\n test_model_archiver()"}}},{"rowIdx":2060,"cells":{"id":{"kind":"number","value":2060,"string":"2,060"},"label":{"kind":"string","value":"get enabled"},"text":{"kind":"string","value":"\"\"\"\nService support for Debian systems (uses update-rc.d and /sbin/service)\n\n.. important::\n If you feel that Salt should be using this module to manage services on a\n minion, and it is using a different module (or gives an error similar to\n *'service.start' is not available*), see :ref:`here\n `.\n\"\"\"\n\nimport fnmatch\nimport glob\nimport logging\nimport os\nimport re\nimport shlex\n\nimport salt.utils.systemd\n\n__func_alias__ = {\"reload_\": \"reload\"}\n\n# Define the module's virtual name\n__virtualname__ = \"service\"\n\nlog = logging.getLogger(__name__)\n\n\ndef __virtual__():\n \"\"\"\n Only work on Debian and when systemd isn't running\n \"\"\"\n if __grains__[\"os\"] in (\n \"Debian\",\n \"Raspbian\",\n \"Devuan\",\n \"NILinuxRT\",\n ) and not salt.utils.systemd.booted(__context__):\n return __virtualname__\n else:\n return (\n False,\n \"The debian_service module could not be loaded: \"\n \"unsupported OS family and/or systemd running.\",\n )\n\n\ndef _service_cmd(*args):\n return \"service {} {}\".format(args[0], \" \".join(args[1:]))\n\n\ndef _get_runlevel():\n \"\"\"\n returns the current runlevel\n \"\"\"\n out = __salt__[\"cmd.run\"](\"runlevel\")\n # unknown can be returned while inside a container environment, since\n # this is due to a lack of init, it should be safe to assume runlevel\n # 2, which is Debian's default. If not, all service related states\n # will throw an out of range exception here which will cause\n # other functions to fail.\n if \"unknown\" in out:\n return \"2\"\n else:\n return out.split()[1]\n\n\ndef METHOD_NAME():\n \"\"\"\n Return a list of service that are enabled on boot\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' service.get_enabled\n \"\"\"\n prefix = \"/etc/rc[S{}].d/S\".format(_get_runlevel())\n ret = set()\n for line in [x.rsplit(os.sep, 1)[-1] for x in glob.glob(\"{}*\".format(prefix))]:\n ret.add(re.split(r\"\\d+\", line)[-1])\n return sorted(ret)\n\n\ndef get_disabled():\n \"\"\"\n Return a set of services that are installed but disabled\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.get_disabled\n \"\"\"\n return sorted(set(get_all()) - set(METHOD_NAME()))\n\n\ndef available(name):\n \"\"\"\n Returns ``True`` if the specified service is available, otherwise returns\n ``False``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.available sshd\n \"\"\"\n return name in get_all()\n\n\ndef missing(name):\n \"\"\"\n The inverse of service.available.\n Returns ``True`` if the specified service is not available, otherwise returns\n ``False``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.missing sshd\n \"\"\"\n return name not in get_all()\n\n\ndef get_all():\n \"\"\"\n Return all available boot services\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.get_all\n \"\"\"\n ret = set()\n lines = glob.glob(\"/etc/init.d/*\")\n for line in lines:\n service = line.split(\"/etc/init.d/\")[1]\n # Remove README. If it's an enabled service, it will be added back in.\n if service != \"README\":\n ret.add(service)\n return sorted(ret | set(METHOD_NAME()))\n\n\ndef start(name):\n \"\"\"\n Start the specified service\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.start \n \"\"\"\n cmd = _service_cmd(name, \"start\")\n return not __salt__[\"cmd.retcode\"](cmd)\n\n\ndef stop(name):\n \"\"\"\n Stop the specified service\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.stop \n \"\"\"\n cmd = _service_cmd(name, \"stop\")\n return not __salt__[\"cmd.retcode\"](cmd)\n\n\ndef restart(name):\n \"\"\"\n Restart the named service\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.restart \n \"\"\"\n cmd = _service_cmd(name, \"restart\")\n return not __salt__[\"cmd.retcode\"](cmd)\n\n\ndef reload_(name):\n \"\"\"\n Reload the named service\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.reload \n \"\"\"\n cmd = _service_cmd(name, \"reload\")\n return not __salt__[\"cmd.retcode\"](cmd)\n\n\ndef force_reload(name):\n \"\"\"\n Force-reload the named service\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.force_reload \n \"\"\"\n cmd = _service_cmd(name, \"force-reload\")\n return not __salt__[\"cmd.retcode\"](cmd)\n\n\ndef status(name, sig=None):\n \"\"\"\n Return the status for a service.\n If the name contains globbing, a dict mapping service name to True/False\n values is returned.\n\n .. versionchanged:: 2018.3.0\n The service name can now be a glob (e.g. ``salt*``)\n\n Args:\n name (str): The name of the service to check\n sig (str): Signature to use to find the service via ps\n\n Returns:\n bool: True if running, False otherwise\n dict: Maps service name to True if running, False otherwise\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' service.status [service signature]\n \"\"\"\n if sig:\n return bool(__salt__[\"status.pid\"](sig))\n\n contains_globbing = bool(re.search(r\"\\*|\\?|\\[.+\\]\", name))\n if contains_globbing:\n services = fnmatch.filter(get_all(), name)\n else:\n services = [name]\n results = {}\n for service in services:\n cmd = _service_cmd(service, \"status\")\n results[service] = not __salt__[\"cmd.retcode\"](cmd, ignore_retcode=True)\n if contains_globbing:\n return results\n return results[name]\n\n\ndef enable(name, **kwargs):\n \"\"\"\n Enable the named service to start at boot\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.enable \n \"\"\"\n cmd = \"insserv {0} && update-rc.d {0} enable\".format(shlex.quote(name))\n return not __salt__[\"cmd.retcode\"](cmd, python_shell=True)\n\n\ndef disable(name, **kwargs):\n \"\"\"\n Disable the named service to start at boot\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.disable \n \"\"\"\n cmd = \"update-rc.d {} disable\".format(name)\n return not __salt__[\"cmd.retcode\"](cmd)\n\n\ndef enabled(name, **kwargs):\n \"\"\"\n Return True if the named service is enabled, false otherwise\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.enabled \n \"\"\"\n return name in METHOD_NAME()\n\n\ndef disabled(name):\n \"\"\"\n Return True if the named service is disabled, false otherwise\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.disabled \n \"\"\"\n return name in get_disabled()"}}},{"rowIdx":2061,"cells":{"id":{"kind":"number","value":2061,"string":"2,061"},"label":{"kind":"string","value":"test resort solo groups"},"text":{"kind":"string","value":"import unittest\n\nfrom drake.tools.lint.formatter import FormatterBase, IncludeFormatter\n\n\nclass TestFormatterBase(unittest.TestCase):\n\n def test_essentials(self):\n original_lines = [\n '// Line 1\\n',\n '/* Line 2 */\\n',\n '\\n',\n ]\n dut = FormatterBase('filename.cc', readlines=original_lines)\n\n # Everything starts out unchanged.\n self.assertTrue(dut.is_same_as_original())\n self.assertTrue(dut.is_permutation_of_original())\n self.assertEqual(dut.get_all_lines(), original_lines)\n self.assertTrue(dut.get_first_differing_original_index() is None)\n\n # Basic getters.\n self.assertEqual(dut.get_num_lines(), 3)\n self.assertTrue(dut.is_blank_line(2))\n self.assertEqual(dut.get_line(0), '// Line 1\\n')\n\n # Reverse it and end up with a permutation.\n dut.set_all_lines(reversed(dut.get_all_lines()))\n self.assertFalse(dut.is_same_as_original())\n self.assertTrue(dut.is_permutation_of_original())\n self.assertEqual(dut.get_first_differing_original_index(), 0)\n\n # Rebuild it using insertion and removal.\n dut.set_all_lines(['\\n'] * 3)\n dut.set_line(0, '/* Line 2 */\\n')\n dut.insert_lines(0, ['AAA\\n', '// Line 1\\n'])\n dut.remove_all([0, 3])\n self.assertEqual(dut.get_all_lines(), original_lines)\n\n def test_format_ranges(self):\n original_lines = [\n '#include \"line0\"\\n',\n '// clang-format off\\n',\n '#include \"line2\"\\n',\n '// clang-format on\\n',\n '#include \"line4\"\\n',\n '#include \"line5\"\\n',\n '/* clang-format off */\\n',\n '#include \"line7\"\\n',\n '#include \"line8\"\\n',\n '/* clang-format on */\\n',\n '#include \"line10\"\\n',\n ]\n dut = FormatterBase(\"filename.cc\", readlines=original_lines)\n\n self.assertEqual(\n dut.get_format_ranges(), [[0], [4, 5], [10]])\n self.assertEqual(\n dut.get_non_format_ranges(), [[1, 2, 3], [6, 7, 8, 9]])\n\n def test_dos(self):\n original_lines = [\n 
'#include \"line0\"\\r\\n',\n ]\n with self.assertRaisesRegex(Exception, \"DOS newline\"):\n FormatterBase(\"filename.cc\", readlines=original_lines)\n\n def test_missing_eof(self):\n original_lines = [\n '#include \"line0\"',\n ]\n with self.assertRaisesRegex(Exception, \"newline.*end of file\"):\n FormatterBase(\"filename.cc\", readlines=original_lines)\n\n\nclass TestIncludeFormatter(unittest.TestCase):\n\n def _split(self, triple_quoted_file_contents):\n lines = triple_quoted_file_contents.split(\"\\n\")\n assert len(lines) >= 2\n assert lines[0] == \"\" # Detritus from first triple quote.\n assert lines[-1] == \"\" # Detritus from last triple quote.\n del lines[0]\n del lines[-1]\n return [line + \"\\n\" for line in lines]\n\n def _check(self, basename, original, expected, first_differing):\n original_lines = self._split(original)\n expected_lines = self._split(expected)\n dut = IncludeFormatter(\n \"drake/dummy/\" + basename,\n readlines=original_lines)\n dut.format_includes()\n self.assertEqual(dut.get_all_lines(), expected_lines)\n self.assertEqual(dut.get_first_differing_original_index(),\n first_differing)\n\n def test_basic(self):\n # A pile of headers gets sorted per cppguide:\n # - The related header\n # - C system files\n # - C++ system files\n # - Other libraries' .h files\n # - Your project's .h files\n original = \"\"\"\n#include \"drake/common/drake_assert.h\"\n#include \"drake/dummy/bar.h\"\n#include \"drake/dummy/dut.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n\"\"\"\n expected = \"\"\"\n#include \"drake/dummy/dut.h\"\n\n#include \n#include \n\n#include \n#include \n\n#include \n#include \n\n#include \"drake/common/drake_assert.h\"\n#include \"drake/dummy/bar.h\"\n\"\"\"\n self._check(\"dut.cc\", original, expected, 0)\n\n def test_nothing(self):\n # A file with _no_ include statements.\n original = \"\"\"\nnamespace { }\n\"\"\"\n self._check(\"dut.cc\", original, original, None)\n\n def test_regroup(self):\n # Wrongly grouped whitespace.\n original = \"\"\"\n#include \"drake/dummy/dut.h\"\n\n#include \n#include \n#include \n\n#include \"drake/common/drake_assert.h\"\n#include \"drake/dummy/bar.h\"\n#include \n\"\"\"\n expected = \"\"\"\n#include \"drake/dummy/dut.h\"\n\n#include \n#include \n\n#include \n#include \n\n#include \"drake/common/drake_assert.h\"\n#include \"drake/dummy/bar.h\"\n\"\"\"\n self._check(\"dut.cc\", original, expected, 2)\n\n def test_format_off(self):\n # \"clang-format off\".\n original = \"\"\"\n#include \"drake/dummy/dut.h\"\n\n// clang-format off\n#ifdef FOO\n#include \n#include \n#else\n#include \n#include \n#endif\n// clang-format on\n\n#include \"drake/common/drake_assert.h\"\n\"\"\"\n self._check(\"dut.cc\", original, original, None)\n\n def test_target_is_header(self):\n # A header file.\n original = \"\"\"\n#include \"drake/common/drake_assert.h\"\n#include \n\nnamespace { }\n\"\"\"\n expected = \"\"\"\n#include \n\n#include \"drake/common/drake_assert.h\"\n\nnamespace { }\n\"\"\"\n self._check(\"dut.h\", original, expected, 0)\n\n def test_associated_comment(self):\n # A comment prior to a line.\n original = \"\"\"\n#include \"drake/dummy/dut.h\"\n\n// Some comment describing the next line.\n#include \n\nnamespace { }\n\"\"\"\n self._check(\"dut.cc\", original, original, None)\n\n def test_file_opening_comment(self):\n # A comment atop the file with no blank line.\n original = \"\"\"\n/// @file dut.cc\n/// Mumble mumble\n///\n#include \n#include \n\"\"\"\n self._check(\"dut.cc\", original, original, 
None)\n\n def test_internal_related_header(self):\n # Two related headers, guarded by \"clang-format off\".\n original = \"\"\"\n/* clang-format off (with explanatory comment) */\n#include \"drake/dummy/dut.h\"\n#include \"drake/dummy/dut_internal.h\"\n/* clang-format on (with explanatory comment) */\n\n#include \n#include \n\n#include \"drake/dummy/drake_assert.h\"\n#include \"drake/dummy/drake_deprecated.h\"\n\"\"\"\n expected = \"\"\"\n/* clang-format off (with explanatory comment) */\n#include \"drake/dummy/dut.h\"\n#include \"drake/dummy/dut_internal.h\"\n/* clang-format on (with explanatory comment) */\n\n#include \n#include \n\n#include \"drake/dummy/drake_assert.h\"\n#include \"drake/dummy/drake_deprecated.h\"\n\"\"\"\n self._check(\"dut.cc\", original, expected, 5)\n\n def METHOD_NAME(self):\n # Groups of one, but sorted incorrectly.\n original = \"\"\"\n#include \"drake/dummy/dut.h\"\n\n#include \"drake/common/drake_assert.h\"\n\n#include \n\"\"\"\n expected = \"\"\"\n#include \"drake/dummy/dut.h\"\n\n#include \n\n#include \"drake/common/drake_assert.h\"\n\"\"\"\n self._check(\"dut.cc\", original, expected, 2)\n\n def test_nontrivial_reformatting(self):\n # If clang-format changes any lines, we want to fail-fast.\n # (Note the two spaces between #include and the double quote.)\n original_lines = ['#include \"nontrivial.h\"\\n']\n dut = IncludeFormatter(\"nontrivial.cc\", readlines=original_lines)\n dut.format_includes()\n with self.assertRaisesRegex(Exception, 'not just a shuffle'):\n dut.rewrite_file()"}}},{"rowIdx":2062,"cells":{"id":{"kind":"number","value":2062,"string":"2,062"},"label":{"kind":"string","value":"get logger"},"text":{"kind":"string","value":"\"\"\"Ray Module.\"\"\"\nimport logging\nimport os\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Any, Callable, List, Optional, TypeVar, Union\n\nfrom awswrangler._config import apply_configs\nfrom awswrangler._distributed import EngineEnum, engine\n\nif engine.get() == EngineEnum.RAY or TYPE_CHECKING:\n import ray\n\n_logger: logging.Logger = logging.getLogger(__name__)\n\n\nFunctionType = TypeVar(\"FunctionType\", bound=Callable[..., Any])\n\n\nclass RayLogger:\n \"\"\"Create discrete Logger instance for Ray Tasks.\"\"\"\n\n def __init__(\n self,\n logging_level: int = logging.INFO,\n format: str = \"%(asctime)s::%(levelname)-2s::%(name)s::%(message)s\", # pylint: disable=redefined-builtin\n datefmt: str = \"%Y-%m-%d %H:%M:%S\",\n ):\n logging.basicConfig(level=logging_level, format=format, datefmt=datefmt)\n\n def METHOD_NAME(self, name: Union[str, Any] = None) -> Optional[logging.Logger]:\n \"\"\"Return logger object.\"\"\"\n return logging.getLogger(name)\n\n\n@apply_configs\ndef ray_logger(\n function: FunctionType,\n configure_logging: bool = True,\n logging_level: int = logging.INFO,\n) -> FunctionType:\n \"\"\"\n Decorate callable to add RayLogger.\n\n Parameters\n ----------\n function : Callable[..., Any]\n Callable as input to decorator.\n\n Returns\n -------\n Callable[..., Any]\n \"\"\"\n\n @wraps(function)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n if configure_logging:\n RayLogger(logging_level=logging_level).METHOD_NAME(name=function.__name__)\n return function(*args, **kwargs)\n\n return wrapper\n\n\ndef ray_remote(**options: Any) -> Callable[[FunctionType], FunctionType]:\n \"\"\"\n Decorate with @ray.remote providing .options().\n\n Parameters\n ----------\n options : Any\n Ray remote options\n\n Returns\n -------\n Callable[..., Any]\n \"\"\"\n\n def remote_decorator(function: 
FunctionType) -> FunctionType:\n \"\"\"\n Decorate callable to wrap within ray.remote.\n\n Parameters\n ----------\n function : Callable[..., Any]\n Callable as input to ray.remote.\n\n Returns\n -------\n Callable[..., Any]\n \"\"\"\n # Access the source function if it exists\n function = getattr(function, \"_source_func\", function)\n\n @wraps(function)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n remote_fn = ray.remote(ray_logger(function))\n if options:\n remote_fn = remote_fn.options(**options)\n return remote_fn.remote(*args, **kwargs)\n\n return wrapper\n\n return remote_decorator\n\n\ndef ray_get(futures: Union[\"ray.ObjectRef[Any]\", List[\"ray.ObjectRef[Any]\"]]) -> Any:\n \"\"\"\n Run ray.get on futures if distributed.\n\n Parameters\n ----------\n futures : List[Any]\n List of Ray futures\n\n Returns\n -------\n List[Any]\n \"\"\"\n if engine.get() == EngineEnum.RAY:\n return ray.get(futures) # type: ignore[attr-defined]\n return futures\n\n\n@apply_configs\ndef initialize_ray(\n address: Optional[str] = None,\n redis_password: Optional[str] = None,\n ignore_reinit_error: bool = True,\n include_dashboard: Optional[bool] = False,\n configure_logging: bool = True,\n log_to_driver: bool = False,\n logging_level: int = logging.INFO,\n object_store_memory: Optional[int] = None,\n cpu_count: Optional[int] = None,\n gpu_count: Optional[int] = None,\n) -> None:\n \"\"\"\n Connect to an existing Ray cluster or start one and connect to it.\n\n Parameters\n ----------\n address : Optional[str]\n Address of the Ray cluster to connect to, by default None\n redis_password : Optional[str]\n Password to the Redis cluster, by default None\n ignore_reinit_error : bool\n If true, Ray suppress errors from calling ray.init() twice, by default True\n include_dashboard : Optional[bool]\n Boolean flag indicating whether or not to start the Ray dashboard, by default False\n configure_logging : Optional[bool]\n Boolean flag indicating whether or not to enable logging, by default True\n log_to_driver : bool\n Boolean flag to enable routing of all worker logs to the driver, by default False\n logging_level : int\n Logging level, defaults to logging.INFO. 
Ignored unless \"configure_logging\" is True\n object_store_memory : Optional[int]\n The amount of memory (in bytes) to start the object store with, by default None\n cpu_count : Optional[int]\n Number of CPUs to assign to each raylet, by default None\n gpu_count : Optional[int]\n Number of GPUs to assign to each raylet, by default None\n \"\"\"\n if not ray.is_initialized():\n # Detect an existing cluster\n ray_address = os.environ.get(\"RAY_ADDRESS\")\n if not address and ray_address:\n _logger.info(\"Using address %s set in the environment variable RAY_ADDRESS\", ray_address)\n address = ray_address\n\n if address:\n _logger.info(\"Connecting to a Ray instance at: %s\", address)\n ray.init(\n address=address,\n include_dashboard=include_dashboard,\n ignore_reinit_error=ignore_reinit_error,\n configure_logging=configure_logging,\n log_to_driver=log_to_driver,\n logging_level=logging_level,\n )\n else:\n ray_runtime_env_vars = [\n \"__MODIN_AUTOIMPORT_PANDAS__\",\n ]\n\n ray_init_kwargs = {\n \"num_cpus\": cpu_count,\n \"num_gpus\": gpu_count,\n \"include_dashboard\": include_dashboard,\n \"ignore_reinit_error\": ignore_reinit_error,\n \"configure_logging\": configure_logging,\n \"log_to_driver\": log_to_driver,\n \"logging_level\": logging_level,\n \"object_store_memory\": object_store_memory,\n \"_redis_password\": redis_password,\n \"_memory\": object_store_memory,\n \"runtime_env\": {\n \"env_vars\": {var: os.environ.get(var) for var in ray_runtime_env_vars if os.environ.get(var)}\n },\n }\n _logger.info(\"Initializing a Ray instance\")\n ray.init(**ray_init_kwargs)"}}},{"rowIdx":2063,"cells":{"id":{"kind":"number","value":2063,"string":"2,063"},"label":{"kind":"string","value":"test hidetip"},"text":{"kind":"string","value":"\"\"\"Test tooltip, coverage 100%.\n\nCoverage is 100% after excluding 6 lines with \"# pragma: no cover\".\nThey involve TclErrors that either should or should not happen in a\nparticular situation, and which are 'pass'ed if they do.\n\"\"\"\n\nfrom idlelib.tooltip import TooltipBase, Hovertip\nfrom test.support import requires\nrequires('gui')\n\nfrom functools import wraps\nimport time\nfrom tkinter import Button, Tk, Toplevel\nimport unittest\n\n\ndef setUpModule():\n global root\n root = Tk()\n\ndef tearDownModule():\n global root\n root.update_idletasks()\n root.destroy()\n del root\n\n\ndef add_call_counting(func):\n @wraps(func)\n def wrapped_func(*args, **kwargs):\n wrapped_func.call_args_list.append((args, kwargs))\n return func(*args, **kwargs)\n wrapped_func.call_args_list = []\n return wrapped_func\n\n\ndef _make_top_and_button(testobj):\n global root\n top = Toplevel(root)\n testobj.addCleanup(top.destroy)\n top.title(\"Test tooltip\")\n button = Button(top, text='ToolTip test button')\n button.pack()\n testobj.addCleanup(button.destroy)\n top.lift()\n return top, button\n\n\nclass ToolTipBaseTest(unittest.TestCase):\n def setUp(self):\n self.top, self.button = _make_top_and_button(self)\n\n def test_base_class_is_unusable(self):\n global root\n top = Toplevel(root)\n self.addCleanup(top.destroy)\n\n button = Button(top, text='ToolTip test button')\n button.pack()\n self.addCleanup(button.destroy)\n\n with self.assertRaises(NotImplementedError):\n tooltip = TooltipBase(button)\n tooltip.showtip()\n\n\nclass HovertipTest(unittest.TestCase):\n def setUp(self):\n self.top, self.button = _make_top_and_button(self)\n\n def is_tipwindow_shown(self, tooltip):\n return tooltip.tipwindow and tooltip.tipwindow.winfo_viewable()\n\n def test_showtip(self):\n 
tooltip = Hovertip(self.button, 'ToolTip text')\n self.addCleanup(tooltip.hidetip)\n self.assertFalse(self.is_tipwindow_shown(tooltip))\n tooltip.showtip()\n self.assertTrue(self.is_tipwindow_shown(tooltip))\n\n def test_showtip_twice(self):\n tooltip = Hovertip(self.button, 'ToolTip text')\n self.addCleanup(tooltip.hidetip)\n self.assertFalse(self.is_tipwindow_shown(tooltip))\n tooltip.showtip()\n self.assertTrue(self.is_tipwindow_shown(tooltip))\n orig_tipwindow = tooltip.tipwindow\n tooltip.showtip()\n self.assertTrue(self.is_tipwindow_shown(tooltip))\n self.assertIs(tooltip.tipwindow, orig_tipwindow)\n\n def METHOD_NAME(self):\n tooltip = Hovertip(self.button, 'ToolTip text')\n self.addCleanup(tooltip.hidetip)\n tooltip.showtip()\n tooltip.hidetip()\n self.assertFalse(self.is_tipwindow_shown(tooltip))\n\n def test_showtip_on_mouse_enter_no_delay(self):\n tooltip = Hovertip(self.button, 'ToolTip text', hover_delay=None)\n self.addCleanup(tooltip.hidetip)\n tooltip.showtip = add_call_counting(tooltip.showtip)\n root.update()\n self.assertFalse(self.is_tipwindow_shown(tooltip))\n self.button.event_generate('', x=0, y=0)\n root.update()\n self.assertTrue(self.is_tipwindow_shown(tooltip))\n self.assertGreater(len(tooltip.showtip.call_args_list), 0)\n\n def test_hover_with_delay(self):\n # Run multiple tests requiring an actual delay simultaneously.\n\n # Test #1: A hover tip with a non-zero delay appears after the delay.\n tooltip1 = Hovertip(self.button, 'ToolTip text', hover_delay=100)\n self.addCleanup(tooltip1.hidetip)\n tooltip1.showtip = add_call_counting(tooltip1.showtip)\n root.update()\n self.assertFalse(self.is_tipwindow_shown(tooltip1))\n self.button.event_generate('', x=0, y=0)\n root.update()\n self.assertFalse(self.is_tipwindow_shown(tooltip1))\n\n # Test #2: A hover tip with a non-zero delay doesn't appear when\n # the mouse stops hovering over the base widget before the delay\n # expires.\n tooltip2 = Hovertip(self.button, 'ToolTip text', hover_delay=100)\n self.addCleanup(tooltip2.hidetip)\n tooltip2.showtip = add_call_counting(tooltip2.showtip)\n root.update()\n self.button.event_generate('', x=0, y=0)\n root.update()\n self.button.event_generate('', x=0, y=0)\n root.update()\n\n time.sleep(0.15)\n root.update()\n\n # Test #1 assertions.\n self.assertTrue(self.is_tipwindow_shown(tooltip1))\n self.assertGreater(len(tooltip1.showtip.call_args_list), 0)\n\n # Test #2 assertions.\n self.assertFalse(self.is_tipwindow_shown(tooltip2))\n self.assertEqual(tooltip2.showtip.call_args_list, [])\n\n def test_hidetip_on_mouse_leave(self):\n tooltip = Hovertip(self.button, 'ToolTip text', hover_delay=None)\n self.addCleanup(tooltip.hidetip)\n tooltip.showtip = add_call_counting(tooltip.showtip)\n root.update()\n self.button.event_generate('', x=0, y=0)\n root.update()\n self.button.event_generate('', x=0, y=0)\n root.update()\n self.assertFalse(self.is_tipwindow_shown(tooltip))\n self.assertGreater(len(tooltip.showtip.call_args_list), 0)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)"}}},{"rowIdx":2064,"cells":{"id":{"kind":"number","value":2064,"string":"2,064"},"label":{"kind":"string","value":"test biosample characterization upgrade status encode2"},"text":{"kind":"string","value":"import pytest\n\n\ndef test_antibody_characterization_upgrade(upgrader, antibody_characterization_1):\n value = upgrader.upgrade('antibody_characterization', antibody_characterization_1, target_version='3')\n assert value['schema_version'] == '3'\n assert value['status'] == 'PENDING DCC 
REVIEW'\n assert value['characterization_method'] == 'immunoprecipitation followed by mass spectrometry'\n\n\ndef test_biosample_characterization_upgrade(upgrader, biosample_characterization_1):\n value = upgrader.upgrade('biosample_characterization', biosample_characterization_1, target_version='3')\n assert value['schema_version'] == '3'\n assert value['status'] == 'NOT REVIEWED'\n assert value['characterization_method'] == 'FACs analysis'\n\n\ndef test_antibody_characterization_upgrade_status(upgrader, antibody_characterization_2):\n value = upgrader.upgrade('antibody_characterization', antibody_characterization_2, target_version='4')\n assert value['schema_version'] == '4'\n assert value['status'] == 'compliant'\n\n\ndef METHOD_NAME(upgrader, biosample_characterization_2):\n value = upgrader.upgrade('biosample_characterization', biosample_characterization_2, target_version='4')\n assert value['schema_version'] == '4'\n assert value['status'] == 'released'\n\n\ndef test_antibody_characterization_upgrade_primary(upgrader, antibody_characterization_3):\n value = upgrader.upgrade('antibody_characterization', antibody_characterization_3, target_version='5')\n assert value['schema_version'] == '5'\n assert value['primary_characterization_method'] == 'immunoblot'\n assert 'characterization_method' not in value\n\n\ndef test_antibody_characterization_upgrade_secondary(upgrader, antibody_characterization_3):\n antibody_characterization_3['characterization_method'] = 'immunoprecipitation followed by mass spectrometry'\n value = upgrader.upgrade('antibody_characterization', antibody_characterization_3, target_version='5')\n assert value['schema_version'] == '5'\n assert value['secondary_characterization_method'] == 'immunoprecipitation followed by mass spectrometry'\n assert 'characterization_method' not in value\n\n\ndef test_antibody_characterization_upgrade_compliant_status(upgrader, antibody_characterization_3):\n antibody_characterization_3['characterization_method'] = 'immunoprecipitation followed by mass spectrometry'\n antibody_characterization_3['status'] = 'compliant'\n value = upgrader.upgrade('antibody_characterization', antibody_characterization_3, target_version='5')\n assert value['schema_version'] == '5'\n assert value['secondary_characterization_method'] == 'immunoprecipitation followed by mass spectrometry'\n assert 'characterization_method' not in value\n assert value['reviewed_by'] == '81a6cc12-2847-4e2e-8f2c-f566699eb29e'\n assert value['documents'] == ['88dc12f7-c72d-4b43-a6cd-c6f3a9d08821']\n\n\ndef test_antibody_characterization_upgrade_not_compliant_status(upgrader, antibody_characterization_3):\n antibody_characterization_3['characterization_method'] = 'immunoprecipitation followed by mass spectrometry'\n antibody_characterization_3['status'] = 'not reviewed'\n value = upgrader.upgrade('antibody_characterization', antibody_characterization_3, target_version='5')\n assert value['schema_version'] == '5'\n assert value['secondary_characterization_method'] == 'immunoprecipitation followed by mass spectrometry'\n assert 'characterization_method' not in value\n assert value['reviewed_by'] == 'ff7b77e7-bb55-4307-b665-814c9f1e65fb'\n\n\ndef test_biosample_characterization_upgrade_references(root, upgrader, biosample_characterization, biosample_characterization_4, publication, threadlocals, dummy_request):\n context = root.get_by_uuid(biosample_characterization['uuid'])\n dummy_request.context = context\n value = upgrader.upgrade('biosample_characterization', 
biosample_characterization_4,\n target_version='5', context=context)\n assert value['schema_version'] == '5'\n assert value['references'] == [publication['uuid']]\n\n\ndef test_antibody_characterization_upgrade_inline(testapp, registry, antibody_characterization_1):\n from snovault import TYPES\n schema = registry[TYPES]['antibody_characterization'].schema\n\n res = testapp.post_json('/antibody-characterizations?validate=false&render=uuid', antibody_characterization_1)\n location = res.location\n\n # The properties are stored un-upgraded.\n res = testapp.get(location + '?frame=raw&upgrade=false').maybe_follow()\n assert res.json['schema_version'] == '1'\n\n # When the item is fetched, it is upgraded automatically.\n res = testapp.get(location).maybe_follow()\n assert res.json['schema_version'] == schema['properties']['schema_version']['default']\n\n res = testapp.patch_json(location, {})\n\n # The stored properties are now upgraded.\n res = testapp.get(location + '?frame=raw&upgrade=false').maybe_follow()\n assert res.json['schema_version'] == schema['properties']['schema_version']['default']\n\n\ndef test_antibody_characterization_comment_to_submitter_comment_upgrade(upgrader, antibody_characterization_10, antibody_characterization):\n value = upgrader.upgrade('antibody_characterization', antibody_characterization_10,\n current_version='10', target_version='11')\n assert value['schema_version'] == '11'\n assert 'comment' not in value\n assert value['submitter_comment'] == 'We tried really hard to characterize this antibody.'\n\n\ndef test_upgrade_antibody_characterization_11_to_12(upgrader, antibody_characterization_11, biosample):\n value = upgrader.upgrade('antibody_characterization', antibody_characterization_11, current_version='11', target_version='12')\n for characterization_review in value['characterization_reviews']:\n assert characterization_review['biosample_type'] == 'cell line'\n\n\ndef test_upgrade_antibody_characterization_13_to_14(upgrader, antibody_characterization_13, biosample):\n value = upgrader.upgrade('antibody_characterization', antibody_characterization_13, current_version='13', target_version='14')\n for characterization_review in value['characterization_reviews']:\n assert characterization_review['biosample_type'] == 'cell line'\n\n\ndef test_upgrade_antibody_characterization_14_to_15(root, upgrader,\n antibody_characterization_14,\n a549):\n value = upgrader.upgrade('antibody_characterization',\n antibody_characterization_14,\n current_version='14',\n target_version='15',\n context=root.get_by_uuid(a549['uuid']))\n for characterization_review in value['characterization_reviews']:\n assert characterization_review['biosample_ontology'] == a549['uuid']\n\n\ndef test_upgrade_antibody_characterization_15_to_16(upgrader,\n antibody_characterization_14):\n value = upgrader.upgrade(\n 'antibody_characterization', antibody_characterization_14,\n current_version='15', target_version='16'\n )\n for char_review in value['characterization_reviews']:\n assert 'biosample_type' not in char_review\n assert 'biosample_term_id' not in char_review\n assert 'biosample_term_name' not in char_review"}}},{"rowIdx":2065,"cells":{"id":{"kind":"number","value":2065,"string":"2,065"},"label":{"kind":"string","value":"sub"},"text":{"kind":"string","value":"#\n# SPDX-License-Identifier: GPL-2.0-only\n#\n\nimport errno\nimport re\nimport os\n\n\nclass OEList(list):\n \"\"\"OpenEmbedded 'list' type\n\n Acts as an ordinary list, but is constructed from a string value and a\n separator 
(optional), and re-joins itself when converted to a string with\n str(). Set the variable type flag to 'list' to use this type, and the\n 'separator' flag may be specified (defaulting to whitespace).\"\"\"\n\n name = \"list\"\n\n def __init__(self, value, separator = None):\n if value is not None:\n list.__init__(self, value.split(separator))\n else:\n list.__init__(self)\n\n if separator is None:\n self.separator = \" \"\n else:\n self.separator = separator\n\n def __str__(self):\n return self.separator.join(self)\n\ndef choice(value, choices):\n \"\"\"OpenEmbedded 'choice' type\n\n Acts as a multiple choice for the user. To use this, set the variable\n type flag to 'choice', and set the 'choices' flag to a space separated\n list of valid values.\"\"\"\n if not isinstance(value, str):\n raise TypeError(\"choice accepts a string, not '%s'\" % type(value))\n\n value = value.lower()\n choices = choices.lower()\n if value not in choices.split():\n raise ValueError(\"Invalid choice '%s'. Valid choices: %s\" %\n (value, choices))\n return value\n\nclass NoMatch(object):\n \"\"\"Stub python regex pattern object which never matches anything\"\"\"\n def findall(self, string, flags=0):\n return None\n\n def finditer(self, string, flags=0):\n return None\n\n def match(self, flags=0):\n return None\n\n def search(self, string, flags=0):\n return None\n\n def split(self, string, maxsplit=0):\n return None\n\n def METHOD_NAME(pattern, repl, string, count=0):\n return None\n\n def subn(pattern, repl, string, count=0):\n return None\n\nNoMatch = NoMatch()\n\ndef regex(value, regexflags=None):\n \"\"\"OpenEmbedded 'regex' type\n\n Acts as a regular expression, returning the pre-compiled regular\n expression pattern object. To use this type, set the variable type flag\n to 'regex', and optionally, set the 'regexflags' type to a space separated\n list of the flags to control the regular expression matching (e.g.\n FOO[regexflags] += 'ignorecase'). See the python documentation on the\n 're' module for a list of valid flags.\"\"\"\n\n flagval = 0\n if regexflags:\n for flag in regexflags.split():\n flag = flag.upper()\n try:\n flagval |= getattr(re, flag)\n except AttributeError:\n raise ValueError(\"Invalid regex flag '%s'\" % flag)\n\n if not value:\n # Let's ensure that the default behavior for an undefined or empty\n # variable is to match nothing. 
If the user explicitly wants to match\n # anything, they can match '.*' instead.\n return NoMatch\n\n try:\n return re.compile(value, flagval)\n except re.error as exc:\n raise ValueError(\"Invalid regex value '%s': %s\" %\n (value, exc.args[0]))\n\ndef boolean(value):\n \"\"\"OpenEmbedded 'boolean' type\n\n Valid values for true: 'yes', 'y', 'true', 't', '1'\n Valid values for false: 'no', 'n', 'false', 'f', '0', None\n \"\"\"\n if value is None:\n return False\n\n if isinstance(value, bool):\n return value\n\n if not isinstance(value, str):\n raise TypeError(\"boolean accepts a string, not '%s'\" % type(value))\n\n value = value.lower()\n if value in ('yes', 'y', 'true', 't', '1'):\n return True\n elif value in ('no', 'n', 'false', 'f', '0'):\n return False\n raise ValueError(\"Invalid boolean value '%s'\" % value)\n\ndef integer(value, numberbase=10):\n \"\"\"OpenEmbedded 'integer' type\n\n Defaults to base 10, but this can be specified using the optional\n 'numberbase' flag.\"\"\"\n\n return int(value, int(numberbase))\n\n_float = float\ndef float(value, fromhex='false'):\n \"\"\"OpenEmbedded floating point type\n\n To use this type, set the type flag to 'float', and optionally set the\n 'fromhex' flag to a true value (obeying the same rules as for the\n 'boolean' type) if the value is in base 16 rather than base 10.\"\"\"\n\n if boolean(fromhex):\n return _float.fromhex(value)\n else:\n return _float(value)\n\ndef path(value, relativeto='', normalize='true', mustexist='false'):\n value = os.path.join(relativeto, value)\n\n if boolean(normalize):\n value = os.path.normpath(value)\n\n if boolean(mustexist):\n try:\n with open(value, 'r'):\n pass\n except IOError as exc:\n if exc.errno == errno.ENOENT:\n raise ValueError(\"{0}: {1}\".format(value, os.strerror(errno.ENOENT)))\n\n return value\n\ndef is_x86(arch):\n \"\"\"\n Check whether arch is x86 or x86_64\n \"\"\"\n if arch.startswith('x86_') or re.match('i.*86', arch):\n return True\n else:\n return False\n\ndef qemu_use_kvm(kvm, target_arch):\n \"\"\"\n Enable kvm if target_arch == build_arch or both of them are x86 archs.\n \"\"\"\n\n use_kvm = False\n if kvm and boolean(kvm):\n build_arch = os.uname()[4]\n if is_x86(build_arch) and is_x86(target_arch):\n use_kvm = True\n elif build_arch == target_arch:\n use_kvm = True\n return use_kvm"}}},{"rowIdx":2066,"cells":{"id":{"kind":"number","value":2066,"string":"2,066"},"label":{"kind":"string","value":"test place no place info"},"text":{"kind":"string","value":"\"\"\" Test PlaceInfo \"\"\"\nimport pytest\n\nPHOTOS_DB = \"./tests/Test-Places-Catalina-10_15_1.photoslibrary/database/photos.db\"\n\nUUID_DICT = {\n \"place_dc\": \"128FB4C6-0B16-4E7D-9108-FB2E90DA1546\",\n \"place_maui\": \"FF7AFE2C-49B0-4C9B-B0D7-7E1F8B8F2F0C\",\n \"no_place\": \"A9B73E13-A6F2-4915-8D67-7213B39BAE9F\",\n}\n\nMAUI_DICT = {\n \"name\": \"Maui, Wailea, Hawai'i, United States\",\n \"names\": {\n \"field0\": [],\n \"country\": [\"United States\"],\n \"state_province\": [\"Hawai'i\"],\n \"sub_administrative_area\": [\"Maui\"],\n \"city\": [\"Wailea\", \"Kihei\", \"Kihei\"],\n \"field5\": [],\n \"additional_city_info\": [],\n \"ocean\": [],\n \"area_of_interest\": [],\n \"inland_water\": [],\n \"field10\": [],\n \"region\": [\"Maui\"],\n \"sub_throughfare\": [],\n \"field13\": [],\n \"postal_code\": [],\n \"field15\": [],\n \"field16\": [],\n \"street_address\": [\"3700 Wailea Alanui Dr\"],\n \"body_of_water\": [],\n },\n \"country_code\": \"US\",\n \"ishome\": False,\n \"address_str\": \"3700 Wailea Alanui Dr, 
Kihei, HI 96753, United States\",\n \"address\": {\n \"street\": \"3700 Wailea Alanui Dr\",\n \"sub_locality\": None,\n \"city\": \"Kihei\",\n \"sub_administrative_area\": \"Maui\",\n \"state_province\": \"HI\",\n \"postal_code\": \"96753\",\n \"country\": \"United States\",\n \"iso_country_code\": \"US\",\n },\n}\n\n\ndef test_place_place_info_1():\n # test valid place info\n import osxphotos\n\n photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)\n photo = photosdb.photos(uuid=[UUID_DICT[\"place_dc\"]])[0]\n assert photo.place is not None\n assert isinstance(photo.place, osxphotos.placeinfo.PlaceInfo)\n assert not photo.place.ishome\n assert photo.place.name == \"Washington, District of Columbia, United States\"\n assert photo.place.names.country[0] == \"United States\"\n assert photo.place.names.state_province[0] == \"District of Columbia\"\n assert photo.place.names.city[0] == \"Washington\"\n assert photo.place.names.additional_city_info[0] == \"Adams Morgan\"\n assert photo.place.names.street_address[0] == \"2038 18th St NW\"\n assert photo.place.names.ocean == []\n assert photo.place.names.area_of_interest == []\n assert photo.place.names.inland_water == []\n assert photo.place.names.postal_code == []\n assert photo.place.names.sub_throughfare == []\n assert photo.place.names.body_of_water == []\n\n assert photo.place.country_code == \"US\"\n assert (\n photo.place.address_str\n == \"2038 18th St NW, Washington, DC 20009, United States\"\n )\n assert photo.place.address.city == \"Washington\"\n assert photo.place.address.country == \"United States\"\n assert photo.place.address.postal_code == \"20009\"\n assert photo.place.address.state_province == \"DC\"\n assert photo.place.address.street == \"2038 18th St NW\"\n assert photo.place.address.sub_administrative_area is None\n assert photo.place.address.sub_locality == \"Adams Morgan\"\n assert photo.place.address.iso_country_code == \"US\"\n\n\ndef test_place_place_info_2():\n # test valid place info\n import osxphotos\n\n photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)\n photo = photosdb.photos(uuid=[UUID_DICT[\"place_maui\"]])[0]\n\n assert isinstance(photo.place, osxphotos.placeinfo.PlaceInfo)\n assert photo.place is not None\n assert not photo.place.ishome\n assert photo.place.name == \"Maui, Wailea, Hawai'i, United States\"\n assert photo.place.names.street_address == [\"3700 Wailea Alanui Dr\"]\n assert photo.place.names.city == [\"Wailea\", \"Kihei\", \"Kihei\"]\n assert photo.place.names.region == [\"Maui\"]\n assert photo.place.names.sub_administrative_area == [\"Maui\"]\n assert photo.place.names.state_province == [\"Hawai'i\"]\n assert photo.place.names.country == [\"United States\"]\n\n assert photo.place.country_code == \"US\"\n assert (\n photo.place.address_str\n == \"3700 Wailea Alanui Dr, Kihei, HI 96753, United States\"\n )\n assert type(photo.place.address) == osxphotos.placeinfo.PostalAddress\n assert photo.place.address.city == \"Kihei\"\n assert photo.place.address.country == \"United States\"\n assert photo.place.address.postal_code == \"96753\"\n assert photo.place.address.state_province == \"HI\"\n assert photo.place.address.street == \"3700 Wailea Alanui Dr\"\n assert photo.place.address.sub_administrative_area == \"Maui\"\n assert photo.place.address.sub_locality is None\n assert photo.place.address.iso_country_code == \"US\"\n\n\ndef METHOD_NAME():\n # test valid place info\n import osxphotos\n\n photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)\n photo = photosdb.photos(uuid=[UUID_DICT[\"no_place\"]])[0]\n\n 
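# A photo with no location metadata exposes place as None rather than an empty PlaceInfo.\n 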
assert photo.place is None\n\n\ndef test_place_place_info_asdict():\n # test PlaceInfo.asdict()\n import osxphotos\n\n photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB)\n photo = photosdb.photos(uuid=[UUID_DICT[\"place_maui\"]])[0]\n\n assert isinstance(photo.place, osxphotos.placeinfo.PlaceInfo)\n assert photo.place.asdict() == MAUI_DICT"}}},{"rowIdx":2067,"cells":{"id":{"kind":"number","value":2067,"string":"2,067"},"label":{"kind":"string","value":"test create view no view aborts"},"text":{"kind":"string","value":"import os\nimport pytest\nfrom tempfile import TemporaryDirectory\n\nfrom mindsdb.api.http.initialize import initialize_app\nfrom mindsdb.migrations import migrate\nfrom mindsdb.interfaces.storage import db\nfrom mindsdb.utilities.config import Config\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef app():\n old_minds_db_con = ''\n if 'MINDSDB_DB_CON' in os.environ:\n old_minds_db_con = os.environ['MINDSDB_DB_CON']\n with TemporaryDirectory(prefix='views_test_') as temp_dir:\n db_path = 'sqlite:///' + os.path.join(temp_dir, 'mindsdb.sqlite3.db')\n # Need to change env variable for migrate module, since it calls db.init().\n os.environ['MINDSDB_DB_CON'] = db_path\n db.init()\n migrate.migrate_to_head()\n app = initialize_app(Config(), True, False)\n\n yield app\n os.environ['MINDSDB_DB_CON'] = old_minds_db_con\n\n\n@pytest.fixture()\ndef client(app):\n return app.test_client()\n\n\ndef test_get_view_project_not_found_abort(client):\n response = client.get('/api/projects/zoopy/views', follow_redirects=True)\n assert '404' in response.status\n\n\ndef test_get_view_not_found(client):\n response = client.get('/api/projects/mindsdb/views/vroom', follow_redirects=True)\n assert '404' in response.status\n\n\ndef test_create_view(client):\n view_data = {\n 'view': {\n 'name': 'test_create_view',\n 'query': 'SELECT * FROM example_db.house_sales'\n }\n }\n response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)\n # Make sure we use the CREATED HTTP status code.\n assert '201' in response.status\n new_view = response.get_json()\n\n expected_view = {\n 'name': 'test_create_view',\n 'query': 'SELECT * FROM example_db.house_sales',\n 'id': new_view['id']\n }\n\n assert new_view == expected_view\n\n\ndef test_create_view_project_not_found_abort(client):\n view_data = {\n 'view': {\n 'name': 'test_create_view',\n 'query': 'SELECT * FROM example_db.house_sales'\n }\n }\n response = client.post('/api/projects/muhproject/views', json=view_data, follow_redirects=True)\n assert '404' in response.status\n\n\ndef test_create_view_already_exists_abort(client):\n view_data = {\n 'view': {\n 'name': 'test_create_view_duplicate',\n 'query': 'SELECT * FROM example_db.house_sales'\n }\n }\n response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)\n assert '201' in response.status\n create_duplicate_response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)\n # Make sure we use CONFLICT status code.\n assert '409' in create_duplicate_response.status\n\n\ndef METHOD_NAME(client):\n view_data = {\n 'name': 'test_create_view',\n 'query': 'SELECT * FROM example_db.house_sales'\n }\n response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)\n assert '400' in response.status\n\n\ndef test_create_view_no_name_aborts(client):\n view_data = {\n 'view': {\n 'query': 'SELECT * FROM example_db.house_sales'\n }\n }\n response = client.post('/api/projects/mindsdb/views', 
json=view_data, follow_redirects=True)\n assert '400' in response.status\n\n\ndef test_create_view_no_query_aborts(client):\n view_data = {\n 'view': {\n 'name': 'test_create_view'\n }\n }\n response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)\n assert '400' in response.status\n\n\ndef test_update_view(client):\n view_data = {\n 'view': {\n 'name': 'test_update_view',\n 'query': 'SELECT * FROM example_db.house_sales'\n }\n }\n\n updated_view = {\n 'view': {\n 'query': 'SELECT * FROM example_db.updated_house_sales'\n }\n }\n client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)\n response = client.put('/api/projects/mindsdb/views/test_update_view', json=updated_view, follow_redirects=True)\n\n assert '200' in response.status\n\n updated_view = response.get_json()\n expected_view = {\n 'name': 'test_update_view',\n 'query': 'SELECT * FROM example_db.updated_house_sales',\n 'id': updated_view['id']\n }\n\n assert updated_view == expected_view\n\n\ndef test_update_view_creates(client):\n view_data = {\n 'view': {\n 'query': 'SELECT * FROM example_db.house_sales'\n }\n }\n\n response = client.put('/api/projects/mindsdb/views/test_update_view_creates', json=view_data, follow_redirects=True)\n\n assert '201' in response.status\n\n created_view = response.get_json()\n expected_view = {\n 'name': 'test_update_view_creates',\n 'query': 'SELECT * FROM example_db.house_sales',\n 'id': created_view['id']\n }\n\n assert created_view == expected_view\n\n\ndef test_update_view_no_view_aborts(client):\n view_data = {\n 'name': 'test_update_view',\n 'query': 'SELECT * FROM example_db.house_sales'\n }\n response = client.put('/api/projects/mindsdb/views/test_update_view', json=view_data, follow_redirects=True)\n assert '400' in response.status\n\n\ndef test_delete_view(client):\n view_data = {\n 'view': {\n 'name': 'test_delete_view',\n 'query': 'SELECT * FROM example_db.house_sales'\n }\n }\n # Delete newly created DB.\n client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True)\n response = client.get('/api/projects/mindsdb/views/test_delete_view', follow_redirects=True)\n\n assert '200' in response.status\n\n response = client.delete('/api/projects/mindsdb/views/test_delete_view', follow_redirects=True)\n\n # Make sure we return NO_CONTENT status since we don't return the deleted DB.\n assert '204' in response.status\n\n response = client.get('/api/projects/mindsdb/views/test_delete_view', follow_redirects=True)\n assert '404' in response.status\n\n\ndef test_delete_view_does_not_exist(client):\n response = client.delete('/api/projects/mindsdb/views/florp', follow_redirects=True)\n assert '404' in response.status\n\n\ndef test_delete_view_project_not_found(client):\n response = client.delete('/api/projects/dindsmb/views/test_delete_view', follow_redirects=True)\n assert '404' in response.status"}}},{"rowIdx":2068,"cells":{"id":{"kind":"number","value":2068,"string":"2,068"},"label":{"kind":"string","value":"register attributes"},"text":{"kind":"string","value":"from collections import UserList\nfrom collections.abc import Iterable\nfrom datetime import datetime\nfrom functools import reduce\nfrom mage_ai.api.operations.constants import READ\nfrom mage_ai.api.resources.BaseResource import BaseResource\nfrom mage_ai.orchestration.db.models.base import BaseModel\nfrom mage_ai.shared.hash import merge_dict\nimport importlib\nimport inspect\n\n\nclass BasePresenter():\n all_attributes_attr = {}\n all_formats_attr = {}\n 
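# Attribute names presented when no explicit format is requested.\n 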
default_attributes = []\n\n def __init__(self, resource, current_user, **kwargs):\n self.current_user = current_user\n self.options = kwargs\n self.resource = resource\n\n @classmethod\n def all_attributes(self):\n if not self.all_attributes_attr.get(self.__name__):\n self.all_attributes_attr[self.__name__] = {}\n return self.all_attributes_attr[self.__name__]\n\n @classmethod\n def all_formats(self):\n if not self.all_formats_attr.get(self.__name__):\n self.all_formats_attr[self.__name__] = {\n 'default': self.default_attributes,\n }\n return self.all_formats_attr[self.__name__]\n\n @classmethod\n def formats(self, format_arg):\n if format_arg and self.all_formats().get(format_arg, None) is not None:\n return self.all_formats()[format_arg]\n else:\n return self.all_formats()['default']\n\n @classmethod\n def METHOD_NAME(self, keys, klass_symbol_or_lambda):\n for key in keys:\n self.all_attributes()[key] = klass_symbol_or_lambda\n\n @classmethod\n def register_format(self, format_arg, keys):\n self.all_formats()[format_arg] = keys\n\n @classmethod\n def register_formats(self, formats, keys):\n arr = formats if isinstance(formats, list) else [formats]\n for format_arg in arr:\n self.register_format(format_arg, keys)\n\n @classmethod\n async def present_resource(self, resource, user, **kwargs):\n async def present_lambda(r):\n if r and inspect.isawaitable(r):\n r = await r\n\n results = r.__class__.presenter_class()(\n r,\n user,\n **kwargs,\n ).present(\n **kwargs,\n )\n\n if results and inspect.isawaitable(results):\n results = await results\n\n return results\n\n if isinstance(resource, Iterable):\n return [await present_lambda(r) for r in resource]\n else:\n return await present_lambda(resource)\n\n @classmethod\n def present_model(self, model, resource_class, user, **kwargs):\n if model:\n return self.present_resource(\n resource_class(model, user, **kwargs),\n user,\n **kwargs,\n )\n\n @classmethod\n def present_models(self, models, resource_class, user, **kwargs):\n return self.present_resource(\n resource_class.build_result_set(models, user, **kwargs),\n user,\n **kwargs,\n )\n\n async def present(self, **kwargs):\n def _build(obj, key):\n value = getattr(self, key)\n if callable(value):\n value = value(**kwargs)\n self.__validate_attribute_type(key, value)\n if issubclass(\n value.__class__,\n list) or issubclass(\n value.__class__,\n UserList):\n obj[key] = [\n self.__transform_value(\n key, v, **kwargs) for v in value]\n else:\n obj[key] = self.__transform_value(key, value, **kwargs)\n return obj\n\n format_to_present = kwargs.get('format', None)\n if format_to_present and self.options.get('from_resource'):\n from_resource_name = self.options['from_resource'].resource_name_singular(\n )\n format_to_present = f'{from_resource_name}/{format_to_present}'\n\n return reduce(_build, self.__class__.formats(format_to_present), {})\n\n def __transform_value(self, key, value, **kwargs):\n klass_symbol_or_lambda = self.__class__.all_attributes().get(key, None)\n\n if issubclass(value.__class__, BaseModel):\n resource_class_name = f'{value.__class__.__name__}Resource'\n resource_class = getattr(importlib.import_module(\n f'mage_ai.api.resources.{resource_class_name}'), resource_class_name, )\n value = resource_class(value, self.current_user, **kwargs)\n\n if isinstance(value, datetime):\n return str(value)\n elif klass_symbol_or_lambda is float:\n return float(value)\n elif klass_symbol_or_lambda is int:\n return int(value)\n elif issubclass(value.__class__, BaseResource):\n opts = 
self.options.copy()\n opts['from_resource'] = self.resource\n data = value.presenter_class().present_resource(\n value,\n self.current_user,\n **merge_dict(kwargs, opts),\n )\n\n if not kwargs.get('ignore_permissions'):\n policy = value.policy_class()(value, self.current_user, **opts)\n policy.authorize_attributes(\n READ,\n data.keys(),\n **opts,\n )\n\n return data\n else:\n return value\n\n def __validate_attribute_class(self, klass_symbol, value):\n pass\n\n def __validate_attribute_type(self, key, value):\n pass\n\n def __getattr__(self, name):\n def _missing(*args, **kwargs):\n val = getattr(self.resource, name)\n if callable(val):\n return val(*args, **kwargs)\n else:\n return val\n return _missing()"}}},{"rowIdx":2069,"cells":{"id":{"kind":"number","value":2069,"string":"2,069"},"label":{"kind":"string","value":"rebuild cases"},"text":{"kind":"string","value":"from django.core.management.base import BaseCommand\n\nfrom casexml.apps.case.cleanup import rebuild_case_from_forms\nfrom casexml.apps.case.xform import get_case_updates\nfrom corehq.apps.users.models import CouchUser\nfrom corehq.form_processor.backends.sql.dbaccessors import LedgerAccessorSQL\nfrom corehq.form_processor.models import RebuildWithReason, XFormInstance\nfrom corehq.util.log import with_progress_bar\nfrom corehq.form_processor.interfaces.processor import FormProcessorInterface\nfrom corehq.form_processor.parsers.ledgers.form import get_case_ids_from_stock_transactions\n\n\nclass Command(BaseCommand):\n help = \"\"\"\n Bulk archive forms for user on domain.\n First archive all forms and then rebuild corresponding cases\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Command, self).__init__(*args, **kwargs)\n self.forms = []\n self.case_ids_to_rebuild = []\n self.user_id = None\n self.domain = None\n\n def add_arguments(self, parser):\n parser.add_argument('user_id')\n parser.add_argument('domain')\n\n def _get_forms_to_archive(self):\n # ordered with latest form's id on top\n get_forms = XFormInstance.objects.get_forms\n form_ids = XFormInstance.objects.get_form_ids_for_user(self.domain, self.user_id)\n return [f for f in get_forms(form_ids, self.domain) if f.is_normal]\n\n def _fetch_case_ids_to_rebuild(self):\n case_ids_to_rebuild = set()\n for form in with_progress_bar(self.forms):\n form_case_ids = set(cu.id for cu in get_case_updates(form))\n if form_case_ids:\n case_ids_to_rebuild.update(form_case_ids)\n return list(case_ids_to_rebuild)\n\n def _archive_forms(self):\n with open(\"forms_archived.txt\", \"w\") as forms_log:\n for form in with_progress_bar(self.forms):\n forms_log.write(\"%s\\n\" % form.form_id)\n form.archive(trigger_signals=False)\n\n def _remove_ledger_transactions(self):\n with open(\"ledger_transactions_removed_case_ids.txt\", \"w\") as case_ids_log:\n forms_iterated = 0\n for xform in with_progress_bar(self.forms):\n forms_iterated += 1\n if forms_iterated % 100 == 0:\n print(\"traversed %s forms\" % forms_iterated)\n ledger_case_ids = get_case_ids_from_stock_transactions(xform)\n if ledger_case_ids:\n ledger_case_ids = list(ledger_case_ids)\n for ledger_case_id in ledger_case_ids:\n case_ids_log.write(\"%s\\n\" % ledger_case_id)\n LedgerAccessorSQL.delete_ledger_transactions_for_form(ledger_case_ids, xform.form_id)\n\n def METHOD_NAME(self):\n user = CouchUser.get_by_user_id(self.user_id)\n reason = \"User %s forms archived for domain %s by system\" % (user.raw_username, self.domain)\n form_processor_interface = FormProcessorInterface(self.domain)\n with 
open(\"cases_rebuilt.txt\", \"w\") as case_log:\n for case_id in with_progress_bar(self.case_ids_to_rebuild):\n case_log.write(\"%s\\n\" % case_id)\n rebuild_case_from_forms(self.domain, case_id, RebuildWithReason(reason=reason))\n ledgers = form_processor_interface.ledger_db.get_ledgers_for_case(case_id)\n for ledger in ledgers:\n form_processor_interface.ledger_processor.rebuild_ledger_state(\n case_id, ledger.section_id, ledger.entry_id)\n\n def handle(self, user_id, domain, **options):\n self.user_id = user_id\n self.domain = domain\n\n self.forms = self._get_forms_to_archive()\n print(\"Found %s normal forms for user\" % len(self.forms))\n\n self.case_ids_to_rebuild = self._fetch_case_ids_to_rebuild()\n print(\"Found %s cases that would need to be rebuilt\" % len(self.case_ids_to_rebuild))\n\n print(\"Starting with form archival\")\n self._archive_forms()\n\n print(\"Starting with removing ledger transactions\")\n self._remove_ledger_transactions()\n\n print(\"Starting with cases rebuild\")\n self.METHOD_NAME()\n\n print(\"Completed!\")"}}},{"rowIdx":2070,"cells":{"id":{"kind":"number","value":2070,"string":"2,070"},"label":{"kind":"string","value":"short name"},"text":{"kind":"string","value":"# Copyright 2023 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Tests for cross-lowering.\n\nWe check that we produce the same exact HLO using native lowering and with\ncross-lowering. This will save the HLO for all PrimitiveHarnesses as generated\non the current backend (`jax.default_backend()`) for all of `cpu`, `gpu`, and\n`tpu`. 
The file names are //for_{cpu,tpu}_on_{cpu,tpu}.mlir.\n\nIf a saved file already exists produced on a different backend, then compare the\ncurrently saved file with the saved one.\n\n\"\"\"\nfrom collections.abc import Sequence\nimport contextlib\nimport dataclasses\nimport os\nimport re\nfrom typing import Callable, Optional\nimport zlib\n\nfrom absl import app\nfrom absl import logging\n\nimport numpy.random as npr\n\nimport jax\nfrom jax import config # Must import before TF\nfrom jax.experimental import jax2tf # Defines needed flags\nfrom jax._src import test_util # Defines needed flags\n\nconfig.parse_flags_with_absl()\n\n# Import after parsing flags\nfrom jax.experimental.jax2tf.tests import primitive_harness\n\n@dataclasses.dataclass\nclass Scenario:\n harness: primitive_harness.Harness\n on_platform: str\n for_platform: str\n\n @property\n def METHOD_NAME(self) -> str:\n basename = re.sub(r\"[^a-zA-Z0-9_\\-]\", \"_\", self.harness.fullname)\n if len(basename) >= 128:\n basename = basename[0:100] + str(hash(self.harness.fullname))\n return basename\n\n def output_file(self, save_directory: str) -> str:\n basename = self.METHOD_NAME\n return os.path.join(\n save_directory, basename,\n f\"for_{self.for_platform}_on_{self.on_platform}.mlir\")\n\n def __str__(self):\n return f\"Scenario(harness={self.harness.fullname}, on={self.on_platform}, for={self.for_platform}, basename={self.METHOD_NAME}\"\n\nclass Io:\n \"\"\"Abstracts a few IO operation over standard \"open\" vs. gfile.\"\"\"\n def __init__(self, use_gfile=False):\n self.use_gfile = use_gfile\n if use_gfile:\n from tensorflow.io import gfile\n self.gfile = gfile\n else:\n self.gfile = None\n\n def exists(self, filename: str) -> bool:\n if self.use_gfile:\n return self.gfile.exists(filename)\n else:\n return os.path.exists(filename)\n\n def makedirs(self, dirname: str):\n if self.use_gfile:\n return self.gfile.makedirs(dirname)\n else:\n return os.makedirs(dirname)\n\n @contextlib.contextmanager\n def open(self, filename: str, mode: str):\n if self.use_gfile:\n f = self.gfile.GFile(filename, mode=mode)\n else:\n f = open(filename, mode=mode)\n try:\n yield f\n finally:\n f.close()\n\n\ndef write_and_check_harness(harness: primitive_harness.Harness,\n io: Io,\n save_directory: str,\n for_platforms: Sequence[str] = (\"cpu\", \"tpu\"),) -> Sequence[str]:\n \"\"\"Writes and checks HLO for a given harness.\n\n Writes the HLOs generated in the current platform for all platforms.\n If it finds previously written HLOs generated on other platforms, compares\n them with the ones generated on this platform.\n\n Returns a list of harnesses on which diffs were found.\n \"\"\"\n diffs = []\n\n func_jax = harness.dyn_fun\n rng = npr.RandomState(zlib.adler32(harness.fullname.encode()))\n args = harness.dyn_args_maker(rng)\n\n # Generate the HLO for all platforms\n for for_platform in for_platforms:\n if not harness.filter(for_platform):\n logging.info(\"Skip harness %s for %s because it is not implemented in JAX\",\n harness.fullname, for_platform)\n continue\n\n scenario1 = Scenario(harness, jax.default_backend(), for_platform)\n output_file = scenario1.output_file(save_directory)\n output_dir = os.path.dirname(output_file)\n if not io.exists(output_dir):\n io.makedirs(output_dir)\n\n if io.exists(output_file):\n with open(output_file) as f:\n hlo = f.read()\n else:\n # For a tighter check, detect the native platform lowering and do not\n # trigger cross-lowering\n if for_platform == jax.default_backend():\n lowered = 
jax.jit(func_jax).lower(*args)\n else:\n # TODO: replace this with JAX cross-platform API, without going through\n # jax2tf\n from jax.experimental.jax2tf.jax2tf import cross_platform_lowering\n lowered = cross_platform_lowering(func_jax, args,\n platforms=[for_platform])\n hlo = lowered.compiler_ir(dialect=\"stablehlo\") # type: ignore\n with open(output_file, \"w\") as f:\n f.write(str(hlo))\n\n # Compare with previously written files\n for on_platform in ['cpu', 'tpu']:\n if on_platform == jax.default_backend():\n continue\n scenario2 = Scenario(harness, on_platform, for_platform)\n other_file = scenario2.output_file(save_directory)\n if io.exists(other_file):\n logging.info(\"Comparing for %s harness %s on %s vs %s\",\n for_platform, harness.fullname, jax.default_backend(), on_platform)\n with open(other_file) as f:\n other_hlo = f.read()\n\n if hlo != other_hlo:\n logging.info(\"Found diff\",\n for_platform, harness.fullname, jax.default_backend(), on_platform)\n diffs.append(f\"Found diff between {output_file} and {other_file}\")\n\n return diffs\n\ndef write_and_check_harnesses(io: Io,\n save_directory: str,\n *,\n filter_harness: Optional[Callable[[str], bool]] = None,\n for_platforms: Sequence[str] = (\"cpu\", \"tpu\"),\n verbose = False):\n logging.info(\"Writing and checking harnesses at %s\", save_directory)\n nr_harnesses = len(primitive_harness.all_harnesses)\n for i, harness in enumerate(primitive_harness.all_harnesses):\n if i % 100 == 0:\n logging.info(\"Trying cross-lowering for harness #%d/%d\",\n i, nr_harnesses)\n enable_xla = harness.params.get(\"enable_xla\", True)\n if not enable_xla:\n if verbose:\n logging.info(\"Skip %s due to enable_xla=False\", harness.fullname)\n continue\n\n if filter_harness is not None and not filter_harness(harness.fullname):\n if verbose:\n logging.info(\"Skip %s due to filter_harness\", harness.fullname)\n continue\n\n write_and_check_harness(harness, io, save_directory,\n for_platforms=for_platforms)\n\n\ndef main(argv: Sequence[str]) -> None:\n if len(argv) > 1:\n raise app.UsageError(\"Too many command-line arguments.\")\n def filter_harness(name: str) -> bool:\n return \"cummax\" in name\n for_platforms = ('cpu', 'tpu')\n write_and_check_harnesses(Io(False), \"./hlo_dumps\",\n filter_harness=filter_harness,\n for_platforms=for_platforms)\n\n\nif __name__ == \"__main__\":\n app.run(main)"}}},{"rowIdx":2071,"cells":{"id":{"kind":"number","value":2071,"string":"2,071"},"label":{"kind":"string","value":"test status subscriber error"},"text":{"kind":"string","value":"\nimport unittest\nimport multiprocessing\nimport sys\nimport time\nimport ipaddress\n\nimport broker\n\nclass TestCommunication(unittest.TestCase):\n def test_ping(self):\n # --peer-start\n with broker.Endpoint() as ep1, \\\n broker.Endpoint() as ep2, \\\n ep1.make_subscriber(\"/test\") as s1, \\\n ep2.make_subscriber(\"/test\") as s2:\n port = ep1.listen(\"127.0.0.1\", 0)\n self.assertTrue(ep2.peer(\"127.0.0.1\", port, 1.0))\n\n ep1.await_peer(ep2.node_id())\n ep2.await_peer(ep1.node_id())\n\n # --peer-end\n\n # --ping-start\n ep2.publish(\"/test\", [\"ping\"])\n (t, d) = s1.get()\n # t == \"/test\", d == [\"ping\"]\n # --ping-end\n self.assertEqual(t, \"/test\")\n self.assertEqual(d[0], \"ping\")\n\n ep1.publish(t, [\"pong\"])\n\n while True:\n # This loop exists just for sake of test coverage for \"poll()\"\n msgs = s2.poll()\n\n if msgs:\n self.assertEqual(len(msgs), 1)\n (t, d) = msgs[0]\n break;\n\n time.sleep(0.1)\n\n self.assertEqual(t, \"/test\")\n 
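# The poll() loop above should have delivered the pong reply that ep1 published.\n 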
self.assertEqual(d[0], \"pong\")\n\n def test_messages(self):\n with broker.Endpoint() as ep1, \\\n broker.Endpoint() as ep2, \\\n ep1.make_subscriber(\"/test\") as s1:\n\n port = ep1.listen(\"127.0.0.1\", 0)\n self.assertTrue(ep2.peer(\"127.0.0.1\", port, 1.0))\n\n ep1.await_peer(ep2.node_id())\n ep2.await_peer(ep1.node_id())\n\n msg0 = (\"/test/1\", ())\n ep2.publish(*msg0)\n\n # --messages-start\n msg1 = (\"/test/2\", (1, 2, 3))\n msg2 = (\"/test/3\", (42, \"foo\", {\"a\": \"A\", \"b\": ipaddress.IPv4Address('1.2.3.4')}))\n ep2.publish_batch(msg1, msg2)\n # --messages-end\n\n msgs = s1.get(3)\n self.assertFalse(s1.available())\n\n self.assertEqual(msgs[0], msg0)\n self.assertEqual(msgs[1], msg1)\n self.assertEqual(msgs[2], msg2)\n\n # These results are not (all) immutable: try modifying the third\n # value (the dict) of the last message above.\n dict_data = msgs[2][1][2]\n self.assertEqual(len(dict_data), 2)\n dict_data[\"c\"] = \"not immutable\"\n self.assertEqual(len(dict_data), 3)\n\n def test_immutable_messages(self):\n with broker.Endpoint() as ep1, \\\n broker.Endpoint() as ep2, \\\n ep1.make_safe_subscriber(\"/test\") as s1:\n\n port = ep1.listen(\"127.0.0.1\", 0)\n ep2.peer(\"127.0.0.1\", port, 1.0)\n\n msg = (\"/test/1\", ({\"a\": \"A\"}, set([1,2,3]), ('a', 'b', 'c')))\n ep2.publish(*msg)\n\n topic, (dict_data, set_data, tuple_data) = s1.get()\n\n # The return values are immutable, so each of the following triggers\n # a type-specific exception.\n with self.assertRaises(TypeError):\n # 'mappingproxy' object does not support item assignment\n dict_data[\"b\"] = \"B\"\n with self.assertRaises(AttributeError):\n # 'frozenset' object has no attribute 'add'\n set_data.add(4)\n with self.assertRaises(TypeError):\n # 'tuple' object does not support item assignment\n tuple_data[3] = 'd'\n\n def test_publisher(self):\n with broker.Endpoint() as ep1, \\\n broker.Endpoint() as ep2, \\\n ep1.make_subscriber(\"/test\") as s1, \\\n ep2.make_publisher(\"/test\") as p2:\n\n port = ep1.listen(\"127.0.0.1\", 0)\n self.assertTrue(ep2.peer(\"127.0.0.1\", port, 1.0))\n\n ep1.await_peer(ep2.node_id())\n ep2.await_peer(ep1.node_id())\n\n p2.publish([1, 2, 3])\n p2.publish_batch([\"a\", \"b\", \"c\"], [True, False])\n\n msgs = s1.get(3)\n self.assertFalse(s1.available())\n\n self.assertEqual(msgs[0], (\"/test\", (1, 2, 3)))\n self.assertEqual(msgs[1], (\"/test\", (\"a\", \"b\", \"c\")))\n self.assertEqual(msgs[2], (\"/test\", (True, False)))\n\n def test_status_subscriber(self):\n # --status-start\n with broker.Endpoint() as ep1, \\\n broker.Endpoint() as ep2, \\\n ep1.make_status_subscriber(True) as es1, \\\n ep2.make_status_subscriber(True) as es2:\n\n port = ep1.listen(\"127.0.0.1\", 0)\n self.assertEqual(ep2.peer(\"127.0.0.1\", port, 1.0), True)\n\n ep1.await_peer(ep2.node_id())\n ep2.await_peer(ep1.node_id())\n\n st1 = es1.get(2)\n st2 = es2.get(2)\n # st1.code() == [broker.SC.EndpointDiscovered, broker.SC.PeerAdded]\n # st2.code() == [broker.SC.EndpointDiscovered, broker.SC.PeerAdded]\n # --status-end\n\n self.assertEqual(len(st1), 2)\n self.assertEqual(st1[0].code(), broker.SC.EndpointDiscovered)\n self.assertEqual(st1[1].code(), broker.SC.PeerAdded)\n self.assertEqual(len(st2), 2)\n self.assertEqual(st2[0].code(), broker.SC.EndpointDiscovered)\n self.assertEqual(st2[1].code(), broker.SC.PeerAdded)\n self.assertEqual(st2[1].context().network.get().address, \"127.0.0.1\")\n\n def METHOD_NAME(self):\n # --error-start\n with broker.Endpoint() as ep1, \\\n ep1.make_status_subscriber() as es1:\n r 
= ep1.peer(\"127.0.0.1\", 1947, 0.0) # Try unavailable port, no retry\n self.assertEqual(r, False) # Not shown in docs.\n st1 = es1.get()\n # s1.code() == broker.EC.PeerUnavailable\n # --error-end\n self.assertEqual(st1.code(), broker.EC.PeerUnavailable)\n\n # Async version.\n ep1.peer_nosync(\"127.0.0.1\", 1947, 1.0)\n st1 = es1.get()\n self.assertEqual(st1.code(), broker.EC.PeerUnavailable)\n\n st1 = es1.get()\n self.assertEqual(st1.code(), broker.EC.PeerUnavailable)\n\n def test_idle_endpoint(self):\n with broker.Endpoint() as ep1, \\\n ep1.make_status_subscriber() as es1, \\\n ep1.make_subscriber(\"/test\") as s1:\n\n pass\n\nif __name__ == '__main__':\n unittest.main(verbosity=3)"}}},{"rowIdx":2072,"cells":{"id":{"kind":"number","value":2072,"string":"2,072"},"label":{"kind":"string","value":"drop index"},"text":{"kind":"string","value":"from redis import Redis, RedisError, ConnectionPool\nimport datetime\nimport itertools\nimport json\nimport time\n\n\nclass Document(object):\n\n def __init__(self, id, **fields):\n\n self.id = id\n for k, v in fields.iteritems():\n setattr(self, k, v)\n\n def __repr__(self):\n\n return 'Document %s' % self.__dict__\n\n\n def snippetize(self, field, size=500, boldTokens=[]):\n txt = getattr(self, field, '')\n for tok in boldTokens:\n txt = txt.replace(tok, \"%s\" % tok)\n while size < len(txt) and txt[size] != ' ':\n size+=1\n\n setattr(self, field, (txt[:size] + '...') if len(txt) > size else txt)\n\nclass Result(object):\n\n def __init__(self, res, hascontent, queryText, duration=0):\n\n self.total = res[0]\n self.duration = duration\n self.docs = []\n\n tokens = filter(None, queryText.rstrip(\"\\\" \").lstrip(\" \\\"\").split(' '))\n for i in xrange(1, len(res), 2 if hascontent else 1):\n id = res[i]\n fields = {} \n if hascontent:\n fields = dict(\n dict(itertools.izip(res[i + 1][::2], res[i + 1][1::2]))) if hascontent else {}\n try:\n del fields['id']\n except KeyError:\n pass\n\n doc = Document(id, **fields)\n #print doc\n if hascontent:\n try:\n doc.snippetize('body', size=500, boldTokens = tokens)\n except Exception as e:\n print e\n self.docs.append(doc)\n\n\n def __repr__(self):\n\n return 'Result{%d total, docs: %s}' % (self.total, self.docs)\n\n\nclass Client(object):\n\n NUMERIC = 'numeric'\n\n CREATE_CMD = 'FT.CREATE'\n SEARCH_CMD = 'FT.SEARCH'\n ADD_CMD = 'FT.ADD'\n DROP_CMD = 'FT.DROP'\n\n\n class BatchIndexer(object):\n \"\"\"\n A batch indexer allows you to automatically batch \n document indexeing in pipelines, flushing it every N documents. 
\n \"\"\"\n\n def __init__(self, client, chunk_size = 1000):\n\n self.client = client\n self.pipeline = client.redis.pipeline(False)\n self.total = 0\n self.chunk_size = chunk_size\n self.current_chunk = 0\n\n def __del__(self):\n if self.current_chunk:\n self.commit()\n \n def add_document(self, doc_id, nosave = False, score=1.0, **fields):\n\n self.client._add_document(doc_id, conn=self.pipeline, nosave = nosave, score = score, **fields)\n self.current_chunk += 1\n self.total += 1\n if self.current_chunk >= self.chunk_size:\n self.commit()\n \n\n def commit(self):\n \n self.pipeline.execute()\n self.current_chunk = 0\n\n def __init__(self, index_name, host='localhost', port=6379):\n self.host = host\n self.port = port\n self.index_name = index_name\n\n self.redis = Redis(\n connection_pool = ConnectionPool(host=host, port=port))\n\n def batch_indexer(self, chunk_size = 100):\n \"\"\"\n Create a new batch indexer from the client with a given chunk size\n \"\"\"\n return Client.BatchIndexer(self, chunk_size = chunk_size)\n \n def create_index(self, **fields):\n \"\"\"\n Create the search index. Creating an existing index juts updates its properties\n :param fields: a kwargs consisting of field=[score|NUMERIC]\n :return:\n \"\"\"\n self.redis.execute_command(\n self.CREATE_CMD, self.index_name, *itertools.chain(*fields.items()))\n\n def METHOD_NAME(self):\n \"\"\"\n Drop the index if it exists\n :return:\n \"\"\"\n self.redis.execute_command(self.DROP_CMD, self.index_name)\n\n def _add_document(self, doc_id, conn = None, nosave = False, score=1.0, **fields):\n \"\"\" \n Internal add_document used for both batch and single doc indexing \n \"\"\"\n if conn is None:\n conn = self.redis\n\n args = [self.ADD_CMD, self.index_name, doc_id, score]\n if nosave:\n args.append('NOSAVE')\n args.append('FIELDS') \n args += list(itertools.chain(*fields.items()))\n return conn.execute_command(*args)\n\n def add_document(self, doc_id, nosave = False, score=1.0, **fields):\n \"\"\"\n Add a single document to the index.\n :param doc_id: the id of the saved document.\n :param nosave: if set to true, we just index the document, and don't save a copy of it. \n this means that searches will just return ids.\n :param score: the document ranking, between 0.0 and 1.0. 
\n :fields: kwargs dictionary of the document fields to be saved and/or indexed \n \"\"\"\n return self._add_document(doc_id, conn=None, nosave=nosave, score=score, **fields)\n\n def load_document(self, id):\n \"\"\"\n Load a single document by id\n \"\"\"\n fields = self.redis.hgetall(id)\n try:\n del fields['id']\n except KeyError:\n pass\n\n return Document(id=id, **fields)\n\n\n def search(self, query, offset =0, num = 10, verbatim = False, no_content=False, no_stopwords = False, fields=None, **filters):\n \"\"\"\n Search eht\n :param query:\n :param fields:\n :param filters:\n :return:\n \"\"\"\n\n args = [self.index_name, query]\n if no_content:\n args.append('NOCONTENT')\n\n if fields:\n\n args.append('INFIELDS')\n args.append(len(fields))\n args += fields\n \n if verbatim:\n args.append('VERBATIM')\n\n if no_stopwords:\n args.append('NOSTOPWORDS')\n\n if filters:\n for k, v in filters.iteritems():\n args += ['FILTER', k] + list(v)\n\n args += [\"LIMIT\", offset, num]\n\n st = time.time()\n res = self.redis.execute_command(self.SEARCH_CMD, *args)\n\n return Result(res, no_content == False, queryText=query, duration = (time.time()-st)*1000.0)"}}},{"rowIdx":2073,"cells":{"id":{"kind":"number","value":2073,"string":"2,073"},"label":{"kind":"string","value":"create next relation"},"text":{"kind":"string","value":"# Copyright Contributors to the Amundsen project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import Iterator, Union\n\nfrom amundsen_common.utils.atlas import AtlasCommonParams, AtlasCommonTypes\n\nfrom databuilder.models.atlas_entity import AtlasEntity\nfrom databuilder.models.atlas_relationship import AtlasRelationship\nfrom databuilder.models.atlas_serializable import AtlasSerializable\nfrom databuilder.models.graph_node import GraphNode\nfrom databuilder.models.graph_relationship import GraphRelationship\nfrom databuilder.models.graph_serializable import GraphSerializable\nfrom databuilder.serializers.atlas_serializer import get_entity_attrs\nfrom databuilder.utils.atlas import AtlasRelationshipTypes, AtlasSerializedEntityOperation\n\n\nclass ResourceReport(GraphSerializable, AtlasSerializable):\n \"\"\"\n Resource Report matching model\n\n Report represents a document that can be linked to any resource (like a table) in Amundsen.\n\n Example would be Pandas Profiling HTML report containing full advanced profile of a table.\n \"\"\"\n\n RESOURCE_REPORT_LABEL = 'Report'\n\n RESOURCE_REPORT_NAME = 'name'\n RESOURCE_REPORT_URL = 'url'\n\n REPORT_KEY_FORMAT = '{resource_uri}/_report/{report_name}'\n\n REPORT_RESOURCE_RELATION_TYPE = 'REFERS_TO'\n RESOURCE_REPORT_RELATION_TYPE = 'HAS_REPORT'\n\n def __init__(self,\n name: str,\n url: str,\n resource_uri: str,\n resource_label: str, # for example 'Table'\n ) -> None:\n self.report_name = name\n self.report_url = url\n\n self.resource_uri = resource_uri\n self.resource_label = resource_label\n\n self.resource_report_key = self.get_resource_model_key()\n\n self._node_iter = self._create_node_iterator()\n self._relation_iter = self._create_relation_iterator()\n self._atlas_entity_iterator = self._create_next_atlas_entity()\n self._atlas_relation_iterator = self._create_atlas_relation_iterator()\n\n def get_resource_model_key(self) -> str:\n return ResourceReport.REPORT_KEY_FORMAT.format(resource_uri=self.resource_uri, report_name=self.report_name)\n\n def create_next_node(self) -> Union[GraphNode, None]:\n # creates new node\n try:\n return next(self._node_iter)\n except StopIteration:\n return None\n\n def 
METHOD_NAME(self) -> Union[GraphRelationship, None]:\n try:\n return next(self._relation_iter)\n except StopIteration:\n return None\n\n def _create_node_iterator(self) -> Iterator[GraphNode]:\n \"\"\"\n Create an application node\n :return:\n \"\"\"\n report_node = GraphNode(\n key=self.resource_report_key,\n label=ResourceReport.RESOURCE_REPORT_LABEL,\n attributes={\n ResourceReport.RESOURCE_REPORT_NAME: self.report_name,\n ResourceReport.RESOURCE_REPORT_URL: self.report_url\n }\n )\n\n yield report_node\n\n def _create_relation_iterator(self) -> Iterator[GraphRelationship]:\n \"\"\"\n Create relations between application and table nodes\n :return:\n \"\"\"\n graph_relationship = GraphRelationship(\n start_key=self.resource_uri,\n start_label=self.resource_label,\n end_key=self.resource_report_key,\n end_label=ResourceReport.RESOURCE_REPORT_LABEL,\n type=ResourceReport.RESOURCE_REPORT_RELATION_TYPE,\n reverse_type=ResourceReport.REPORT_RESOURCE_RELATION_TYPE,\n attributes={}\n )\n\n yield graph_relationship\n\n def create_next_atlas_entity(self) -> Union[AtlasEntity, None]:\n try:\n return next(self._atlas_entity_iterator)\n except StopIteration:\n return None\n\n def _create_next_atlas_entity(self) -> Iterator[AtlasEntity]:\n group_attrs_mapping = [\n (AtlasCommonParams.qualified_name, self.resource_report_key),\n ('name', self.report_name),\n ('url', self.report_url)\n ]\n\n entity_attrs = get_entity_attrs(group_attrs_mapping)\n\n entity = AtlasEntity(\n typeName=AtlasCommonTypes.resource_report,\n operation=AtlasSerializedEntityOperation.CREATE,\n relationships=None,\n attributes=entity_attrs,\n )\n\n yield entity\n\n def create_next_atlas_relation(self) -> Union[AtlasRelationship, None]:\n try:\n return next(self._atlas_relation_iterator)\n except StopIteration:\n return None\n\n def _create_atlas_relation_iterator(self) -> Iterator[AtlasRelationship]:\n relationship = AtlasRelationship(\n relationshipType=AtlasRelationshipTypes.referenceable_report,\n entityType1=self.resource_label,\n entityQualifiedName1=self.resource_uri,\n entityType2=AtlasCommonTypes.resource_report,\n entityQualifiedName2=self.resource_report_key,\n attributes={}\n )\n\n yield relationship"}}},{"rowIdx":2074,"cells":{"id":{"kind":"number","value":2074,"string":"2,074"},"label":{"kind":"string","value":"get source"},"text":{"kind":"string","value":"# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Data Resource Provider implementation.\n\"\"\"\nimport os\nfrom pathlib import Path\n\nfrom c7n.actions import ActionRegistry\nfrom c7n.exceptions import PolicyExecutionError, PolicyValidationError\nfrom c7n.filters import FilterRegistry\nfrom c7n.manager import ResourceManager\nfrom c7n.provider import Provider, clouds\nfrom c7n.query import sources\nfrom c7n.registry import PluginRegistry\nfrom c7n.utils import load_file, jmespath_search\n\n\n@clouds.register(\"c7n\")\nclass CustodianProvider(Provider):\n\n display_name = \"Custodian Core\"\n resources = PluginRegistry(\"policy\")\n resource_prefix = \"c7n\"\n # lazy load chicken sacrifice\n resource_map = {\"c7n.data\": \"c7n.data.Data\"}\n\n def get_session_factory(self, config):\n return NullSession()\n\n def initialize(self, options):\n return\n\n def initialize_policies(self, policy_collection, options):\n return policy_collection\n\n\nclass NullSession:\n \"\"\"dummy session\"\"\"\n\n\n@sources.register('static')\nclass StaticSource:\n def __init__(self, queries):\n self.queries = queries\n\n def __iter__(self):\n records = 
[]\n for q in self.queries:\n records.extend(q.get(\"records\", ()))\n return iter(records)\n\n def validate(self):\n for q in self.queries:\n if not isinstance(q.get(\"records\", None), (list, tuple)):\n raise PolicyValidationError(\"invalid static data source `records`\")\n\n\n@sources.register('disk')\nclass DiskSource:\n def __init__(self, queries):\n self.queries = queries\n\n def validate(self):\n for q in self.queries:\n if not os.path.exists(q[\"path\"]):\n raise PolicyValidationError(\"invalid disk path %s\" % q)\n if os.path.isdir(q[\"path\"]) and \"glob\" not in q:\n raise PolicyValidationError(\"glob pattern required for dir\")\n\n def __iter__(self):\n for q in self.queries:\n for collection in self.scan_path(\n path=q[\"path\"], resource_key=q.get(\"key\"), glob=q.get(\"glob\")\n ):\n for p in collection:\n yield p\n\n def scan_path(self, path, glob, resource_key):\n if os.path.isfile(path):\n yield self.load_file(path, resource_key)\n return\n\n for path in Path(path).glob(glob):\n yield self.load_file(str(path), resource_key)\n\n def load_file(self, path, resource_key):\n data = load_file(path)\n if resource_key:\n data = jmespath_search(resource_key, data)\n if not isinstance(data, list):\n raise PolicyExecutionError(\n \"found disk records at %s in non list format %s\" % (path, type(data))\n )\n return DataFile(path, resource_key, data)\n\n\nclass DataFile:\n\n __slots__ = (\"path\", \"records\", \"resource_key\")\n\n def __init__(self, path, resource_key, records):\n self.path = path\n self.resource_key = resource_key\n self.records = records\n\n def __iter__(self):\n return iter(self.records)\n\n\n@CustodianProvider.resources.register(\"data\")\nclass Data(ResourceManager):\n\n action_registry = ActionRegistry(\"c7n.data.actions\")\n filter_registry = FilterRegistry(\"c7n.data.filters\")\n source_mapping = {\"static\": StaticSource, \"disk\": DiskSource}\n\n def validate(self):\n if self.data.get(\"source\", \"disk\") not in self.source_mapping:\n raise PolicyValidationError(\"invalid source %s\" % self.data[\"source\"])\n self.METHOD_NAME().validate()\n\n def get_resources(self, resource_ids):\n return []\n\n def resources(self):\n with self.ctx.tracer.subsegment(\"resource-fetch\"):\n source = self.METHOD_NAME()\n resources = list(source)\n with self.ctx.tracer.subsegment(\"filter\"):\n resources = self.filter_resources(resources)\n return resources\n\n def METHOD_NAME(self):\n source_type = self.data.get(\"source\", \"disk\")\n return self.source_mapping[source_type](self.data.get(\"query\", []))"}}},{"rowIdx":2075,"cells":{"id":{"kind":"number","value":2075,"string":"2,075"},"label":{"kind":"string","value":"test force open completions event"},"text":{"kind":"string","value":"import unittest\nfrom test.test_support import requires\nfrom Tkinter import Tk, Text\n\nimport idlelib.AutoComplete as ac\nimport idlelib.AutoCompleteWindow as acw\nfrom idlelib.idle_test.mock_idle import Func\nfrom idlelib.idle_test.mock_tk import Event\n\nclass AutoCompleteWindow:\n def complete():\n return\n\nclass DummyEditwin:\n def __init__(self, root, text):\n self.root = root\n self.text = text\n self.indentwidth = 8\n self.tabwidth = 8\n self.context_use_ps1 = True\n\n\nclass AutoCompleteTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n requires('gui')\n cls.root = Tk()\n cls.text = Text(cls.root)\n cls.editor = DummyEditwin(cls.root, cls.text)\n\n @classmethod\n def tearDownClass(cls):\n del cls.editor, cls.text\n cls.root.destroy()\n del cls.root\n\n def 
setUp(self):\n self.editor.text.delete('1.0', 'end')\n self.autocomplete = ac.AutoComplete(self.editor)\n\n def test_init(self):\n self.assertEqual(self.autocomplete.editwin, self.editor)\n\n def test_make_autocomplete_window(self):\n testwin = self.autocomplete._make_autocomplete_window()\n self.assertIsInstance(testwin, acw.AutoCompleteWindow)\n\n def test_remove_autocomplete_window(self):\n self.autocomplete.autocompletewindow = (\n self.autocomplete._make_autocomplete_window())\n self.autocomplete._remove_autocomplete_window()\n self.assertIsNone(self.autocomplete.autocompletewindow)\n\n def METHOD_NAME(self):\n # Test that force_open_completions_event calls _open_completions\n o_cs = Func()\n self.autocomplete.open_completions = o_cs\n self.autocomplete.force_open_completions_event('event')\n self.assertEqual(o_cs.args, (True, False, True))\n\n def test_try_open_completions_event(self):\n Equal = self.assertEqual\n autocomplete = self.autocomplete\n trycompletions = self.autocomplete.try_open_completions_event\n o_c_l = Func()\n autocomplete._open_completions_later = o_c_l\n\n # _open_completions_later should not be called with no text in editor\n trycompletions('event')\n Equal(o_c_l.args, None)\n\n # _open_completions_later should be called with COMPLETE_ATTRIBUTES (1)\n self.text.insert('1.0', 're.')\n trycompletions('event')\n Equal(o_c_l.args, (False, False, False, 1))\n\n # _open_completions_later should be called with COMPLETE_FILES (2)\n self.text.delete('1.0', 'end')\n self.text.insert('1.0', '\"./Lib/')\n trycompletions('event')\n Equal(o_c_l.args, (False, False, False, 2))\n\n def test_autocomplete_event(self):\n Equal = self.assertEqual\n autocomplete = self.autocomplete\n\n # Test that the autocomplete event is ignored if user is pressing a\n # modifier key in addition to the tab key\n ev = Event(mc_state=True)\n self.assertIsNone(autocomplete.autocomplete_event(ev))\n del ev.mc_state\n\n # If autocomplete window is open, complete() method is called\n self.text.insert('1.0', 're.')\n # This must call autocomplete._make_autocomplete_window()\n Equal(self.autocomplete.autocomplete_event(ev), 'break')\n\n # If autocomplete window is not active or does not exist,\n # open_completions is called. 
Return depends on its return.\n autocomplete._remove_autocomplete_window()\n o_cs = Func() # .result = None\n autocomplete.open_completions = o_cs\n Equal(self.autocomplete.autocomplete_event(ev), None)\n Equal(o_cs.args, (False, True, True))\n o_cs.result = True\n Equal(self.autocomplete.autocomplete_event(ev), 'break')\n Equal(o_cs.args, (False, True, True))\n\n def test_open_completions_later(self):\n # Test that autocomplete._delayed_completion_id is set\n pass\n\n def test_delayed_open_completions(self):\n # Test that autocomplete._delayed_completion_id set to None and that\n # open_completions only called if insertion index is the same as\n # _delayed_completion_index\n pass\n\n def test_open_completions(self):\n # Test completions of files and attributes as well as non-completion\n # of errors\n pass\n\n def test_fetch_completions(self):\n # Test that fetch_completions returns 2 lists:\n # For attribute completion, a large list containing all variables, and\n # a small list containing non-private variables.\n # For file completion, a large list containing all files in the path,\n # and a small list containing files that do not start with '.'\n pass\n\n def test_get_entity(self):\n # Test that a name is in the namespace of sys.modules and\n # __main__.__dict__\n pass\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)"}}},{"rowIdx":2076,"cells":{"id":{"kind":"number","value":2076,"string":"2,076"},"label":{"kind":"string","value":"replace variables"},"text":{"kind":"string","value":"# SPDX-FileCopyrightText: Florian Bruhin (The Compiler) \n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\n\"\"\"Module containing command managers (SearchRunner and CommandRunner).\"\"\"\n\nimport traceback\nimport re\nimport contextlib\nfrom typing import TYPE_CHECKING, Callable, Dict, Iterator, Mapping, MutableMapping\n\nfrom qutebrowser.qt.core import pyqtSlot, QUrl, QObject\n\nfrom qutebrowser.api import cmdutils\nfrom qutebrowser.commands import cmdexc, parser\nfrom qutebrowser.utils import message, objreg, qtutils, usertypes, utils\nfrom qutebrowser.keyinput import macros, modeman\n\nif TYPE_CHECKING:\n from qutebrowser.mainwindow import tabbedbrowser\n_ReplacementFunction = Callable[['tabbedbrowser.TabbedBrowser'], str]\n\n\nlast_command = {}\n\n\ndef _url(tabbed_browser):\n \"\"\"Convenience method to get the current url.\"\"\"\n try:\n return tabbed_browser.current_url()\n except qtutils.QtValueError as e:\n msg = \"Current URL is invalid\"\n if e.reason:\n msg += \" ({})\".format(e.reason)\n msg += \"!\"\n raise cmdutils.CommandError(msg)\n\n\ndef _init_variable_replacements() -> Mapping[str, _ReplacementFunction]:\n \"\"\"Return a dict from variable replacements to fns processing them.\"\"\"\n replacements: Dict[str, _ReplacementFunction] = {\n 'url': lambda tb: _url(tb).toString(\n QUrl.ComponentFormattingOption.FullyEncoded | QUrl.UrlFormattingOption.RemovePassword),\n 'url:pretty': lambda tb: _url(tb).toString(\n QUrl.ComponentFormattingOption.DecodeReserved | QUrl.UrlFormattingOption.RemovePassword),\n 'url:domain': lambda tb: \"{}://{}{}\".format(\n _url(tb).scheme(), _url(tb).host(),\n \":\" + str(_url(tb).port()) if _url(tb).port() != -1 else \"\"),\n 'url:auth': lambda tb: \"{}:{}@\".format(\n _url(tb).userName(),\n _url(tb).password()) if _url(tb).userName() else \"\",\n 'url:scheme': lambda tb: _url(tb).scheme(),\n 'url:username': lambda tb: _url(tb).userName(),\n 'url:password': lambda tb: _url(tb).password(),\n 'url:host': lambda tb: _url(tb).host(),\n 'url:port': lambda 
tb: str(\n _url(tb).port()) if _url(tb).port() != -1 else \"\",\n 'url:path': lambda tb: _url(tb).path(),\n 'url:query': lambda tb: _url(tb).query(),\n 'title': lambda tb: tb.widget.page_title(tb.widget.currentIndex()),\n 'clipboard': lambda _: utils.get_clipboard(),\n 'primary': lambda _: utils.get_clipboard(selection=True),\n }\n\n for key in list(replacements):\n modified_key = '{' + key + '}'\n # x = modified_key is to avoid binding x as a closure\n replacements[modified_key] = (\n lambda _, x=modified_key: x) # type: ignore[misc]\n return replacements\n\n\nVARIABLE_REPLACEMENTS = _init_variable_replacements()\n# A regex matching all variable replacements\nVARIABLE_REPLACEMENT_PATTERN = re.compile(\n \"{(?P\" + \"|\".join(VARIABLE_REPLACEMENTS.keys()) + \")}\")\n\n\ndef METHOD_NAME(win_id, arglist):\n \"\"\"Utility function to replace variables like {url} in a list of args.\"\"\"\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=win_id)\n values: MutableMapping[str, str] = {}\n args = []\n\n def repl_cb(matchobj):\n \"\"\"Return replacement for given match.\"\"\"\n var = matchobj.group(\"var\")\n if var not in values:\n values[var] = VARIABLE_REPLACEMENTS[var](tabbed_browser)\n return values[var]\n\n try:\n for arg in arglist:\n # using re.sub with callback function replaces all variables in a\n # single pass and avoids expansion of nested variables (e.g.\n # \"{url}\" from clipboard is not expanded)\n args.append(VARIABLE_REPLACEMENT_PATTERN.sub(repl_cb, arg))\n except utils.ClipboardError as e:\n raise cmdutils.CommandError(e)\n return args\n\n\nclass AbstractCommandRunner(QObject):\n\n \"\"\"Abstract base class for CommandRunner.\"\"\"\n\n def run(self, text, count=None, *, safely=False):\n raise NotImplementedError\n\n @pyqtSlot(str, int)\n @pyqtSlot(str)\n def run_safely(self, text, count=None):\n \"\"\"Run a command and display exceptions in the statusbar.\"\"\"\n self.run(text, count, safely=True)\n\n\nclass CommandRunner(AbstractCommandRunner):\n\n \"\"\"Parse and run qutebrowser commandline commands.\n\n Attributes:\n _win_id: The window this CommandRunner is associated with.\n \"\"\"\n\n def __init__(self, win_id, partial_match=False, find_similar=True, parent=None):\n super().__init__(parent)\n self._parser = parser.CommandParser(\n partial_match=partial_match,\n find_similar=find_similar,\n )\n self._win_id = win_id\n\n @contextlib.contextmanager\n def _handle_error(self, safely: bool) -> Iterator[None]:\n \"\"\"Show exceptions as errors if safely=True is given.\"\"\"\n try:\n yield\n except cmdexc.Error as e:\n if safely:\n message.error(str(e), stack=traceback.format_exc())\n else:\n raise\n\n def run(self, text, count=None, *, safely=False):\n \"\"\"Parse a command from a line of text and run it.\n\n Args:\n text: The text to parse.\n count: The count to pass to the command.\n safely: Show CmdError exceptions as messages.\n \"\"\"\n record_last_command = True\n record_macro = True\n\n mode_manager = modeman.instance(self._win_id)\n cur_mode = mode_manager.mode\n\n parsed = None\n with self._handle_error(safely):\n parsed = self._parser.parse_all(text)\n\n if parsed is None:\n return # type: ignore[unreachable]\n\n for result in parsed:\n with self._handle_error(safely):\n if result.cmd.no_replace_variables:\n args = result.args\n else:\n args = METHOD_NAME(self._win_id, result.args)\n\n result.cmd.run(self._win_id, args, count=count)\n\n if result.cmdline[0] in ['repeat-command', 'cmd-repeat-last']:\n record_last_command = False\n\n if 
result.cmdline[0] in ['macro-record', 'macro-run', 'set-cmd-text', 'cmd-set-text']:\n record_macro = False\n\n if record_last_command:\n last_command[cur_mode] = (text, count)\n\n if record_macro and cur_mode == usertypes.KeyMode.normal:\n macros.macro_recorder.record_command(text, count)"}}},{"rowIdx":2077,"cells":{"id":{"kind":"number","value":2077,"string":"2,077"},"label":{"kind":"string","value":"get name cache info"},"text":{"kind":"string","value":"from functools import lru_cache\nimport logging\nimport re\n\nfrom lona import default_settings\n\nABSTRACT_ROUTE_RE = re.compile(r'<(?P[^:>]+)(:(?P[^>]+))?>')\nROUTE_PART_FORMAT_STRING = r'(?P<{}>{})'\nDEFAULT_PATTERN = r'[^/]+'\nOPTIONAL_TRAILING_SLASH_PATTERN = r'(/)'\n\nMATCH_ALL = 1\n\nlogger = logging.getLogger('lona.routing')\n\n\nclass Route:\n def __init__(self, raw_pattern, view, name='', interactive=True,\n http_pass_through=False, frontend_view=None):\n\n self.raw_pattern = raw_pattern\n self.view = view\n self.name = name\n self.interactive = interactive\n self.http_pass_through = http_pass_through\n self.frontend_view = frontend_view\n\n self.path = None\n self.format_string = ''\n self.optional_trailing_slash = False\n\n # match all\n if self.raw_pattern == MATCH_ALL:\n self.path = MATCH_ALL\n\n # string or regex\n else:\n raw_pattern = self.raw_pattern\n\n if raw_pattern.endswith(OPTIONAL_TRAILING_SLASH_PATTERN):\n self.optional_trailing_slash = True\n\n raw_pattern = \\\n raw_pattern[:-len(OPTIONAL_TRAILING_SLASH_PATTERN)]\n\n groups = ABSTRACT_ROUTE_RE.findall(raw_pattern)\n\n # path is no pattern but simple string\n if not groups:\n self.path = raw_pattern\n self.format_string = raw_pattern\n\n return\n\n pattern_names = [i[0] for i in groups]\n patterns = [(i[0], i[2] or DEFAULT_PATTERN) for i in groups]\n cleaned_pattern = ABSTRACT_ROUTE_RE.sub('{}', raw_pattern)\n\n # setup format string\n self.format_string = cleaned_pattern.format(\n *['{' + i + '}' for i in pattern_names])\n\n # compile pattern\n self.pattern = re.compile(\n r'^{}{}$'.format( # NOQA: FS002\n cleaned_pattern.format(\n *[ROUTE_PART_FORMAT_STRING.format(*i)\n for i in patterns],\n ),\n (r'(/)?'\n if self.optional_trailing_slash else ''),\n ),\n )\n\n def match(self, path):\n # match all\n if self.path == MATCH_ALL:\n return True, {}\n\n # simple string\n if self.path:\n if self.optional_trailing_slash and path.endswith('/'):\n path = path[:-1]\n\n return path == self.path, {}\n\n # pattern\n match_object = self.pattern.match(path)\n\n if not match_object:\n return False, {}\n\n return True, match_object.groupdict()\n\n def __repr__(self):\n raw_pattern = self.raw_pattern\n\n if raw_pattern == MATCH_ALL:\n raw_pattern = 'MATCH_ALL'\n\n return f''\n\n\nclass Router:\n def __init__(self):\n self.routes = []\n\n self.resize_name_cache(\n default_settings.ROUTING_NAME_CACHE_MAX_SIZE,\n )\n\n self.resize_resolve_cache(\n default_settings.ROUTING_RESOLVE_CACHE_MAX_SIZE,\n )\n\n self.resize_reverse_cache(\n default_settings.ROUTING_REVERSE_CACHE_MAX_SIZE,\n )\n\n # caches ##################################################################\n # name\n def resize_name_cache(self, max_size):\n self._name_lru_cache = lru_cache(max_size)(self._get_route)\n\n def METHOD_NAME(self):\n return self._name_lru_cache.cache_info()\n\n def clear_name_cache_info(self):\n return self._name_lru_cache.cache_clear()\n\n # resolve\n def resize_resolve_cache(self, max_size):\n self._resolve_lru_cache = lru_cache(max_size)(self._resolve)\n\n def get_resolve_cache_info(self):\n 
return self._resolve_lru_cache.cache_info()\n\n def clear_resolve_cache_info(self):\n return self._resolve_lru_cache.cache_clear()\n\n # reverse\n def resize_reverse_cache(self, max_size):\n self._reverse_lru_cache = lru_cache(max_size)(self._reverse)\n\n def get_reverse_cache_info(self):\n return self._reverse_lru_cache.cache_info()\n\n def clear_reverse_cache_info(self):\n return self._reverse_lru_cache.cache_clear()\n\n # routes ##################################################################\n def add_route(self, route):\n # check if route name already exists\n if route.name:\n for _route in self.routes:\n if route.name == _route.name:\n logger.warning(\n \"route name '%s' already exists\",\n route.name,\n )\n\n self.routes.append(route)\n\n def add_routes(self, *routes):\n for route in routes:\n self.add_route(route)\n\n def _get_route(self, name):\n for route in self.routes:\n if route.name == name:\n return route\n\n def get_route(self, *args, **kwargs):\n return self._name_lru_cache(*args, **kwargs)\n\n # resolve #################################################################\n def _resolve(self, path):\n logger.debug(\"resolving '%s'\", path)\n\n for route in self.routes:\n match, match_info = route.match(path)\n\n if match:\n logger.debug('%s matched', route)\n\n return True, route, match_info\n\n logger.debug(\"no match for '%s'\", path)\n\n return False, None, {}\n\n def resolve(self, *args, **kwargs):\n return self._resolve_lru_cache(*args, **kwargs)\n\n # reverse #################################################################\n def _reverse(self, route_name, *args, **kwargs):\n route = None\n\n for _route in self.routes:\n if _route.name == route_name:\n route = _route\n\n break\n\n if not route:\n raise ValueError(f\"no route named '{route_name}' found\")\n\n if route.path:\n return route.path\n\n try:\n return route.format_string.format(*args, **kwargs)\n\n except KeyError as e:\n key_error = e\n\n # raise is outside of except block to avoid stacking tracebacks\n raise ValueError(f'missing URL arg: {key_error.args[0]}')\n\n def reverse(self, *args, **kwargs):\n return self._reverse_lru_cache(*args, **kwargs)"}}},{"rowIdx":2078,"cells":{"id":{"kind":"number","value":2078,"string":"2,078"},"label":{"kind":"string","value":"get function"},"text":{"kind":"string","value":"from math import floor\n\nimport numpy as np\nfrom scipy.signal import savgol_coeffs, savgol_filter\nfrom woodwork.column_schema import ColumnSchema\nfrom woodwork.logical_types import Double\n\nfrom featuretools.primitives.base import TransformPrimitive\n\n\nclass SavgolFilter(TransformPrimitive):\n \"\"\"Applies a Savitzky-Golay filter to a list of values.\n\n Description:\n Given a list of values, return a smoothed list which increases\n the signal to noise ratio without greatly distoring the\n signal. Uses the `Savitzky–Golay filter` method.\n\n If the input list has less than 20 values, it will be returned\n as is.\n\n See the following page for more info:\n https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.signal.savgol_filter.html\n\n Args:\n window_length (int): The length of the filter window (i.e. the number\n of coefficients). `window_length` must be a positive odd integer.\n\n polyorder (int): The order of the polynomial used to fit the samples.\n `polyorder` must be less than `window_length`.\n\n deriv (int): Optional. The order of the derivative to compute. This\n must be a nonnegative integer. 
The default is 0, which means to\n filter the data without differentiating.\n\n delta (float): Optional. The spacing of the samples to which the filter\n will be applied. This is only used if deriv > 0. Default is 1.0.\n\n mode (str): Optional. Must be 'mirror', 'constant', 'nearest', 'wrap'\n or 'interp'. This determines the type of extension to use for the\n padded signal to which the filter is applied. When `mode` is\n 'constant', the padding value is given by `cval`. See the Notes\n for more details on 'mirror', 'constant', 'wrap', and 'nearest'.\n\n When the 'interp' mode is selected (the default), no extension\n is used. Instead, a degree `polyorder` polynomial is fit to the\n last `window_length` values of the edges, and this polynomial is\n used to evaluate the last `window_length // 2` output values.\n\n cval (scalar): Optional. Value to fill past the edges of the input\n if `mode` is 'constant'. Default is 0.0.\n\n Examples:\n >>> savgol_filter = SavgolFilter()\n >>> data = [0, 1, 1, 2, 3, 4, 5, 7, 8, 7, 9, 9, 12, 11, 12, 14, 15, 17, 17, 17, 20]\n >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]\n [0.0429, 0.8286, 1.2571]\n\n We can control `window_length` and `polyorder` of the filter.\n\n >>> savgol_filter = SavgolFilter(window_length=13, polyorder=3)\n >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]\n [-0.0962, 0.6484, 1.4451]\n\n We can also control the `deriv` and `delta` parameters.\n\n >>> savgol_filter = SavgolFilter(deriv=1, delta=1.5)\n >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]\n [0.754, 0.3492, 0.2778]\n\n Finally, we can use `mode` to control how edge values are handled.\n\n >>> savgol_filter = SavgolFilter(mode='constant', cval=5)\n >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]\n [1.5429, 0.2286, 1.2571]\n \"\"\"\n\n name = \"savgol_filter\"\n input_types = [ColumnSchema(semantic_tags={\"numeric\"})]\n return_type = ColumnSchema(logical_type=Double, semantic_tags={\"numeric\"})\n\n def __init__(\n self,\n window_length=None,\n polyorder=None,\n deriv=0,\n delta=1.0,\n mode=\"interp\",\n cval=0.0,\n ):\n if window_length is not None and polyorder is not None:\n try:\n if mode not in [\"mirror\", \"constant\", \"nearest\", \"interp\", \"wrap\"]:\n raise ValueError(\n \"mode must be 'mirror', 'constant', \"\n \"'nearest', 'wrap' or 'interp'.\",\n )\n savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)\n except Exception:\n raise\n elif (window_length is None and polyorder is not None) or (\n window_length is not None and polyorder is None\n ):\n error_text = (\n \"Both window_length and polyorder must be defined if you define one.\"\n )\n raise ValueError(error_text)\n\n self.window_length = window_length\n self.polyorder = polyorder\n self.deriv = deriv\n self.delta = delta\n self.mode = mode\n self.cval = cval\n\n def METHOD_NAME(self):\n def smooth(x):\n if x.shape[0] < 20:\n return x\n if np.isnan(np.min(x)):\n # interpolate the nan values, works for edges & middle nans\n mask = np.isnan(x)\n x[mask] = np.interp(\n np.flatnonzero(mask),\n np.flatnonzero(~mask),\n x[~mask],\n )\n window_length = self.window_length\n polyorder = self.polyorder\n if window_length is None and polyorder is None:\n window_length = floor(len(x) / 10) * 2 + 1\n polyorder = 3\n return savgol_filter(\n x,\n window_length=window_length,\n polyorder=polyorder,\n deriv=self.deriv,\n delta=self.delta,\n mode=self.mode,\n cval=self.cval,\n )\n\n return 
smooth"}}},{"rowIdx":2079,"cells":{"id":{"kind":"number","value":2079,"string":"2,079"},"label":{"kind":"string","value":"get revision object"},"text":{"kind":"string","value":"from time import time\n\nfrom django.contrib.admin.utils import unquote\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, JsonResponse\nfrom django.http.request import QueryDict\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import View\n\nfrom wagtail.admin.panels import get_edit_handler\nfrom wagtail.models import PreviewableMixin, RevisionMixin\nfrom wagtail.utils.decorators import xframe_options_sameorigin_override\n\n\nclass PreviewOnEdit(View):\n model = None\n form_class = None\n http_method_names = (\"post\", \"get\", \"delete\")\n preview_expiration_timeout = 60 * 60 * 24 # seconds\n session_key_prefix = \"wagtail-preview-\"\n\n def setup(self, request, *args, **kwargs):\n super().setup(request, *args, **kwargs)\n self.object = self.get_object()\n\n def dispatch(self, request, *args, **kwargs):\n if not isinstance(self.object, PreviewableMixin):\n raise Http404\n return super().dispatch(request, *args, **kwargs)\n\n def remove_old_preview_data(self):\n expiration = time() - self.preview_expiration_timeout\n expired_keys = [\n k\n for k, v in self.request.session.items()\n if k.startswith(self.session_key_prefix) and v[1] < expiration\n ]\n # Removes the session key gracefully\n for k in expired_keys:\n self.request.session.pop(k)\n\n @property\n def session_key(self):\n app_label = self.model._meta.app_label\n model_name = self.model._meta.model_name\n unique_key = f\"{app_label}-{model_name}-{self.object.pk}\"\n return f\"{self.session_key_prefix}{unique_key}\"\n\n def get_object(self):\n obj = get_object_or_404(self.model, pk=unquote(self.kwargs[\"pk\"]))\n if isinstance(obj, RevisionMixin):\n obj = obj.get_latest_revision_as_object()\n return obj\n\n def get_form_class(self):\n if self.form_class:\n return self.form_class\n return get_edit_handler(self.model).get_form_class()\n\n def get_form(self, query_dict):\n form_class = self.get_form_class()\n\n if not query_dict:\n # Query dict is empty, return null form\n return form_class(instance=self.object, for_user=self.request.user)\n\n return form_class(query_dict, instance=self.object, for_user=self.request.user)\n\n def _get_data_from_session(self):\n post_data, _ = self.request.session.get(self.session_key, (None, None))\n if not isinstance(post_data, str):\n post_data = \"\"\n return QueryDict(post_data)\n\n def post(self, request, *args, **kwargs):\n self.remove_old_preview_data()\n form = self.get_form(request.POST)\n is_valid = form.is_valid()\n\n if is_valid:\n # TODO: Handle request.FILES.\n request.session[self.session_key] = request.POST.urlencode(), time()\n is_available = True\n else:\n # Check previous data in session to determine preview availability\n form = self.get_form(self._get_data_from_session())\n is_available = form.is_valid()\n\n return JsonResponse({\"is_valid\": is_valid, \"is_available\": is_available})\n\n def error_response(self):\n return TemplateResponse(\n self.request,\n \"wagtailadmin/generic/preview_error.html\",\n {\"object\": self.object},\n )\n\n @method_decorator(xframe_options_sameorigin_override)\n def get(self, request, *args, **kwargs):\n form = self.get_form(self._get_data_from_session())\n\n if not form.is_valid():\n return 
self.error_response()\n\n form.save(commit=False)\n\n try:\n preview_mode = request.GET.get(\"mode\", self.object.default_preview_mode)\n except IndexError:\n raise PermissionDenied\n\n extra_attrs = {\n \"in_preview_panel\": request.GET.get(\"in_preview_panel\") == \"true\",\n \"is_editing\": True,\n }\n\n return self.object.make_preview_request(request, preview_mode, extra_attrs)\n\n def delete(self, request, *args, **kwargs):\n request.session.pop(self.session_key, None)\n return JsonResponse({\"success\": True})\n\n\nclass PreviewOnCreate(PreviewOnEdit):\n @property\n def session_key(self):\n app_label = self.model._meta.app_label\n model_name = self.model._meta.model_name\n return f\"{self.session_key_prefix}{app_label}-{model_name}\"\n\n def get_object(self):\n return self.model()\n\n\nclass PreviewRevision(View):\n model = None\n http_method_names = (\"get\",)\n\n def setup(self, request, pk, revision_id, *args, **kwargs):\n super().setup(request, *args, **kwargs)\n self.pk = pk\n self.revision_id = revision_id\n self.object = self.get_object()\n self.revision_object = self.METHOD_NAME()\n\n def get_object(self):\n if not issubclass(self.model, RevisionMixin):\n raise Http404\n return get_object_or_404(self.model, pk=unquote(self.pk))\n\n def METHOD_NAME(self):\n revision = get_object_or_404(self.object.revisions, id=self.revision_id)\n return revision.as_object()\n\n def get(self, request, *args, **kwargs):\n try:\n preview_mode = request.GET.get(\n \"mode\", self.revision_object.default_preview_mode\n )\n except IndexError:\n raise PermissionDenied\n\n return self.revision_object.make_preview_request(request, preview_mode)"}}},{"rowIdx":2080,"cells":{"id":{"kind":"number","value":2080,"string":"2,080"},"label":{"kind":"string","value":"execution instance"},"text":{"kind":"string","value":"# Python\nimport pytest\nfrom unittest import mock\nfrom contextlib import contextmanager\n\nfrom awx.main.models import Credential, UnifiedJob, Instance\nfrom awx.main.tests.factories import (\n create_organization,\n create_job_template,\n create_instance,\n create_instance_group,\n create_notification_template,\n create_survey_spec,\n create_workflow_job_template,\n)\n\nfrom django.core.cache import cache\nfrom django.conf import settings\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--genschema\", action=\"store_true\", default=False, help=\"execute schema validator\")\n\n\ndef pytest_configure(config):\n import sys\n\n sys._called_from_test = True\n\n\ndef pytest_unconfigure(config):\n import sys\n\n del sys._called_from_test\n\n\n@pytest.fixture\ndef mock_access():\n @contextmanager\n def access_given_class(TowerClass):\n try:\n mock_instance = mock.MagicMock(__name__='foobar')\n MockAccess = mock.MagicMock(return_value=mock_instance)\n the_patch = mock.patch.dict('awx.main.access.access_registry', {TowerClass: MockAccess}, clear=False)\n the_patch.__enter__()\n yield mock_instance\n finally:\n the_patch.__exit__()\n\n return access_given_class\n\n\n@pytest.fixture\ndef job_template_factory():\n return create_job_template\n\n\n@pytest.fixture\ndef organization_factory():\n return create_organization\n\n\n@pytest.fixture\ndef notification_template_factory():\n return create_notification_template\n\n\n@pytest.fixture\ndef survey_spec_factory():\n return create_survey_spec\n\n\n@pytest.fixture\ndef instance_factory():\n return create_instance\n\n\n@pytest.fixture\ndef instance_group_factory():\n return create_instance_group\n\n\n@pytest.fixture\ndef 
controlplane_instance_group(instance_factory, instance_group_factory):\n \"\"\"There always has to be a controlplane instancegroup and at least one instance in it\"\"\"\n return create_instance_group(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, create_instance('hybrid-1', node_type='hybrid', capacity=500))\n\n\n@pytest.fixture\ndef default_instance_group(instance_factory, instance_group_factory):\n return create_instance_group(\"default\", instances=[create_instance(\"hostA\", node_type='execution')])\n\n\n@pytest.fixture\ndef control_instance():\n '''Control instance in the controlplane automatic IG'''\n inst = create_instance('control-1', node_type='control', capacity=500)\n return inst\n\n\n@pytest.fixture\ndef control_instance_low_capacity():\n '''Control instance in the controlplane automatic IG that has low capacity'''\n inst = create_instance('control-1', node_type='control', capacity=5)\n return inst\n\n\n@pytest.fixture\ndef METHOD_NAME():\n '''Execution node in the automatic default IG'''\n ig = create_instance_group('default')\n inst = create_instance('receptor-1', node_type='execution', capacity=500)\n ig.instances.add(inst)\n return inst\n\n\n@pytest.fixture\ndef hybrid_instance():\n '''Hybrid node in the default controlplane IG'''\n inst = create_instance('hybrid-1', node_type='hybrid', capacity=500)\n return inst\n\n\n@pytest.fixture\ndef job_template_with_survey_passwords_factory(job_template_factory):\n def rf(persisted):\n \"Returns job with linked JT survey with password survey questions\"\n objects = job_template_factory(\n 'jt',\n organization='org1',\n survey=[\n {'variable': 'submitter_email', 'type': 'text', 'default': 'foobar@redhat.com'},\n {'variable': 'secret_key', 'default': '6kQngg3h8lgiSTvIEb21', 'type': 'password'},\n {'variable': 'SSN', 'type': 'password'},\n ],\n persisted=persisted,\n )\n return objects.job_template\n\n return rf\n\n\n@pytest.fixture\ndef job_with_secret_key_unit(job_with_secret_key_factory):\n return job_with_secret_key_factory(persisted=False)\n\n\n@pytest.fixture\ndef workflow_job_template_factory():\n return create_workflow_job_template\n\n\n@pytest.fixture\ndef job_template_with_survey_passwords_unit(job_template_with_survey_passwords_factory):\n return job_template_with_survey_passwords_factory(persisted=False)\n\n\n@pytest.fixture\ndef mock_cache():\n class MockCache(object):\n cache = {}\n\n def get(self, key, default=None):\n return self.cache.get(key, default)\n\n def set(self, key, value, timeout=60):\n self.cache[key] = value\n\n def delete(self, key):\n del self.cache[key]\n\n return MockCache()\n\n\ndef pytest_runtest_teardown(item, nextitem):\n # clear Django cache at the end of every test ran\n # NOTE: this should not be memcache (as it is deprecated), nor should it be redis.\n # This is a local test cache, so we want every test to start with an empty cache\n cache.clear()\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef mock_external_credential_input_sources():\n # Credential objects query their related input sources on initialization.\n # We mock that behavior out of credentials by default unless we need to\n # test it explicitly.\n with mock.patch.object(Credential, 'dynamic_input_fields', new=[]) as _fixture:\n yield _fixture\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef mock_has_unpartitioned_events():\n # has_unpartitioned_events determines if there are any events still\n # left in the old, unpartitioned job events table. 
In order to work,\n # this method looks up when the partition migration occurred. When\n # Django's unit tests run, however, there will be no record of the migration.\n # We mock this out to circumvent the migration query.\n with mock.patch.object(UnifiedJob, 'has_unpartitioned_events', new=False) as _fixture:\n yield _fixture\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef mock_get_event_queryset_no_job_created():\n \"\"\"\n SQLite friendly since partitions aren't supported. Do not add the faked job_created field to the filter. If we do, it will result in an sql query for the\n job_created field. That field does not actually exist in a non-partition scenario.\n \"\"\"\n\n def event_qs(self):\n kwargs = {self.event_parent_key: self.id}\n return self.event_class.objects.filter(**kwargs)\n\n with mock.patch.object(UnifiedJob, 'get_event_queryset', lambda self: event_qs(self)) as _fixture:\n yield _fixture\n\n\n@pytest.fixture\ndef mock_me():\n me_mock = mock.MagicMock(return_value=Instance(id=1, hostname=settings.CLUSTER_HOST_ID, uuid='00000000-0000-0000-0000-000000000000'))\n with mock.patch.object(Instance.objects, 'me', me_mock):\n yield"}}},{"rowIdx":2081,"cells":{"id":{"kind":"number","value":2081,"string":"2,081"},"label":{"kind":"string","value":"type"},"text":{"kind":"string","value":"# coding=utf-8\n# *** WARNING: this file was generated by pulumi. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nimport copy\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom ... import _utilities\nfrom . import outputs\n\n__all__ = [\n 'GetApiPortalResult',\n 'AwaitableGetApiPortalResult',\n 'get_api_portal',\n 'get_api_portal_output',\n]\n\n@pulumi.output_type\nclass GetApiPortalResult:\n \"\"\"\n API portal resource\n \"\"\"\n def __init__(__self__, id=None, name=None, properties=None, sku=None, system_data=None, METHOD_NAME=None):\n if id and not isinstance(id, str):\n raise TypeError(\"Expected argument 'id' to be a str\")\n pulumi.set(__self__, \"id\", id)\n if name and not isinstance(name, str):\n raise TypeError(\"Expected argument 'name' to be a str\")\n pulumi.set(__self__, \"name\", name)\n if properties and not isinstance(properties, dict):\n raise TypeError(\"Expected argument 'properties' to be a dict\")\n pulumi.set(__self__, \"properties\", properties)\n if sku and not isinstance(sku, dict):\n raise TypeError(\"Expected argument 'sku' to be a dict\")\n pulumi.set(__self__, \"sku\", sku)\n if system_data and not isinstance(system_data, dict):\n raise TypeError(\"Expected argument 'system_data' to be a dict\")\n pulumi.set(__self__, \"system_data\", system_data)\n if METHOD_NAME and not isinstance(METHOD_NAME, str):\n raise TypeError(\"Expected argument 'type' to be a str\")\n pulumi.set(__self__, \"type\", METHOD_NAME)\n\n @property\n @pulumi.getter\n def id(self) -> str:\n \"\"\"\n Fully qualified resource Id for the resource.\n \"\"\"\n return pulumi.get(self, \"id\")\n\n @property\n @pulumi.getter\n def name(self) -> str:\n \"\"\"\n The name of the resource.\n \"\"\"\n return pulumi.get(self, \"name\")\n\n @property\n @pulumi.getter\n def properties(self) -> 'outputs.ApiPortalPropertiesResponse':\n \"\"\"\n API portal properties payload\n \"\"\"\n return pulumi.get(self, \"properties\")\n\n @property\n @pulumi.getter\n def sku(self) -> Optional['outputs.SkuResponse']:\n \"\"\"\n Sku of the API portal resource\n \"\"\"\n return pulumi.get(self, \"sku\")\n\n 
@property\n @pulumi.getter(name=\"systemData\")\n def system_data(self) -> 'outputs.SystemDataResponse':\n \"\"\"\n Metadata pertaining to creation and last modification of the resource.\n \"\"\"\n return pulumi.get(self, \"system_data\")\n\n @property\n @pulumi.getter\n def METHOD_NAME(self) -> str:\n \"\"\"\n The type of the resource.\n \"\"\"\n return pulumi.get(self, \"type\")\n\n\nclass AwaitableGetApiPortalResult(GetApiPortalResult):\n # pylint: disable=using-constant-test\n def __await__(self):\n if False:\n yield self\n return GetApiPortalResult(\n id=self.id,\n name=self.name,\n properties=self.properties,\n sku=self.sku,\n system_data=self.system_data,\n METHOD_NAME=self.METHOD_NAME)\n\n\ndef get_api_portal(api_portal_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n service_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiPortalResult:\n \"\"\"\n Get the API portal and its properties.\n\n\n :param str api_portal_name: The name of API portal.\n :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.\n :param str service_name: The name of the Service resource.\n \"\"\"\n __args__ = dict()\n __args__['apiPortalName'] = api_portal_name\n __args__['resourceGroupName'] = resource_group_name\n __args__['serviceName'] = service_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:appplatform/v20230701preview:getApiPortal', __args__, opts=opts, typ=GetApiPortalResult).value\n\n return AwaitableGetApiPortalResult(\n id=pulumi.get(__ret__, 'id'),\n name=pulumi.get(__ret__, 'name'),\n properties=pulumi.get(__ret__, 'properties'),\n sku=pulumi.get(__ret__, 'sku'),\n system_data=pulumi.get(__ret__, 'system_data'),\n METHOD_NAME=pulumi.get(__ret__, 'type'))\n\n\n@_utilities.lift_output_func(get_api_portal)\ndef get_api_portal_output(api_portal_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApiPortalResult]:\n \"\"\"\n Get the API portal and its properties.\n\n\n :param str api_portal_name: The name of API portal.\n :param str resource_group_name: The name of the resource group that contains the resource. 
You can obtain this value from the Azure Resource Manager API or the portal.\n :param str service_name: The name of the Service resource.\n \"\"\"\n ..."}}},{"rowIdx":2082,"cells":{"id":{"kind":"number","value":2082,"string":"2,082"},"label":{"kind":"string","value":"test invalid human handle"},"text":{"kind":"string","value":"# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS\nfrom __future__ import annotations\n\nimport zlib\nfrom unicodedata import normalize\n\nimport pytest\n\nfrom parsec._parsec import (\n DataError,\n DeviceID,\n DeviceName,\n EntryName,\n EntryNameError,\n HumanHandle,\n OrganizationID,\n SecretKey,\n UserID,\n)\nfrom parsec._parsec import (\n FileManifest as RemoteFileManifest,\n)\nfrom parsec._parsec import (\n FolderManifest as RemoteFolderManifest,\n)\nfrom parsec._parsec import (\n UserManifest as RemoteUserManifest,\n)\nfrom parsec._parsec import (\n WorkspaceManifest as RemoteWorkspaceManifest,\n)\nfrom parsec.serde import packb\nfrom tests.common import LocalDevice\n\n\n@pytest.mark.parametrize(\"cls\", (UserID, DeviceName, OrganizationID))\n@pytest.mark.parametrize(\n \"data\",\n (\n \"!x\", # Invalid character\n \" x\", # Invalid character\n \"x\" * 33, # Too long\n # Sinogram encoded on 3 bytes with utf8, so those 11 characters\n # form a 33 bytes long utf8 string !\n \"飞\" * 11,\n \"😀\", # Not a unicode word\n \"\",\n ),\n)\ndef test_max_bytes_size(cls, data):\n with pytest.raises(ValueError):\n cls(data)\n\n\n@pytest.mark.parametrize(\"cls\", (UserID, DeviceName, OrganizationID))\ndef test_normalization(cls):\n nfc_str = normalize(\"NFC\", \"àæßšūÿź\") # cspell: disable-line\n nfd_str = normalize(\"NFD\", nfc_str)\n\n assert nfc_str != nfd_str\n assert cls(nfd_str).str == nfc_str\n assert cls(nfc_str).str == nfc_str\n assert cls(nfc_str + nfd_str).str == nfc_str + nfc_str\n\n\n@pytest.mark.parametrize(\"cls\", (UserID, DeviceName, OrganizationID))\n@pytest.mark.parametrize(\n \"data\", (\"x\", \"x\" * 32, \"飞\" * 10 + \"xx\", \"X1-_é飞\") # 32 bytes long utf8 string # Mix-and-match\n)\ndef test_good_pattern(cls, data):\n cls(data)\n\n\n@pytest.mark.parametrize(\n \"data\",\n (\n \"!x@x\", # Invalid character\n \"x@ \", # Invalid character\n \"x\" * 66, # Too long\n # Sinogram encoded on 3 bytes with utf8, so those 22 characters\n # form a 66 bytes long utf8 string !\n \"飞\" * 22,\n \"😀@x\", # Not a unicode word\n \"x\", # Missing @ separator\n \"@x\",\n \"x@\",\n \"x\" * 62 + \"@x\", # Respect overall length but not UserID length\n \"x@\" + \"x\" * 62, # Respect overall length but not DeviceName length\n \"\",\n ),\n)\ndef test_max_bytes_size_device_id(data):\n with pytest.raises(ValueError):\n DeviceID(data)\n\n\n@pytest.mark.parametrize(\n \"data\",\n (\n \"x@x\",\n \"x\" * 32 + \"@\" + \"x\" * 32,\n \"飞\" * 10 + \"xx@xx\" + \"飞\" * 10, # 65 bytes long utf8 string\n \"X1-_é飞@X1-_é飞\", # Mix-and-match\n ),\n)\ndef test_good_pattern_device_id(data):\n DeviceID(data)\n\n\ndef test_human_handle_compare():\n a = HumanHandle(email=\"alice@example.com\", label=\"Alice\")\n a2 = HumanHandle(email=\"alice@example.com\", label=\"Whatever\")\n b = HumanHandle(email=\"bob@example.com\", label=\"Bob\")\n assert a == a2\n assert a != b\n assert b == b\n\n\n@pytest.mark.parametrize(\n \"email,label\",\n (\n (\"alice@example.com\", \"Alice\"),\n (\"a@x\", \"A\"), # Smallest size\n (f\"{'a' * 64}@{'x' * 185}.com\", \"x\" * 254), # Max sizes\n (f\"{'飞' * 21}@{'飞' * 62}.com\", f\"{'飞' * 84}xx\"), # Unicode & max size\n 
(\"john.doe@example.com\", \"J.D.\"),\n ),\n)\ndef test_valid_human_handle(email, label):\n HumanHandle(email, label)\n\n\n@pytest.mark.parametrize(\n \"email,label\",\n (\n (\"alice@example.com\", \"x\" * 255),\n (f\"{'@example.com':a>255}\", \"Alice\"),\n (\"alice@example.com\", \"飞\" * 85), # 255 bytes long utf8 label\n (f\"{'飞' * 21}@{'飞' * 63}.x\", \"Alice\"), # 255 bytes long utf8 email\n (\"alice@example.com\", \"\"), # Empty label\n (\"\", \"Alice\"), # Empty email\n (\"\", \"Alice \"), # Empty email and misleading label\n (\"Alice \", \"\"), # Empty label and misleading label\n (\"Alice <@example.com>\", \"Alice\"), # Missing local part in email\n ),\n)\ndef METHOD_NAME(email, label):\n with pytest.raises(ValueError):\n HumanHandle(email, label)\n\n\ndef test_human_handle_normalization():\n nfc_label = normalize(\"NFC\", \"àæßšūÿź\") # cspell: disable-line\n nfd_label = normalize(\"NFD\", nfc_label)\n nfc_email = normalize(\"NFC\", \"àæßš@ūÿ.ź\") # cspell: disable-line\n nfd_email = normalize(\"NFD\", nfc_email)\n assert nfc_label != nfd_label\n assert nfc_email != nfd_email\n\n hh = HumanHandle(nfd_email, nfd_label)\n assert hh.email == nfc_email\n assert hh.label == nfc_label\n\n hh = HumanHandle(nfc_email, nfc_label)\n assert hh.email == nfc_email\n assert hh.label == nfc_label\n\n\n@pytest.mark.parametrize(\n \"data\",\n (\n \"foo\",\n \"foo.txt\",\n \"x\" * 255, # Max size\n \"飞\" * 85, # Unicode & max size\n \"X1-_é飞\",\n \"🌍☄️==🦕🦖💀\", # Probably a bad name for a real folder...\n \".a\", # Dot and dot-dot are allowed if they are not alone\n \"..a\",\n \"a..\",\n \"a.\",\n ),\n)\ndef test_valid_entry_name(data):\n EntryName(data)\n\n\n@pytest.mark.parametrize(\"data\", (\"x\" * 256, \"飞\" * 85 + \"x\"))\ndef test_entry_name_too_long(data):\n with pytest.raises(EntryNameError):\n EntryName(data)\n\n\n@pytest.mark.parametrize(\n \"data\",\n (\n \".\", # Not allowed\n \"..\", # Not allowed\n \"/x\", # Slash not allowed\n \"x/x\",\n \"x/\",\n \"/\",\n \"\\x00x\", # Null-byte not allowed\n \"x\\x00x\",\n \"x\\x00\",\n \"\\x00\",\n ),\n)\ndef test_invalid_entry_name(data):\n with pytest.raises(ValueError):\n EntryName(data)\n\n\ndef test_entry_name_normalization():\n nfc_str = normalize(\n \"NFC\", \"àáâäæãåāçćčèéêëēėęîïíīįìłñńôöòóœøōõßśšûüùúūÿžźż\" # cspell: disable-line\n )\n nfd_str = normalize(\"NFD\", nfc_str)\n\n assert nfc_str != nfd_str\n assert EntryName(nfd_str).str == nfc_str\n assert EntryName(nfc_str).str == nfc_str\n assert EntryName(nfc_str + nfd_str).str == nfc_str + nfc_str\n\n\ndef test_remote_manifests_load_invalid_data(alice: LocalDevice):\n key = SecretKey.generate()\n valid_zip_msgpack_but_bad_fields = zlib.compress(packb({\"foo\": 42}))\n valid_zip_bud_bad_msgpack = zlib.compress(b\"dummy\")\n invalid_zip = b\"\\x42\" * 10\n\n for cls in (\n RemoteFileManifest,\n RemoteFolderManifest,\n RemoteWorkspaceManifest,\n RemoteUserManifest,\n ):\n print(f\"Testing class {cls.__name__}\")\n with pytest.raises(DataError):\n cls.decrypt_verify_and_load(\n b\"\",\n key=key,\n author_verify_key=alice.verify_key,\n expected_author=alice.device_id,\n expected_timestamp=alice.timestamp(),\n )\n\n with pytest.raises(DataError):\n cls.decrypt_verify_and_load(\n invalid_zip,\n key=key,\n author_verify_key=alice.verify_key,\n expected_author=alice.device_id,\n expected_timestamp=alice.timestamp(),\n )\n\n with pytest.raises(DataError):\n cls.decrypt_verify_and_load(\n valid_zip_bud_bad_msgpack,\n key=key,\n author_verify_key=alice.verify_key,\n expected_author=alice.device_id,\n 
expected_timestamp=alice.timestamp(),\n )\n\n # Valid to deserialize, invalid fields\n with pytest.raises(DataError):\n cls.decrypt_verify_and_load(\n valid_zip_msgpack_but_bad_fields,\n key=key,\n author_verify_key=alice.verify_key,\n expected_author=alice.device_id,\n expected_timestamp=alice.timestamp(),\n )"}}},{"rowIdx":2083,"cells":{"id":{"kind":"number","value":2083,"string":"2,083"},"label":{"kind":"string","value":"test out file"},"text":{"kind":"string","value":"#!/usr/bin/env python\n#\n# Copyright 2008, Google Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Unit test for the gtest_xml_output module.\"\"\"\n\nimport os\nfrom xml.dom import minidom, Node\nfrom googletest.test import gtest_test_utils\nfrom googletest.test import gtest_xml_test_utils\n\nGTEST_OUTPUT_SUBDIR = \"xml_outfiles\"\nGTEST_OUTPUT_1_TEST = \"gtest_xml_outfile1_test_\"\nGTEST_OUTPUT_2_TEST = \"gtest_xml_outfile2_test_\"\n\nEXPECTED_XML_1 = \"\"\"\n\n \n \n \n \n \n \n \n \n \n\n\"\"\"\n\nEXPECTED_XML_2 = \"\"\"\n\n \n \n \n \n \n \n \n \n \n\n\"\"\"\n\n\nclass GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):\n \"\"\"Unit test for Google Test's XML output functionality.\"\"\"\n\n def setUp(self):\n # We want the trailing '/' that the last \"\" provides in os.path.join, for\n # telling Google Test to create an output directory instead of a single file\n # for xml output.\n self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),\n GTEST_OUTPUT_SUBDIR, \"\")\n self.DeleteFilesAndDir()\n\n def tearDown(self):\n self.DeleteFilesAndDir()\n\n def DeleteFilesAndDir(self):\n try:\n os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + \".xml\"))\n except os.error:\n pass\n try:\n os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + \".xml\"))\n except os.error:\n pass\n try:\n os.rmdir(self.output_dir_)\n except os.error:\n pass\n\n def testOutfile1(self):\n self.METHOD_NAME(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)\n\n def testOutfile2(self):\n self.METHOD_NAME(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)\n\n def METHOD_NAME(self, 
test_name, expected_xml):\n gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)\n command = [gtest_prog_path, \"--gtest_output=xml:%s\" % self.output_dir_]\n p = gtest_test_utils.Subprocess(command,\n working_dir=gtest_test_utils.GetTempDir())\n self.assert_(p.exited)\n self.assertEquals(0, p.exit_code)\n\n output_file_name1 = test_name + \".xml\"\n output_file1 = os.path.join(self.output_dir_, output_file_name1)\n output_file_name2 = 'lt-' + output_file_name1\n output_file2 = os.path.join(self.output_dir_, output_file_name2)\n self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),\n output_file1)\n\n expected = minidom.parseString(expected_xml)\n if os.path.isfile(output_file1):\n actual = minidom.parse(output_file1)\n else:\n actual = minidom.parse(output_file2)\n self.NormalizeXml(actual.documentElement)\n self.AssertEquivalentNodes(expected.documentElement,\n actual.documentElement)\n expected.unlink()\n actual.unlink()\n\n\nif __name__ == \"__main__\":\n os.environ[\"GTEST_STACK_TRACE_DEPTH\"] = \"0\"\n gtest_test_utils.Main()"}}},{"rowIdx":2084,"cells":{"id":{"kind":"number","value":2084,"string":"2,084"},"label":{"kind":"string","value":"replace tests"},"text":{"kind":"string","value":"#!/usr/bin/env python3\n# NUnit test validator for csplugin tasks\nimport json\nimport os\nimport re\nimport sys\nfrom subprocess import call, DEVNULL\n\n\ndef replace_all(lines, s1, s2):\n for i in range(len(lines)):\n s = lines[i]\n lines[i] = re.sub(s1, s2, lines[i])\n\n\ndef replace_by(lines, instructions):\n replace = instructions.get(\"replace\", None)\n if not replace:\n return\n for cond in replace:\n s1 = cond.get(\"sub\", \"\")\n if not s1:\n continue\n s2 = cond.get(\"by\", \"\")\n replace_all(lines, s1, s2)\n\n\ndef find_test(lines, testname):\n if not testname:\n return -1, -1\n reg = re.compile(testname)\n i1 = i2 = -1\n n = 0\n for i in range(len(lines)):\n s = lines[i]\n res = reg.match(s)\n if res:\n i1 = i - 1\n else:\n if i1 >= 0 and s.find(\"{\") >= 0:\n n += 1\n if i1 >= 0 and s.find(\"}\") >= 0:\n i2 = i\n n -= 1\n if n <= 0:\n break\n\n return i1, i2\n\n\ndef METHOD_NAME(lines, test):\n n = 1\n for t in test:\n i1, i2 = find_test(lines, t.get(\"replaceline\", None))\n if i1 < 0 or i2 < 0:\n continue\n tlines = lines[i1 : i2 + 1]\n del lines[i1 : i2 + 1]\n replacecall = t.get(\"replacecall\", \"\")\n byline = t.get(\"byline\", \"\")\n\n for tst in t.get(\"bycalls\", []):\n tmethod = list(tlines)\n tc = tst.get(\"call\", \"\")\n tr = tst.get(\"result\", \"\")\n if byline:\n tst[\"name\"] = tc + \"xxxx\" + str(n)\n tmethod[1] = byline + tst[\"name\"] + \"()\\n\"\n n += 1\n # replace_all(tmethod, replacecall, tr)\n replace_all(tmethod, replacecall, tc)\n lines[i1:i1] = tmethod\n\n\ndef count_points(lines, test):\n p = 0\n for t in test:\n for tst in t.get(\"bycalls\", []):\n name = tst.get(\"call\", \"XXXX\")\n result = tst.get(\"result\", \"XXXX\")\n # NUnit 3 uses \"Passed\" instead of \"Success\" and \"Failed\" instead of \"Failure\"\n if result == \"Success\":\n result = \"Passed\"\n elif result == \"Failure\":\n result = \"Failed\"\n expl = tst.get(\"expl\", \"???\")\n pts = tst.get(\"pts\", 1)\n line = [s for s in lines if s.find(name + \"xxxx\") >= 0]\n if line:\n line = line[0]\n rst = 'result=\"'\n i = line.find(rst)\n if i >= 0:\n line = line[i + len(rst) :]\n i = line.find('\"')\n if i >= 0:\n xmlres = line[:i]\n if xmlres == result:\n p += pts\n else:\n pts = tst.get(\"wrong\", 0)\n p += pts\n print(\n expl\n + \": pitäisi tulla \"\n + 
result\n + \", tuli: \"\n + xmlres\n + \". Pisteitä:\",\n pts,\n )\n return p\n\n\ndef scale_points(pts, points):\n if not points:\n return pts\n p = 0\n for pt in points:\n if pts < pt.get(\"from\", 0):\n return p\n p = pt.get(\"p\", pts)\n return p\n\n\nGLOBAL_NUGET_PACKAGES_PATH = \"/cs_data/dotnet/nuget_cache\"\n\n\ndef get_build_refs(ref_type):\n with open(f\"/cs_data/dotnet/configs/{ref_type}.build.deps\", encoding=\"utf-8\") as f:\n dep_paths = [\n os.path.join(GLOBAL_NUGET_PACKAGES_PATH, dep_line.strip())\n for dep_line in f.readlines()\n ]\n return [f\"-r:{p}\" for p in dep_paths]\n\n\ndef main():\n filename = sys.argv[1]\n filename2 = sys.argv[2]\n filename3 = \"T\" + filename\n lines = open(filename).readlines()\n lines2 = open(filename2).read()\n # yaml\n # instructions = yaml.load(lines2, CLoader)\n # insert = instructions.get(\"insert\", None)\n\n # json\n instructions = json.loads(lines2)\n insertfile = instructions.get(\"insert\", None)\n insert = \"\"\n if insertfile:\n insert = open(insertfile).read()\n\n replace_by(lines, instructions)\n\n METHOD_NAME(lines, instructions.get(\"test\", None))\n\n # print(\"\".join(lines))\n # print(insert)\n\n f = open(filename3, \"w\")\n f.writelines(lines)\n if insert:\n f.write(insert)\n f.close()\n args1 = [\n \"/cs/dotnet/csc\",\n \"-nologo\",\n f\"-out:{filename3}.dll\",\n \"-target:library\",\n *get_build_refs(\"nunit_test\"),\n *get_build_refs(\"jypeli\"),\n filename3,\n ]\n\n sourceFiles = instructions.get(\"sourceFiles\", [])\n for sourceFile in sourceFiles:\n args1.append(sourceFile)\n\n ret = call(args1)\n\n # print(ret)\n # print(args1)\n if ret != 0:\n print(\"Testikoodi ei käänny\")\n return\n\n args = [\"/cs/dotnet/nunit-test-dll\", f\"{filename3}.dll\"]\n ret = call(args, stdout=DEVNULL, stderr=DEVNULL, timeout=20)\n\n # https://docs.nunit.org/articles/nunit/running-tests/Console-Runner.html\n # print(args)\n if ret < 0:\n print(\"Testikoodia ei voi ajaa\")\n\n xml = open(\"TestResult.xml\").readlines()\n # print(\"\\n\".join(xml))\n\n points = count_points(xml, instructions.get(\"test\", None))\n points = scale_points(points, instructions.get(\"points\", None))\n print(\"Points: \" + f\"{points:.2f}\")\n\n\nif __name__ == \"__main__\":\n main()"}}},{"rowIdx":2085,"cells":{"id":{"kind":"number","value":2085,"string":"2,085"},"label":{"kind":"string","value":"locals example defined before"},"text":{"kind":"string","value":"# pylint: disable=missing-docstring, invalid-name, too-few-public-methods, import-outside-toplevel, fixme, line-too-long, broad-exception-raised\n\ndef test_regression_737():\n import xml # [unused-import]\n\ndef test_regression_923():\n import unittest.case # [unused-import]\n import xml as sql # [unused-import]\n\ndef test_unused_with_prepended_underscore():\n _foo = 42\n _ = 24\n __a = 24\n dummy = 24\n _a_ = 42 # [unused-variable]\n __a__ = 24 # [unused-variable]\n __never_used = 42\n\ndef test_local_field_prefixed_with_unused_or_ignored():\n flagged_local_field = 42 # [unused-variable]\n unused_local_field = 42\n ignored_local_field = 42\n\n\nclass HasUnusedDunderClass:\n\n def test(self):\n __class__ = 42 # [unused-variable]\n\n def best(self):\n self.test()\n\n\ndef METHOD_NAME():\n value = 42 # [possibly-unused-variable]\n return locals()\n\n\ndef locals_example_defined_after():\n local_variables = locals()\n value = 42 # [unused-variable]\n return local_variables\n\n\ndef locals_does_not_account_for_subscopes():\n value = 42 # [unused-variable]\n\n def some_other_scope():\n return locals()\n 
return some_other_scope\n\n\ndef unused_import_from():\n from functools import wraps as abc # [unused-import]\n from collections import namedtuple # [unused-import]\n\n\ndef unused_import_in_function(value):\n from string import digits, hexdigits # [unused-import]\n return value if value in digits else \"Nope\"\n\n\ndef hello(arg):\n my_var = 'something' # [unused-variable]\n if arg:\n return True\n raise Exception\n\n# pylint: disable=wrong-import-position\nPATH = OS = collections = deque = None\n\n\ndef function(matches):\n \"\"\"\"yo\"\"\"\n aaaa = 1 # [unused-variable]\n index = -1\n for match in matches:\n index += 1\n print(match)\n\nfrom astroid import nodes\ndef visit_if(self, node: nodes.If) -> None:\n \"\"\"increments the branches counter\"\"\"\n branches = 1\n # don't double count If nodes coming from some 'elif'\n if node.orelse and len(node.orelse) > 1:\n branches += 1\n self.inc_branch(branches)\n self.stmts += branches\n\n\ndef test_global():\n \"\"\" Test various assignments of global\n variables through imports.\n \"\"\"\n # pylint: disable=redefined-outer-name\n global PATH, OS, collections, deque # [global-statement]\n from os import path as PATH\n import os as OS\n import collections\n from collections import deque\n # make sure that these triggers unused-variable\n from sys import platform # [unused-import]\n from sys import version as VERSION # [unused-import]\n import this # [unused-import]\n import re as RE # [unused-import]\n\n# test cases that include exceptions\ndef function2():\n unused = 1 # [unused-variable]\n try:\n 1 / 0\n except ZeroDivisionError as error:\n try:\n 1 / 0\n except ZeroDivisionError as error: # [redefined-outer-name]\n raise Exception(\"\") from error\n\ndef func():\n try:\n 1 / 0\n except ZeroDivisionError as error:\n try:\n 1 / 0\n except error:\n print(\"error\")\n\ndef func2():\n try:\n 1 / 0\n except ZeroDivisionError as error:\n try:\n 1 / 0\n except:\n raise Exception(\"\") from error\n\ndef func3():\n try:\n 1 / 0\n except ZeroDivisionError as error:\n print(f\"{error}\")\n try:\n 1 / 2\n except TypeError as error: # [unused-variable, redefined-outer-name]\n print(\"warning\")\n\ndef func4():\n try:\n 1 / 0\n except ZeroDivisionError as error: # [unused-variable]\n try:\n 1 / 0\n except ZeroDivisionError as error: # [redefined-outer-name]\n print(\"error\")\n\n\ndef main(lst):\n \"\"\"https://github.com/pylint-dev/astroid/pull/1111#issuecomment-890367609\"\"\"\n try:\n raise ValueError\n except ValueError as e: # [unused-variable]\n pass\n\n for e in lst:\n pass\n\n # e will be undefined if lst is empty\n print(e) # [undefined-loop-variable]\n\nmain([])\n\n\ndef func5():\n \"\"\"No unused-variable for a container if iterated in comprehension\"\"\"\n x = []\n # Test case requires homonym between \"for x\" and \"in x\"\n assert [True for x in x]\n\n\ndef sibling_except_handlers():\n try:\n pass\n except ValueError as e:\n print(e)\n try:\n pass\n except ValueError as e:\n print(e)\n\ndef func6():\n a = 1\n\n def nonlocal_writer():\n nonlocal a\n\n for a in range(10):\n pass\n\n nonlocal_writer()\n\n assert a == 9, a\n\ndef test_regression_8595():\n # pylint: disable=broad-exception-caught\n import logging\n def compute():\n pass\n try:\n compute()\n error = False\n except Exception as e:\n logging.error(e)\n error = True\n if error:\n try:\n compute()\n except Exception as e: # [unused-variable]\n pass"}}},{"rowIdx":2086,"cells":{"id":{"kind":"number","value":2086,"string":"2,086"},"label":{"kind":"string","value":"can see ban 
details"},"text":{"kind":"string","value":"from django.apps import AppConfig\nfrom django.utils.translation import pgettext_lazy\n\nfrom .pages import user_profile, usercp, users_list\n\n\nclass MisagoUsersConfig(AppConfig):\n name = \"misago.users\"\n label = \"misago_users\"\n verbose_name = \"Misago Auth\"\n\n def ready(self):\n from . import signals as _\n from .admin import tasks # pylint: disable=unused-import\n\n self.register_default_usercp_pages()\n self.register_default_users_list_pages()\n self.register_default_user_profile_pages()\n\n def register_default_usercp_pages(self):\n def auth_is_not_delegated(request):\n return not request.settings.enable_oauth2_client\n\n usercp.add_section(\n link=\"misago:usercp-change-forum-options\",\n name=pgettext_lazy(\"user options page\", \"Forum options\"),\n component=\"forum-options\",\n icon=\"settings\",\n )\n usercp.add_section(\n link=\"misago:usercp-edit-details\",\n name=pgettext_lazy(\"user options page\", \"Edit details\"),\n component=\"edit-details\",\n icon=\"person_outline\",\n )\n usercp.add_section(\n link=\"misago:usercp-change-username\",\n name=pgettext_lazy(\"user options page\", \"Change username\"),\n component=\"change-username\",\n icon=\"card_membership\",\n visible_if=auth_is_not_delegated,\n )\n usercp.add_section(\n link=\"misago:usercp-change-email-password\",\n name=pgettext_lazy(\"user options page\", \"Change email or password\"),\n component=\"sign-in-credentials\",\n icon=\"vpn_key\",\n visible_if=auth_is_not_delegated,\n )\n\n def can_download_own_data(request):\n return request.settings.allow_data_downloads\n\n usercp.add_section(\n link=\"misago:usercp-download-data\",\n name=pgettext_lazy(\"user options page\", \"Download data\"),\n component=\"download-data\",\n icon=\"save_alt\",\n visible_if=can_download_own_data,\n )\n\n def can_delete_own_account(request):\n if not auth_is_not_delegated(request):\n return False\n\n return request.settings.allow_delete_own_account\n\n usercp.add_section(\n link=\"misago:usercp-delete-account\",\n name=pgettext_lazy(\"user options page\", \"Delete account\"),\n component=\"delete-account\",\n icon=\"cancel\",\n visible_if=can_delete_own_account,\n )\n\n def register_default_users_list_pages(self):\n users_list.add_section(\n link=\"misago:users-active-posters\",\n component=\"active-posters\",\n name=pgettext_lazy(\"users lists page\", \"Top posters\"),\n )\n\n def register_default_user_profile_pages(self):\n def can_see_names_history(request, profile):\n if request.user.is_authenticated:\n is_account_owner = profile.pk == request.user.pk\n has_permission = request.user_acl[\"can_see_users_name_history\"]\n return is_account_owner or has_permission\n return False\n\n def METHOD_NAME(request, profile):\n if request.user.is_authenticated:\n if request.user_acl[\"can_see_ban_details\"]:\n from .bans import get_user_ban\n\n return bool(get_user_ban(profile, request.cache_versions))\n return False\n return False\n\n user_profile.add_section(\n link=\"misago:user-posts\",\n name=pgettext_lazy(\"user profile page\", \"Posts\"),\n icon=\"message\",\n component=\"posts\",\n )\n user_profile.add_section(\n link=\"misago:user-threads\",\n name=pgettext_lazy(\"user profile page\", \"Threads\"),\n icon=\"forum\",\n component=\"threads\",\n )\n user_profile.add_section(\n link=\"misago:user-followers\",\n name=pgettext_lazy(\"user profile page\", \"Followers\"),\n icon=\"favorite\",\n component=\"followers\",\n )\n user_profile.add_section(\n link=\"misago:user-follows\",\n 
name=pgettext_lazy(\"user profile page\", \"Follows\"),\n icon=\"favorite_border\",\n component=\"follows\",\n )\n user_profile.add_section(\n link=\"misago:user-details\",\n name=pgettext_lazy(\"user profile page\", \"Details\"),\n icon=\"person_outline\",\n component=\"details\",\n )\n user_profile.add_section(\n link=\"misago:username-history\",\n name=pgettext_lazy(\"user profile page\", \"Username history\"),\n icon=\"card_membership\",\n component=\"username-history\",\n visible_if=can_see_names_history,\n )\n user_profile.add_section(\n link=\"misago:user-ban\",\n name=pgettext_lazy(\"user profile page\", \"Ban details\"),\n icon=\"remove_circle_outline\",\n component=\"ban-details\",\n visible_if=METHOD_NAME,\n )"}}},{"rowIdx":2087,"cells":{"id":{"kind":"number","value":2087,"string":"2,087"},"label":{"kind":"string","value":"add xml attr"},"text":{"kind":"string","value":"## Original version of code heavily based on recipe written by Wai Yip\n## Tung, released under PSF license.\n## http://code.activestate.com/recipes/534109/\n\nimport re\nimport os\nimport xml.sax.handler\n\nclass DataNode (object):\n\n def __init__ (self, **kwargs):\n self._attrs = {} # XML attributes and child elements\n self._data = None # child text data\n self._ncDict = kwargs.get ('nameChangeDict', {})\n\n def __len__ (self):\n # treat single element as a list of 1\n return 1\n\n def __getitem__ (self, key):\n if isinstance (key, str):\n return self._attrs.get(key,None)\n else:\n return [self][key]\n\n def __contains__ (self, name):\n return name in self._attrs\n\n def __nonzero__ (self):\n return bool (self._attrs or self._data)\n\n def __getattr__ (self, name):\n if name.startswith('__'):\n # need to do this for Python special methods???\n raise AttributeError (name)\n return self._attrs.get (name, None)\n\n def METHOD_NAME (self, name, value):\n change = self._ncDict.get (name)\n if change:\n name = change\n if name in self._attrs:\n # multiple attribute of the same name are represented by a list\n children = self._attrs[name]\n if not isinstance(children, list):\n children = [children]\n self._attrs[name] = children\n children.append(value)\n else:\n self._attrs[name] = value\n\n def __str__ (self):\n return self._data or ''\n\n def __repr__ (self):\n items = sorted (self._attrs.items())\n if self._data:\n items.append(('data', self._data))\n return u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items])\n\n def attributes (self):\n return self._attrs\n\n\nclass TreeBuilder (xml.sax.handler.ContentHandler):\n\n non_id_char = re.compile('[^_0-9a-zA-Z]')\n\n def __init__ (self, **kwargs):\n self._stack = []\n self._text_parts = []\n self._ncDict = kwargs.get ('nameChangeDict', {})\n self._root = DataNode (nameChangeDict = self._ncDict)\n self.current = self._root\n\n def startElement (self, name, attrs):\n self._stack.append( (self.current, self._text_parts))\n self.current = DataNode (nameChangeDict = self._ncDict)\n self._text_parts = []\n # xml attributes --> python attributes\n for k, v in attrs.items():\n self.current.METHOD_NAME (TreeBuilder._name_mangle(k), v)\n\n def endElement (self, name):\n text = ''.join (self._text_parts).strip()\n if text:\n self.current._data = text\n if self.current.attributes():\n obj = self.current\n else:\n # a text only node is simply represented by the string\n obj = text or ''\n self.current, self._text_parts = self._stack.pop()\n self.current.METHOD_NAME (TreeBuilder._name_mangle(name), obj)\n\n def characters (self, content):\n 
self._text_parts.append(content)\n\n def root (self):\n return self._root\n\n def topLevel (self):\n '''Returns top level object'''\n return self._root.attributes().values()[0]\n \n\n @staticmethod\n def _name_mangle (name):\n return TreeBuilder.non_id_char.sub('_', name)\n\n\nregexList = [ (re.compile (r'&'), '&amp;' ),\n (re.compile (r'<'), '&lt;' ),\n (re.compile (r'>'), '&gt;' ),\n (re.compile (r'\"'), '&quote;' ),\n (re.compile (r\"'\"), '&#39;' )\n ]\n\nquoteRE = re.compile (r'(\\w\\s*=\\s*\")([^\"]+)\"')\n\ndef fixQuoteValue (match):\n '''Changes all characters inside of the match'''\n quote = match.group(2)\n for regexTup in regexList:\n quote = regexTup[0].sub( regexTup[1], quote )\n return match.group(1) + quote + '\"'\n\n\ndef xml2obj (**kwargs):\n ''' Converts XML data into native Python object. Takes either\n file handle or string as input. Does NOT fix illegal characters.\n\n input source: Exactly one of the three following is needed\n filehandle - input from file handle\n contents - input from string\n filename - input from filename\n\n options:\n filtering - boolean value telling code whether or not to fileter\n input selection to remove illegal XML characters\n nameChangeDict - dictionaries of names to change in python object'''\n\n # make sure we have exactly 1 input source\n filehandle = kwargs.get ('filehandle')\n contents = kwargs.get ('contents')\n filename = kwargs.get ('filename')\n if not filehandle and not contents and not filename:\n raise RuntimeError(\"You must provide 'filehandle', 'contents', or 'filename'\")\n if filehandle and contents or \\\n filehandle and filename or \\\n contents and filename:\n raise RuntimeError(\"You must provide only ONE of 'filehandle', 'contents', or 'filename'\")\n\n # are we filtering?\n filtering = kwargs.get ('filtering')\n if filtering:\n # if we are filtering, we need to read in the contents to modify them\n if not contents:\n if not filehandle:\n try:\n filehandle = open (filename, 'r')\n except:\n raise RuntimeError(\"Failed to open '%s'\" % filename)\n contents = ''\n for line in filehandle:\n contents += line\n filehandle.close()\n filehandle = filename = ''\n contents = quoteRE.sub (fixQuoteValue, contents)\n \n ncDict = kwargs.get ('nameChangeDict', {})\n builder = TreeBuilder (nameChangeDict = ncDict)\n if contents:\n xml.sax.parseString(contents, builder)\n else:\n if not filehandle:\n try:\n filehandle = open (filename, 'r')\n except:\n raise RuntimeError(\"Failed to open '%s'\" % filename)\n xml.sax.parse(filehandle, builder)\n return builder.topLevel()"}}},{"rowIdx":2088,"cells":{"id":{"kind":"number","value":2088,"string":"2,088"},"label":{"kind":"string","value":"retry"},"text":{"kind":"string","value":"# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance\n# with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"LICENSE.txt\" file accompanying this file. This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n# OR CONDITIONS OF ANY KIND, express or implied. 
See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport re\n\n# A nosec comment is appended to the following line in order to disable the B404 check.\n# In this file the input of the module subprocess is trusted.\nimport subprocess as sub # nosec B404\nimport time\nimport webbrowser\nfrom typing import List\n\nfrom argparse import ArgumentParser, Namespace\n\nfrom pcluster.cli.commands.common import CliCommand\nfrom pcluster.constants import PCLUSTER_ISSUES_LINK\nfrom pcluster.models.cluster import Cluster\nfrom pcluster.utils import error\n\nDCV_CONNECT_SCRIPT = \"/opt/parallelcluster/scripts/pcluster_dcv_connect.sh\"\nLOGGER = logging.getLogger(__name__)\n\n\nclass DCVConnectionError(Exception):\n \"\"\"Error raised with DCV connection fails.\"\"\"\n\n pass\n\n\ndef _check_command_output(cmd):\n # A nosec comment is appended to the following line in order to disable the B602 check.\n # This is done because it's needed to enable the desired functionality. The only caller\n # of this function is _retrieve_dcv_session_url, which passes a command that is safe.\n return sub.check_output(cmd, shell=True, universal_newlines=True, stderr=sub.STDOUT).strip() # nosec B602 nosemgrep\n\n\ndef _dcv_connect(args):\n \"\"\"\n Execute pcluster dcv connect command.\n\n :param args: pcluster cli arguments.\n \"\"\"\n try:\n head_node = Cluster(args.cluster_name).head_node_instance\n except Exception as e:\n error(f\"Unable to connect to the cluster.\\n{e}\")\n else:\n head_node_ip = head_node.public_ip or head_node.private_ip\n # Prepare ssh command to execute in the head node instance\n cmd = 'ssh {CFN_USER}@{HEAD_NODE_IP} {KEY} \"{REMOTE_COMMAND} /home/{CFN_USER}\"'.format(\n CFN_USER=head_node.default_user,\n HEAD_NODE_IP=head_node_ip,\n KEY=\"-i {0}\".format(args.key_path) if args.key_path else \"\",\n REMOTE_COMMAND=DCV_CONNECT_SCRIPT,\n )\n\n try:\n url = METHOD_NAME(_retrieve_dcv_session_url, func_args=[cmd, args.cluster_name, head_node_ip], attempts=4)\n url_message = f\"Please use the following one-time URL in your browser within 30 seconds:\\n{url}\"\n\n if args.show_url:\n print(url_message)\n return\n\n try:\n if not webbrowser.open_new(url):\n raise webbrowser.Error(\"Unable to open the Web browser.\")\n except webbrowser.Error as e:\n print(f\"{e}\\n{url_message}\")\n\n except DCVConnectionError as e:\n error(\n \"Something went wrong during DCV connection.\\n{0}\"\n \"Please check the logs in the /var/log/parallelcluster/ folder \"\n \"of the head node and submit an issue {1}\\n\".format(e, PCLUSTER_ISSUES_LINK)\n )\n\n\ndef _retrieve_dcv_session_url(ssh_cmd, cluster_name, head_node_ip):\n \"\"\"Connect by ssh to the head node instance, prepare DCV session and return the DCV session URL.\"\"\"\n try:\n LOGGER.debug(\"SSH command: %s\", ssh_cmd)\n output = _check_command_output(ssh_cmd)\n # At first ssh connection, the ssh command alerts it is adding the host to the known hosts list\n if re.search(\"Permanently added .* to the list of known hosts.\", output):\n output = _check_command_output(ssh_cmd)\n\n dcv_parameters = re.search(\n r\"PclusterDcvServerPort=([\\d]+) PclusterDcvSessionId=([\\w]+) PclusterDcvSessionToken=([\\w-]+)\", output\n )\n if dcv_parameters:\n dcv_server_port = dcv_parameters.group(1)\n dcv_session_id = dcv_parameters.group(2)\n dcv_session_token = dcv_parameters.group(3)\n else:\n error(\n \"Something went wrong during DCV connection. 
Please manually execute the command:\\n{0}\\n\"\n \"If the problem persists, please check the logs in the /var/log/parallelcluster/ folder \"\n \"of the head node and submit an issue {1}\".format(ssh_cmd, PCLUSTER_ISSUES_LINK)\n )\n\n except sub.CalledProcessError as e:\n if \"{0}: No such file or directory\".format(DCV_CONNECT_SCRIPT) in e.output:\n error(\n \"The cluster {0} has been created with an old version of ParallelCluster \"\n \"without the DCV support.\".format(cluster_name)\n )\n else:\n raise DCVConnectionError(e.output)\n\n return \"https://{IP}:{PORT}?authToken={TOKEN}#{SESSION_ID}\".format(\n IP=head_node_ip, PORT=dcv_server_port, TOKEN=dcv_session_token, SESSION_ID=dcv_session_id\n )\n\n\ndef METHOD_NAME(func, func_args, attempts=1, wait=0): # pylint: disable=R1710\n \"\"\"\n Call function and re-execute it if it raises an Exception.\n\n :param func: the function to execute.\n :param func_args: the positional arguments of the function.\n :param attempts: the maximum number of attempts. Default: 1.\n :param wait: delay between attempts. Default: 0.\n :returns: the result of the function.\n \"\"\"\n while attempts:\n try:\n return func(*func_args)\n except Exception as e:\n attempts -= 1\n if not attempts:\n raise e\n\n LOGGER.debug(\"%s, retrying in %s seconds..\", e, wait)\n time.sleep(wait)\n return None\n\n\nclass DcvConnectCommand(CliCommand):\n \"\"\"Implement pcluster dcv connect command.\"\"\"\n\n # CLI\n name = \"dcv-connect\"\n help = \"Permits to connect to the head node through an interactive session by using NICE DCV.\"\n description = help\n\n def __init__(self, subparsers):\n super().__init__(subparsers, name=self.name, help=self.help, description=self.description)\n\n def register_command_args(self, parser: ArgumentParser) -> None: # noqa: D102\n parser.add_argument(\"-n\", \"--cluster-name\", help=\"Name of the cluster to connect to\", required=True)\n parser.add_argument(\"--key-path\", dest=\"key_path\", help=\"Key path of the SSH key to use for the connection\")\n parser.add_argument(\"--show-url\", action=\"store_true\", default=False, help=\"Print URL and exit\")\n\n def execute(self, args: Namespace, extra_args: List[str]) -> None: # noqa: D102 #pylint: disable=unused-argument\n _dcv_connect(args)"}}},{"rowIdx":2089,"cells":{"id":{"kind":"number","value":2089,"string":"2,089"},"label":{"kind":"string","value":"display participation result"},"text":{"kind":"string","value":"from datetime import timedelta\n\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Min, OuterRef, Subquery\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext as _, gettext_lazy\n\nfrom judge.contest_format.default import DefaultContestFormat\nfrom judge.contest_format.registry import register_contest_format\nfrom judge.utils.timedelta import nice_repr\n\n\n@register_contest_format('ioi')\nclass LegacyIOIContestFormat(DefaultContestFormat):\n name = gettext_lazy('IOI (pre-2016)')\n config_defaults = {'cumtime': False}\n \"\"\"\n cumtime: Specify True if time penalties are to be computed. 
Defaults to False.\n \"\"\"\n\n @classmethod\n def validate(cls, config):\n if config is None:\n return\n\n if not isinstance(config, dict):\n raise ValidationError('IOI-styled contest expects no config or dict as config')\n\n for key, value in config.items():\n if key not in cls.config_defaults:\n raise ValidationError('unknown config key \"%s\"' % key)\n if not isinstance(value, type(cls.config_defaults[key])):\n raise ValidationError('invalid type for config key \"%s\"' % key)\n\n def __init__(self, contest, config):\n self.config = self.config_defaults.copy()\n self.config.update(config or {})\n self.contest = contest\n\n def update_participation(self, participation):\n cumtime = 0\n score = 0\n format_data = {}\n\n queryset = (participation.submissions.values('problem_id')\n .filter(points=Subquery(\n participation.submissions.filter(problem_id=OuterRef('problem_id'))\n .order_by('-points').values('points')[:1]))\n .annotate(time=Min('submission__date'))\n .values_list('problem_id', 'time', 'points'))\n\n for problem_id, time, points in queryset:\n if self.config['cumtime']:\n dt = (time - participation.start).total_seconds()\n if points:\n cumtime += dt\n else:\n dt = 0\n\n format_data[str(problem_id)] = {'points': points, 'time': dt}\n score += points\n\n participation.cumtime = max(cumtime, 0)\n participation.score = round(score, self.contest.points_precision)\n participation.tiebreaker = 0\n participation.format_data = format_data\n participation.save()\n\n def display_user_problem(self, participation, contest_problem):\n format_data = (participation.format_data or {}).get(str(contest_problem.id))\n if format_data:\n return format_html(\n '{points}
<div class=\"solving-time\">{time}</div></a></td>
',\n state=(('pretest-' if self.contest.run_pretests_only and contest_problem.is_pretested else '') +\n self.best_solution_state(format_data['points'], contest_problem.points)),\n url=reverse('contest_user_submissions',\n args=[self.contest.key, participation.user.user.username, contest_problem.problem.code]),\n points=floatformat(format_data['points']),\n time=nice_repr(timedelta(seconds=format_data['time']), 'noday') if self.config['cumtime'] else '',\n )\n else:\n return mark_safe('')\n\n def METHOD_NAME(self, participation):\n return format_html(\n '{points}
<div class=\"solving-time\">{cumtime}</div></a></td>
',\n url=reverse('contest_all_user_submissions',\n args=[self.contest.key, participation.user.user.username]),\n points=floatformat(participation.score, -self.contest.points_precision),\n cumtime=nice_repr(timedelta(seconds=participation.cumtime), 'noday') if self.config['cumtime'] else '',\n )\n\n def get_short_form_display(self):\n yield _('The maximum score submission for each problem will be used.')\n\n if self.config['cumtime']:\n yield _('Ties will be broken by the sum of the last score altering submission time on problems with a '\n 'non-zero score.')\n else:\n yield _('Ties by score will **not** be broken.')"}}},{"rowIdx":2090,"cells":{"id":{"kind":"number","value":2090,"string":"2,090"},"label":{"kind":"string","value":"d set p"},"text":{"kind":"string","value":"\"\"\"DistributedNode module: contains the DistributedNode class\"\"\"\n\nfrom panda3d.core import NodePath\nfrom . import GridParent\nfrom . import DistributedObject\n\n\nclass DistributedNode(DistributedObject.DistributedObject, NodePath):\n \"\"\"Distributed Node class:\"\"\"\n\n def __init__(self, cr):\n if not hasattr(self, 'DistributedNode_initialized'):\n self.DistributedNode_initialized = 1\n self.gotStringParentToken = 0\n DistributedObject.DistributedObject.__init__(self, cr)\n if not self.this:\n NodePath.__init__(self, \"DistributedNode\")\n\n # initialize gridParent\n self.gridParent = None\n\n def disable(self):\n if self.activeState != DistributedObject.ESDisabled:\n if not self.isEmpty():\n self.reparentTo(hidden)\n DistributedObject.DistributedObject.disable(self)\n\n def delete(self):\n if not hasattr(self, 'DistributedNode_deleted'):\n self.DistributedNode_deleted = 1\n if not self.isEmpty():\n self.removeNode()\n if self.gridParent:\n self.gridParent.delete()\n DistributedObject.DistributedObject.delete(self)\n\n def generate(self):\n DistributedObject.DistributedObject.generate(self)\n self.gotStringParentToken = 0\n\n def setLocation(self, parentId, zoneId, teleport=0):\n # Redefine DistributedObject setLocation, so that when\n # location is set to the ocean grid, we can update our parenting\n # under gridParent\n DistributedObject.DistributedObject.setLocation(self, parentId, zoneId)\n parentObj = self.cr.doId2do.get(parentId)\n if parentObj:\n # Make sure you in a zone that is in the grid before making a GridParent\n if (parentObj.isGridParent() and (zoneId >= parentObj.startingZone)):\n if not self.gridParent:\n self.gridParent = GridParent.GridParent(self)\n self.gridParent.setGridParent(parentObj, zoneId, teleport)\n else:\n if self.gridParent:\n self.gridParent.delete()\n self.gridParent = None\n else:\n if self.gridParent:\n self.gridParent.delete()\n self.gridParent = None\n\n def __cmp__(self, other):\n # DistributedNode inherits from NodePath, which inherits a\n # definition of __cmp__ from FFIExternalObject that uses the\n # NodePath's compareTo() method to compare different\n # NodePaths. 
But we don't want this behavior for\n # DistributedNodes; DistributedNodes should only be compared\n # pointerwise.\n if self is other:\n return 0\n else:\n return 1\n\n ### setParent ###\n\n def b_setParent(self, parentToken):\n if isinstance(parentToken, str):\n self.setParentStr(parentToken)\n else:\n self.setParent(parentToken)\n # it's important to call the local setParent first.\n self.d_setParent(parentToken)\n\n def d_setParent(self, parentToken):\n if isinstance(parentToken, str):\n self.sendUpdate(\"setParentStr\", [parentToken])\n else:\n self.sendUpdate(\"setParent\", [parentToken])\n\n def setParentStr(self, parentTokenStr):\n assert self.notify.debug('setParentStr: %s' % parentTokenStr)\n assert self.notify.debug('isGenerated: %s' % self.isGenerated())\n if len(parentTokenStr) > 0:\n self.do_setParent(parentTokenStr)\n self.gotStringParentToken = 1\n\n def setParent(self, parentToken):\n assert self.notify.debug('setParent: %s' % parentToken)\n assert self.notify.debug('isGenerated: %s' % self.isGenerated())\n # if we are not yet generated and we just got a parent token\n # as a string, ignore whatever value comes in here\n justGotRequiredParentAsStr = ((not self.isGenerated()) and\n self.gotStringParentToken)\n if not justGotRequiredParentAsStr:\n if parentToken != 0:\n self.do_setParent(parentToken)\n self.gotStringParentToken = 0\n\n def do_setParent(self, parentToken):\n \"\"\"do_setParent(self, int parentToken)\n\n This function is defined simply to allow a derived class (like\n DistributedAvatar) to override the behavior of setParent if\n desired.\n \"\"\"\n if not self.isDisabled():\n self.cr.parentMgr.requestReparent(self, parentToken)\n\n ###### set pos and hpr functions #######\n\n # setX provided by NodePath\n def d_setX(self, x):\n self.sendUpdate(\"setX\", [x])\n\n # setY provided by NodePath\n def d_setY(self, y):\n self.sendUpdate(\"setY\", [y])\n\n # setZ provided by NodePath\n def d_setZ(self, z):\n self.sendUpdate(\"setZ\", [z])\n\n # setH provided by NodePath\n def d_setH(self, h):\n self.sendUpdate(\"setH\", [h])\n\n # setP provided by NodePath\n def METHOD_NAME(self, p):\n self.sendUpdate(\"setP\", [p])\n\n # setR provided by NodePath\n def d_setR(self, r):\n self.sendUpdate(\"setR\", [r])\n\n def setXY(self, x, y):\n self.setX(x)\n self.setY(y)\n def d_setXY(self, x, y):\n self.sendUpdate(\"setXY\", [x, y])\n\n def setXZ(self, x, z):\n self.setX(x)\n self.setZ(z)\n def d_setXZ(self, x, z):\n self.sendUpdate(\"setXZ\", [x, z])\n\n # setPos provided by NodePath\n def d_setPos(self, x, y, z):\n self.sendUpdate(\"setPos\", [x, y, z])\n\n # setHpr provided by NodePath\n def d_setHpr(self, h, p, r):\n self.sendUpdate(\"setHpr\", [h, p, r])\n\n def setXYH(self, x, y, h):\n self.setX(x)\n self.setY(y)\n self.setH(h)\n def d_setXYH(self, x, y, h):\n self.sendUpdate(\"setXYH\", [x, y, h])\n\n def setXYZH(self, x, y, z, h):\n self.setPos(x, y, z)\n self.setH(h)\n def d_setXYZH(self, x, y, z, h):\n self.sendUpdate(\"setXYZH\", [x, y, z, h])\n\n # setPosHpr provided by NodePath\n def d_setPosHpr(self, x, y, z, h, p, r):\n self.sendUpdate(\"setPosHpr\", [x, y, z, h, p, r])"}}},{"rowIdx":2091,"cells":{"id":{"kind":"number","value":2091,"string":"2,091"},"label":{"kind":"string","value":"test delete"},"text":{"kind":"string","value":"# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nimport shutil\nimport unittest\nfrom unittest import mock\n\nfrom azure.monitor.opentelemetry.exporter._storage import (\n LocalFileBlob,\n LocalFileStorage,\n _now,\n _seconds,\n)\n\nTEST_FOLDER = os.path.abspath(\".test.storage\")\n\ndef throw(exc_type, *args, **kwargs):\n def func(*_args, **_kwargs):\n raise exc_type(*args, **kwargs)\n\n return func\n\n\ndef clean_folder(folder):\n if os.path.isfile(folder):\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. Reason: %s' % (file_path, e))\n\n\n# pylint: disable=no-self-use\nclass TestLocalFileBlob(unittest.TestCase):\n @classmethod\n def setup_class(cls):\n os.makedirs(TEST_FOLDER, exist_ok=True)\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(TEST_FOLDER, True)\n\n def tearDown(self):\n clean_folder(TEST_FOLDER)\n\n def METHOD_NAME(self):\n blob = LocalFileBlob(os.path.join(TEST_FOLDER, \"foobar\"))\n blob.delete()\n\n def test_get(self):\n blob = LocalFileBlob(os.path.join(TEST_FOLDER, \"foobar\"))\n self.assertIsNone(blob.get())\n blob.get()\n\n def test_put_error(self):\n blob = LocalFileBlob(os.path.join(TEST_FOLDER, \"foobar\"))\n with mock.patch(\"os.rename\", side_effect=throw(Exception)):\n blob.put([1, 2, 3])\n\n @unittest.skip(\"transient storage\")\n def test_put(self):\n blob = LocalFileBlob(os.path.join(TEST_FOLDER, \"foobar.blob\"))\n test_input = (1, 2, 3)\n blob.put(test_input)\n self.assertGreaterEqual(len(os.listdir(TEST_FOLDER)), 1)\n\n @unittest.skip(\"transient storage\")\n def test_lease_error(self):\n blob = LocalFileBlob(os.path.join(TEST_FOLDER, \"foobar.blob\"))\n blob.delete()\n self.assertEqual(blob.lease(0.01), None)\n\n\n# pylint: disable=protected-access\nclass TestLocalFileStorage(unittest.TestCase):\n @classmethod\n def setup_class(cls):\n os.makedirs(TEST_FOLDER, exist_ok=True)\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(TEST_FOLDER, True)\n\n def test_get_nothing(self):\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"test\", \"a\")) as stor:\n pass\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"test\")) as stor:\n self.assertIsNone(stor.get())\n\n def test_get(self):\n now = _now()\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"foo\")) as stor:\n stor.put((1, 2, 3), lease_period=10)\n with mock.patch(\"azure.monitor.opentelemetry.exporter._storage._now\") as m:\n m.return_value = now - _seconds(30 * 24 * 60 * 60)\n stor.put((1, 2, 3))\n stor.put((1, 2, 3), lease_period=10)\n with mock.patch(\"os.rename\"):\n stor.put((1, 2, 3))\n with mock.patch(\"os.rename\"):\n stor.put((1, 2, 3))\n with mock.patch(\"os.remove\", side_effect=throw(Exception)):\n with mock.patch(\"os.rename\", side_effect=throw(Exception)):\n self.assertIsNone(stor.get())\n self.assertIsNone(stor.get())\n\n def test_put(self):\n test_input = (1, 2, 3)\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"bar\")) as stor:\n stor.put(test_input, 0)\n self.assertEqual(stor.get().get(), test_input)\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"bar\")) as stor:\n self.assertEqual(stor.get().get(), test_input)\n with mock.patch(\"os.rename\", side_effect=throw(Exception)):\n self.assertIsNone(stor.put(test_input))\n\n def test_put_max_size(self):\n test_input = (1, 2, 3)\n with 
LocalFileStorage(os.path.join(TEST_FOLDER, \"asd\")) as stor:\n size_mock = mock.Mock()\n size_mock.return_value = False\n stor._check_storage_size = size_mock\n stor.put(test_input)\n self.assertEqual(stor.get(), None)\n\n def test_check_storage_size_full(self):\n test_input = (1, 2, 3)\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"asd2\"), 1) as stor:\n stor.put(test_input)\n self.assertFalse(stor._check_storage_size())\n\n def test_check_storage_size_not_full(self):\n test_input = (1, 2, 3)\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"asd3\"), 1000) as stor:\n stor.put(test_input)\n self.assertTrue(stor._check_storage_size())\n\n def test_check_storage_size_no_files(self):\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"asd3\"), 1000) as stor:\n self.assertTrue(stor._check_storage_size())\n\n def test_check_storage_size_links(self):\n test_input = (1, 2, 3)\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"asd4\"), 1000) as stor:\n stor.put(test_input)\n with mock.patch(\"os.path.islink\") as os_mock:\n os_mock.return_value = True\n self.assertTrue(stor._check_storage_size())\n\n def test_check_storage_size_error(self):\n test_input = (1, 2, 3)\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"asd5\"), 1) as stor:\n with mock.patch(\"os.path.getsize\", side_effect=throw(OSError)):\n stor.put(test_input)\n with mock.patch(\"os.path.islink\") as os_mock:\n os_mock.return_value = True\n self.assertTrue(stor._check_storage_size())\n\n def test_maintenance_routine(self):\n with mock.patch(\"os.makedirs\") as m:\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"baz\")) as stor:\n m.return_value = None\n with mock.patch(\"os.makedirs\", side_effect=throw(Exception)):\n stor = LocalFileStorage(os.path.join(TEST_FOLDER, \"baz\"))\n stor.close()\n with mock.patch(\"os.listdir\", side_effect=throw(Exception)):\n stor = LocalFileStorage(os.path.join(TEST_FOLDER, \"baz\"))\n stor.close()\n with LocalFileStorage(os.path.join(TEST_FOLDER, \"baz\")) as stor:\n with mock.patch(\"os.listdir\", side_effect=throw(Exception)):\n stor._maintenance_routine()\n with mock.patch(\"os.path.isdir\", side_effect=throw(Exception)):\n stor._maintenance_routine()"}}},{"rowIdx":2092,"cells":{"id":{"kind":"number","value":2092,"string":"2,092"},"label":{"kind":"string","value":"test create association in new diagram should"},"text":{"kind":"string","value":"import pytest\n\nfrom gaphor import UML\nfrom gaphor.core.modeling import Diagram\nfrom gaphor.diagram.tests.fixtures import allow, connect, disconnect, get_connected\nfrom gaphor.UML.classes.association import AssociationItem\nfrom gaphor.UML.classes.klass import ClassItem\n\n\n@pytest.fixture\ndef connected_association(create):\n asc = create(AssociationItem)\n c1 = create(ClassItem, UML.Class)\n c2 = create(ClassItem, UML.Class)\n\n connect(asc, asc.head, c1)\n assert asc.subject is None # no UML metaclass yet\n\n connect(asc, asc.tail, c2)\n assert asc.subject is not None\n\n return asc, c1, c2\n\n\n@pytest.fixture\ndef clone(create):\n def _clone(item):\n new = create(type(item))\n new.subject = item.subject\n new.head_subject = item.head_subject\n new.tail_subject = item.tail_subject\n return new\n\n return _clone\n\n\ndef test_glue_to_class(connected_association):\n asc, c1, c2 = connected_association\n\n glued = allow(asc, asc.head, c1)\n assert glued\n\n connect(asc, asc.head, c1)\n\n glued = allow(asc, asc.tail, c2)\n assert glued\n\n\ndef test_association_item_connect(connected_association, element_factory):\n asc, c1, c2 = 
connected_association\n\n # Diagram, Class *2, Property *2, Association\n assert len(element_factory.lselect()) == 9\n assert asc.head_subject is not None\n assert asc.tail_subject is not None\n\n\ndef test_association_item_reconnect_copies_properties(connected_association, create):\n asc, c1, c2 = connected_association\n c3 = create(ClassItem, UML.Class)\n\n asc.subject.name = \"Name\"\n\n a = asc.subject\n\n connect(asc, asc.tail, c3)\n\n assert a is not asc.subject\n ends = [p.type for p in asc.subject.memberEnd]\n assert c1.subject in ends\n assert c3.subject in ends\n assert c2.subject not in ends\n assert asc.subject.name == \"Name\"\n\n\ndef test_association_item_reconnect_with_navigability(connected_association, create):\n asc, c1, c2 = connected_association\n c3 = create(ClassItem, UML.Class)\n\n UML.recipes.set_navigability(asc.subject, asc.tail_subject, True)\n connect(asc, asc.tail, c3)\n\n assert asc.tail_subject.navigability is True\n\n\ndef test_association_item_reconnect_with_aggregation(connected_association, create):\n asc, c1, c2 = connected_association\n c3 = create(ClassItem, UML.Class)\n\n asc.tail_subject.aggregation = \"composite\"\n connect(asc, asc.tail, c3)\n\n assert asc.tail_subject.aggregation == \"composite\"\n\n\ndef test_disconnect_should_disconnect_model(connected_association, element_factory):\n asc, c1, c2 = connected_association\n\n disconnect(asc, asc.head)\n disconnect(asc, asc.tail)\n assert c1 is not get_connected(asc, asc.head)\n assert c2 is not get_connected(asc, asc.tail)\n\n assert not asc.subject\n assert not asc.head_subject\n assert not asc.tail_subject\n assert not element_factory.lselect(UML.Property)\n\n\ndef test_disconnect_of_second_association_should_leave_model_in_tact(\n connected_association, clone\n):\n asc, c1, c2 = connected_association\n new = clone(asc)\n\n disconnect(new, new.head)\n assert asc.subject.memberEnd[0].type is c1.subject\n assert asc.subject.memberEnd[1].type is c2.subject\n assert new.subject is asc.subject\n\n\ndef test_disconnect_of_navigable_end_should_remove_owner_relationship(\n connected_association, element_factory\n):\n asc, c1, c2 = connected_association\n\n UML.recipes.set_navigability(asc.subject, asc.head_subject, True)\n\n assert asc.head_subject in c2.subject.ownedAttribute\n\n disconnect(asc, asc.head)\n\n assert not asc.subject\n assert not asc.head_subject\n assert not asc.tail_subject\n assert not element_factory.lselect(UML.Property)\n\n\ndef test_allow_reconnect_for_single_presentation(connected_association, create):\n asc, c1, c2 = connected_association\n c3 = create(ClassItem, UML.Class)\n\n assert allow(asc, asc.head, c3)\n\n\ndef test_allow_reconnect_on_same_class_for_multiple_presentations(\n connected_association, clone, create\n):\n asc, c1, c2 = connected_association\n new = clone(asc)\n\n assert allow(new, new.head, c1)\n assert allow(new, new.tail, c2)\n\n\ndef test_allow_reconnect_if_only_one_connected_presentations(\n connected_association, clone, create\n):\n asc, c1, c2 = connected_association\n clone(asc)\n\n c3 = create(ClassItem, UML.Class)\n\n assert allow(asc, asc.head, c3)\n\n\ndef METHOD_NAME(\n connected_association, element_factory\n):\n asc, c1, c2 = connected_association\n\n diagram2 = element_factory.create(Diagram)\n c3 = diagram2.create(ClassItem, subject=c1.subject)\n c4 = diagram2.create(ClassItem, subject=c2.subject)\n asc2 = diagram2.create(AssociationItem)\n\n connect(asc2, asc2.head, c3)\n connect(asc2, asc2.tail, c4)\n\n assert asc.subject is asc2.subject\n 
assert asc.head_subject is asc2.head_subject\n assert asc.tail_subject is asc2.tail_subject\n\n\ndef test_create_association_in_new_diagram_reversed_should_reuse_existing(\n connected_association, element_factory\n):\n asc, c1, c2 = connected_association\n\n diagram2 = element_factory.create(Diagram)\n c3 = diagram2.create(ClassItem, subject=c1.subject)\n c4 = diagram2.create(ClassItem, subject=c2.subject)\n asc2 = diagram2.create(AssociationItem)\n\n connect(asc2, asc2.tail, c3)\n connect(asc2, asc2.head, c4)\n\n assert asc.subject is asc2.subject\n assert asc.head_subject is asc2.tail_subject\n assert asc.tail_subject is asc2.head_subject\n\n\ndef test_disconnect_association_in_new_diagram_should_clear_ends(\n connected_association, element_factory\n):\n asc, c1, c2 = connected_association\n\n diagram2 = element_factory.create(Diagram)\n c3 = diagram2.create(ClassItem, subject=c1.subject)\n c4 = diagram2.create(ClassItem, subject=c2.subject)\n asc2 = diagram2.create(AssociationItem)\n\n connect(asc2, asc2.tail, c3)\n connect(asc2, asc2.head, c4)\n disconnect(asc, asc.head)\n\n assert not asc.subject\n assert not asc.head_subject\n assert not asc.tail_subject"}}},{"rowIdx":2093,"cells":{"id":{"kind":"number","value":2093,"string":"2,093"},"label":{"kind":"string","value":"test add noise column df"},"text":{"kind":"string","value":"import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.core.fromnumeric import sort\n\nfrom autogluon.core.utils.feature_selection import *\nfrom autogluon.core.utils.utils import unevaluated_fi_df_template\n\n\ndef evaluated_fi_df_template(features, importance=None, n=None):\n rng = np.random.default_rng(0)\n importance_df = pd.DataFrame({\"name\": features})\n importance_df[\"importance\"] = rng.standard_normal(len(features)) if importance is None else importance\n importance_df[\"stddev\"] = rng.standard_normal(len(features))\n importance_df[\"p_value\"] = None\n importance_df[\"n\"] = 5 if n is None else n\n importance_df.set_index(\"name\", inplace=True)\n importance_df.index.name = None\n return importance_df\n\n\n@pytest.fixture\ndef sample_features():\n return [\"a\", \"b\", \"c\", \"d\", \"e\"]\n\n\n@pytest.fixture\ndef sample_importance_df_1(sample_features):\n return evaluated_fi_df_template(sample_features, importance=[0.2, 0.2, None, 1.0, None], n=[10, 5, 0, 5, 0])\n\n\n@pytest.fixture\ndef sample_importance_df_2(sample_features):\n return evaluated_fi_df_template(sample_features, importance=[-0.1, -0.1, 0.1, None, None], n=[5, 10, 10, 0, 0])\n\n\ndef METHOD_NAME():\n # test noise columns are appended to input dataframe and feature_metadata\n X = pd.DataFrame({\"a\": [1, 2]})\n args = {\"rng\": np.random.default_rng(0), \"count\": 2}\n X_noised, noise_columns = add_noise_column(X, **args)\n expected_features = X.columns.tolist() + noise_columns\n assert expected_features == X_noised.columns.tolist()\n\n\ndef test_merge_importance_dfs_base(sample_features):\n # test the scenario when previous feature importance df is none\n prev_df, curr_df = None, unevaluated_fi_df_template(sample_features)\n assert merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set()) is curr_df\n\n\ndef test_merge_importance_dfs_same_model(sample_features, sample_importance_df_1, sample_importance_df_2):\n # test the scenario where previous feature importance df exists and its importance estimates come from the same fitted model\n prev_df, curr_df = sample_importance_df_1, sample_importance_df_2\n result_df = merge_importance_dfs(prev_df, curr_df, 
using_prev_fit_fi=set())\n assert [score if score == score else None for score in result_df[\"importance\"].tolist()] == [0.0, 0.1, 0.1, 1.0, None]\n assert result_df[\"n\"].tolist() == [15, 15, 10, 5, 0]\n\n\ndef test_merge_importance_dfs_different_model(sample_features, sample_importance_df_1, sample_importance_df_2):\n # test the scenario where previous feature importance df exists and its importance estimates come from a different fitted model\n prev_df, curr_df = sample_importance_df_1, sample_importance_df_2\n using_prev_fit_fi = set(sample_features)\n result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index()\n assert len(using_prev_fit_fi) == 2\n assert [score if score == score else None for score in result_df[\"importance\"].tolist()] == [-0.1, -0.1, 0.1, 1.0, None]\n assert result_df[\"n\"].tolist() == [5, 10, 10, 5, 0]\n\n\ndef test_merge_importance_dfs_all(sample_features, sample_importance_df_1, sample_importance_df_2):\n # test the scenario where previous feature importance df exists and its importance estimates come from both same and different fitted models\n prev_df, curr_df = sample_importance_df_1, sample_importance_df_2\n using_prev_fit_fi = set([sample_features[0]])\n result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index()\n assert [score if score == score else None for score in result_df[\"importance\"].tolist()] == [-0.1, 0.0, 0.1, 1.0, None]\n assert result_df[\"n\"].tolist() == [5, 15, 10, 5, 0]\n assert using_prev_fit_fi == set()\n\n\ndef test_sort_features_by_priority_base(sample_features):\n # test the ordering of feature importance computation when no prior feature importance computation was done\n sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=None, using_prev_fit_fi=set())\n assert sorted_features == sample_features\n\n\ndef test_sort_features_by_priority_same_model(sample_features):\n # test the ordering of feature importance computation when prior feature importance computation from the same fitted model was done\n prev_importance_df = evaluated_fi_df_template(sample_features)\n sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=set())\n assert sorted_features == prev_importance_df.sort_values(\"importance\").index.tolist()\n\n\ndef test_sort_features_by_priority_different_model(sample_features):\n # test the ordering of feature importance computation when prior feature importance computation from a different fitted model was done\n prev_importance_df = evaluated_fi_df_template(sample_features)\n using_prev_fit_fi = sample_features[-2:]\n sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi)\n sorted_prev_fit_features = prev_importance_df[prev_importance_df.index.isin(using_prev_fit_fi)].sort_values(\"importance\").index.tolist()\n sorted_curr_fit_features = prev_importance_df[~prev_importance_df.index.isin(using_prev_fit_fi)].sort_values(\"importance\").index.tolist()\n expected_features = sorted_prev_fit_features + sorted_curr_fit_features\n assert sorted_features == expected_features\n\n\ndef test_sort_features_by_priority_all(sample_features):\n # test the ordering of feature importance computation when feature impotance computation comes from mix of current and previous fit models,\n # and some feature are unevaluated\n length = len(sample_features)\n 
using_prev_fit_fi = set(sample_features[: length // 3])\n evaluated_rows, unevaluated_rows = evaluated_fi_df_template(sample_features[: length // 2]), unevaluated_fi_df_template(sample_features[length // 2 :])\n prev_importance_df = pd.concat([evaluated_rows, unevaluated_rows])\n sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi)\n unevaluated_features = unevaluated_rows.index.tolist()\n sorted_prev_fit_features = (\n evaluated_rows[(~evaluated_rows.index.isin(sample_features[length // 2 :])) & (evaluated_rows.index.isin(using_prev_fit_fi))]\n .sort_values(\"importance\")\n .index.tolist()\n )\n sorted_curr_fit_features = (\n evaluated_rows[(~evaluated_rows.index.isin(sample_features[length // 2 :])) & (~evaluated_rows.index.isin(using_prev_fit_fi))]\n .sort_values(\"importance\")\n .index.tolist()\n )\n expected_features = unevaluated_features + sorted_prev_fit_features + sorted_curr_fit_features\n assert sorted_features == expected_features"}}},{"rowIdx":2094,"cells":{"id":{"kind":"number","value":2094,"string":"2,094"},"label":{"kind":"string","value":"test nonhashable"},"text":{"kind":"string","value":"import unittest\nimport twowaymap\n\nclass TestTwoWayMap(unittest.TestCase):\n def assertTwoWayMap(self, twmap, forward, reverse):\n map_repr = (\n { k: twmap.lookup_left(k) for k in twmap.left_all() },\n { k: twmap.lookup_right(k) for k in twmap.right_all() }\n )\n self.assertEqual(map_repr, (forward, reverse))\n\n def test_set_list(self):\n tmap = twowaymap.TwoWayMap(left=set, right=list)\n\n self.assertFalse(tmap)\n tmap.insert(1, \"a\")\n self.assertTrue(tmap)\n self.assertTwoWayMap(tmap, {1: [\"a\"]}, {\"a\": {1}})\n\n tmap.insert(1, \"a\") # should be a no-op, since this pair already exists\n tmap.insert(1, \"b\")\n tmap.insert(2, \"a\")\n self.assertTwoWayMap(tmap, {1: [\"a\", \"b\"], 2: [\"a\"]}, {\"a\": {1,2}, \"b\": {1}})\n\n tmap.insert(1, \"b\")\n tmap.insert(2, \"b\")\n self.assertTwoWayMap(tmap, {1: [\"a\", \"b\"], 2: [\"a\", \"b\"]}, {\"a\": {1,2}, \"b\": {1,2}})\n\n tmap.remove(1, \"b\")\n tmap.remove(2, \"b\")\n self.assertTwoWayMap(tmap, {1: [\"a\"], 2: [\"a\"]}, {\"a\": {1,2}})\n\n tmap.insert(1, \"b\")\n tmap.insert(2, \"b\")\n tmap.remove_left(1)\n self.assertTwoWayMap(tmap, {2: [\"a\", \"b\"]}, {\"a\": {2}, \"b\": {2}})\n\n tmap.insert(1, \"a\")\n tmap.insert(2, \"b\")\n tmap.remove_right(\"b\")\n self.assertTwoWayMap(tmap, {1: [\"a\"], 2: [\"a\"]}, {\"a\": {1,2}})\n\n self.assertTrue(tmap)\n tmap.clear()\n self.assertTwoWayMap(tmap, {}, {})\n self.assertFalse(tmap)\n\n def test_set_single(self):\n tmap = twowaymap.TwoWayMap(left=set, right=\"single\")\n\n self.assertFalse(tmap)\n tmap.insert(1, \"a\")\n self.assertTrue(tmap)\n self.assertTwoWayMap(tmap, {1: \"a\"}, {\"a\": {1}})\n\n tmap.insert(1, \"a\") # should be a no-op, since this pair already exists\n tmap.insert(1, \"b\")\n tmap.insert(2, \"a\")\n self.assertTwoWayMap(tmap, {1: \"b\", 2: \"a\"}, {\"a\": {2}, \"b\": {1}})\n\n tmap.insert(1, \"b\")\n tmap.insert(2, \"b\")\n self.assertTwoWayMap(tmap, {1: \"b\", 2: \"b\"}, {\"b\": {1,2}})\n\n tmap.remove(1, \"b\")\n self.assertTwoWayMap(tmap, {2: \"b\"}, {\"b\": {2}})\n tmap.remove(2, \"b\")\n self.assertTwoWayMap(tmap, {}, {})\n\n tmap.insert(1, \"b\")\n tmap.insert(2, \"b\")\n self.assertTwoWayMap(tmap, {1: \"b\", 2: \"b\"}, {\"b\": {1,2}})\n tmap.remove_left(1)\n self.assertTwoWayMap(tmap, {2: \"b\"}, {\"b\": {2}})\n\n tmap.insert(1, \"a\")\n tmap.insert(2, 
\"b\")\n tmap.remove_right(\"b\")\n self.assertTwoWayMap(tmap, {1: \"a\"}, {\"a\": {1}})\n\n self.assertTrue(tmap)\n tmap.clear()\n self.assertTwoWayMap(tmap, {}, {})\n self.assertFalse(tmap)\n\n def test_strict_list(self):\n tmap = twowaymap.TwoWayMap(left=\"strict\", right=list)\n\n self.assertFalse(tmap)\n tmap.insert(1, \"a\")\n self.assertTrue(tmap)\n self.assertTwoWayMap(tmap, {1: [\"a\"]}, {\"a\": 1})\n\n tmap.insert(1, \"a\") # should be a no-op, since this pair already exists\n tmap.insert(1, \"b\")\n with self.assertRaises(ValueError):\n tmap.insert(2, \"a\")\n self.assertTwoWayMap(tmap, {1: [\"a\", \"b\"]}, {\"a\": 1, \"b\": 1})\n\n tmap.insert(1, \"b\")\n with self.assertRaises(ValueError):\n tmap.insert(2, \"b\")\n tmap.insert(2, \"c\")\n self.assertTwoWayMap(tmap, {1: [\"a\", \"b\"], 2: [\"c\"]}, {\"a\": 1, \"b\": 1, \"c\": 2})\n\n tmap.remove(1, \"b\")\n self.assertTwoWayMap(tmap, {1: [\"a\"], 2: [\"c\"]}, {\"a\": 1, \"c\": 2})\n tmap.remove(2, \"b\")\n self.assertTwoWayMap(tmap, {1: [\"a\"], 2: [\"c\"]}, {\"a\": 1, \"c\": 2})\n\n tmap.insert(1, \"b\")\n with self.assertRaises(ValueError):\n tmap.insert(2, \"b\")\n self.assertTwoWayMap(tmap, {1: [\"a\", \"b\"], 2: [\"c\"]}, {\"a\": 1, \"b\": 1, \"c\": 2})\n tmap.remove_left(1)\n self.assertTwoWayMap(tmap, {2: [\"c\"]}, {\"c\": 2})\n\n tmap.insert(1, \"a\")\n tmap.insert(2, \"b\")\n tmap.remove_right(\"b\")\n self.assertTwoWayMap(tmap, {1: [\"a\"], 2: [\"c\"]}, {\"a\": 1, \"c\": 2})\n\n self.assertTrue(tmap)\n tmap.clear()\n self.assertTwoWayMap(tmap, {}, {})\n self.assertFalse(tmap)\n\n def test_strict_single(self):\n tmap = twowaymap.TwoWayMap(left=\"strict\", right=\"single\")\n tmap.insert(1, \"a\")\n tmap.insert(2, \"b\")\n tmap.insert(2, \"c\")\n self.assertTwoWayMap(tmap, {1: \"a\", 2: \"c\"}, {\"a\": 1, \"c\": 2})\n with self.assertRaises(ValueError):\n tmap.insert(2, \"a\")\n tmap.insert(2, \"c\") # This pair already exists, so not an error.\n self.assertTwoWayMap(tmap, {1: \"a\", 2: \"c\"}, {\"a\": 1, \"c\": 2})\n\n def METHOD_NAME(self):\n # Test that we don't get into an inconsistent state if we attempt to use a non-hashable value.\n tmap = twowaymap.TwoWayMap(left=list, right=list)\n tmap.insert(1, \"a\")\n self.assertTwoWayMap(tmap, {1: [\"a\"]}, {\"a\": [1]})\n\n with self.assertRaises(TypeError):\n tmap.insert(1, {})\n with self.assertRaises(TypeError):\n tmap.insert({}, \"a\")\n\n self.assertTwoWayMap(tmap, {1: [\"a\"]}, {\"a\": [1]})\n\n\nif __name__ == \"__main__\":\n unittest.main()"}}},{"rowIdx":2095,"cells":{"id":{"kind":"number","value":2095,"string":"2,095"},"label":{"kind":"string","value":"get default display mode"},"text":{"kind":"string","value":"# ***************************************************************************\n# * Copyright (c) 2017 Markus Hovorka *\n# * *\n# * This file is part of the FreeCAD CAx development system. *\n# * *\n# * This program is free software; you can redistribute it and/or modify *\n# * it under the terms of the GNU Lesser General Public License (LGPL) *\n# * as published by the Free Software Foundation; either version 2 of *\n# * the License, or (at your option) any later version. *\n# * for detail see the LICENCE text file. *\n# * *\n# * This program is distributed in the hope that it will be useful, *\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of *\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *\n# * GNU Library General Public License for more details. 
*\n# * *\n# * You should have received a copy of the GNU Library General Public *\n# * License along with this program; if not, write to the Free Software *\n# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *\n# * USA *\n# * *\n# ***************************************************************************\n\n__title__ = \"FreeCAD FEM solver equation base object\"\n__author__ = \"Markus Hovorka\"\n__url__ = \"https://www.freecad.org\"\n\n## \\addtogroup FEM\n# @{\n\nimport FreeCAD\n\nif FreeCAD.GuiUp:\n from pivy import coin\n\n\nclass BaseProxy(object):\n\n BaseType = \"App::FeaturePython\"\n\n def __init__(self, obj):\n obj.Proxy = self\n obj.addProperty(\n \"App::PropertyLinkSubList\", \"References\",\n \"Base\", \"\")\n\n def execute(self, obj):\n return True\n\n\nclass BaseViewProxy(object):\n\n def __init__(self, vobj):\n vobj.Proxy = self\n\n def attach(self, vobj):\n default = coin.SoGroup()\n vobj.addDisplayMode(default, \"Default\")\n\n def getDisplayModes(self, obj):\n \"Return a list of display modes.\"\n modes = [\"Default\"]\n return modes\n\n def METHOD_NAME(self):\n return \"Default\"\n\n def setDisplayMode(self, mode):\n return mode\n\n\nclass DeformationProxy(BaseProxy):\n pass\n\n\nclass DeformationViewProxy(BaseViewProxy):\n\n def getIcon(self):\n return \":/icons/FEM_EquationDeformation.svg\"\n\n\nclass ElasticityProxy(BaseProxy):\n pass\n\n\nclass ElasticityViewProxy(BaseViewProxy):\n\n def getIcon(self):\n return \":/icons/FEM_EquationElasticity.svg\"\n\n\nclass ElectricforceProxy(BaseProxy):\n pass\n\n\nclass ElectricforceViewProxy(BaseViewProxy):\n\n def getIcon(self):\n return \":/icons/FEM_EquationElectricforce.svg\"\n\n\nclass ElectrostaticProxy(BaseProxy):\n pass\n\n\nclass ElectrostaticViewProxy(BaseViewProxy):\n\n def getIcon(self):\n return \":/icons/FEM_EquationElectrostatic.svg\"\n\n\nclass FlowProxy(BaseProxy):\n pass\n\n\nclass FlowViewProxy(BaseViewProxy):\n\n def getIcon(self):\n return \":/icons/FEM_EquationFlow.svg\"\n\n\nclass FluxProxy(BaseProxy):\n pass\n\n\nclass FluxViewProxy(BaseViewProxy):\n\n def getIcon(self):\n return \":/icons/FEM_EquationFlux.svg\"\n\n\nclass HeatProxy(BaseProxy):\n pass\n\n\nclass HeatViewProxy(BaseViewProxy):\n\n def getIcon(self):\n return \":/icons/FEM_EquationHeat.svg\"\n\n\nclass MagnetodynamicProxy(BaseProxy):\n pass\n\n\nclass MagnetodynamicViewProxy(BaseViewProxy):\n\n def getIcon(self):\n return \":/icons/FEM_EquationMagnetodynamic.svg\"\n\n\nclass Magnetodynamic2DProxy(BaseProxy):\n pass\n\n\nclass Magnetodynamic2DViewProxy(BaseViewProxy):\n\n def getIcon(self):\n return \":/icons/FEM_EquationMagnetodynamic2D.svg\"\n\n\n## @}"}}},{"rowIdx":2096,"cells":{"id":{"kind":"number","value":2096,"string":"2,096"},"label":{"kind":"string","value":"write parameters"},"text":{"kind":"string","value":"# Copyright (c) 2017 The University of Manchester\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy\nfrom spinn_utilities.overrides import overrides\nfrom 
spinn_front_end_common.interface.ds import DataType\nfrom spinn_front_end_common.utilities.constants import (\n BYTES_PER_WORD, BYTES_PER_SHORT)\n\nfrom spynnaker.pyNN.data import SpynnakerDataView\nfrom .abstract_timing_dependence import AbstractTimingDependence\nfrom spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import (\n SynapseStructureWeightAccumulator)\nfrom spynnaker.pyNN.models.neuron.plasticity.stdp.common import (\n STDP_FIXED_POINT_ONE)\n\n\nclass TimingDependenceRecurrent(AbstractTimingDependence):\n \"\"\"\n A timing dependence STDP rule based on recurrences.\n \"\"\"\n __slots__ = [\n \"__accumulator_depression_plus_one\",\n \"__accumulator_potentiation_minus_one\",\n \"__dual_fsm\",\n \"__mean_post_window\",\n \"__mean_pre_window\",\n \"__synapse_structure\",\n \"__a_plus\",\n \"__a_minus\"]\n __PARAM_NAMES = (\n 'accumulator_depression', 'accumulator_potentiation',\n 'mean_pre_window', 'mean_post_window', 'dual_fsm')\n\n default_parameters = {\n 'accumulator_depression': -6, 'accumulator_potentiation': 6,\n 'mean_pre_window': 35.0, 'mean_post_window': 35.0, 'dual_fsm': True}\n\n def __init__(\n self, accumulator_depression=default_parameters[\n 'accumulator_depression'],\n accumulator_potentiation=default_parameters[\n 'accumulator_potentiation'],\n mean_pre_window=default_parameters['mean_pre_window'],\n mean_post_window=default_parameters['mean_post_window'],\n dual_fsm=default_parameters['dual_fsm'],\n A_plus=0.01, A_minus=0.01):\n \"\"\"\n :param int accumulator_depression:\n :param int accumulator_potentiation:\n :param float mean_pre_window:\n :param float mean_post_window:\n :param bool dual_fsm:\n :param float A_plus: :math:`A^+`\n :param float A_minus: :math:`A^-`\n \"\"\"\n # pylint: disable=too-many-arguments\n self.__accumulator_depression_plus_one = accumulator_depression + 1\n self.__accumulator_potentiation_minus_one = \\\n accumulator_potentiation - 1\n self.__mean_pre_window = mean_pre_window\n self.__mean_post_window = mean_post_window\n self.__dual_fsm = dual_fsm\n self.__a_plus = A_plus\n self.__a_minus = A_minus\n\n self.__synapse_structure = SynapseStructureWeightAccumulator()\n\n @property\n def A_plus(self):\n r\"\"\"\n :math:`A^+`\n\n :rtype: float\n \"\"\"\n return self.__a_plus\n\n @A_plus.setter\n def A_plus(self, new_value):\n self.__a_plus = new_value\n\n @property\n def A_minus(self):\n r\"\"\"\n :math:`A^-`\n\n :rtype: float\n \"\"\"\n return self.__a_minus\n\n @A_minus.setter\n def A_minus(self, new_value):\n self.__a_minus = new_value\n\n @overrides(AbstractTimingDependence.is_same_as)\n def is_same_as(self, timing_dependence):\n if timing_dependence is None or not isinstance(\n timing_dependence, TimingDependenceRecurrent):\n return False\n return ((self.__accumulator_depression_plus_one ==\n timing_dependence.accumulator_depression_plus_one) and\n (self.__accumulator_potentiation_minus_one ==\n timing_dependence.accumulator_potentiation_minus_one) and\n (self.__mean_pre_window ==\n timing_dependence.mean_pre_window) and\n (self.__mean_post_window ==\n timing_dependence.mean_post_window))\n\n @property\n def vertex_executable_suffix(self):\n \"\"\"\n The suffix to be appended to the vertex executable for this rule.\n\n :rtype: str\n \"\"\"\n if self.__dual_fsm:\n return \"recurrent_dual_fsm\"\n return \"recurrent_pre_stochastic\"\n\n @property\n def pre_trace_n_bytes(self):\n \"\"\"\n The number of bytes used by the pre-trace of the rule per neuron.\n\n :rtype: int\n \"\"\"\n # When using the separate FSMs, pre-trace 
contains window length,\n # otherwise it's in the synapse\n return BYTES_PER_SHORT if self.__dual_fsm else 0\n\n @overrides(AbstractTimingDependence.get_parameters_sdram_usage_in_bytes)\n def get_parameters_sdram_usage_in_bytes(self):\n # 2 * 32-bit parameters\n # 2 * LUTS with STDP_FIXED_POINT_ONE * 16-bit entries\n return (2 * BYTES_PER_WORD) + (\n 2 * STDP_FIXED_POINT_ONE * BYTES_PER_SHORT)\n\n @property\n def n_weight_terms(self):\n \"\"\"\n The number of weight terms expected by this timing rule.\n\n :rtype: int\n \"\"\"\n return 1\n\n @overrides(AbstractTimingDependence.METHOD_NAME)\n def METHOD_NAME(\n self, spec, global_weight_scale, synapse_weight_scales):\n\n # Write parameters\n spec.write_value(data=self.__accumulator_depression_plus_one,\n data_type=DataType.INT32)\n spec.write_value(data=self.__accumulator_potentiation_minus_one,\n data_type=DataType.INT32)\n\n # Convert mean times into machine timesteps\n time_step_per_ms = SpynnakerDataView.get_simulation_time_step_per_ms()\n\n mean_pre_timesteps = float(self.__mean_pre_window * time_step_per_ms)\n mean_post_timesteps = float(self.__mean_post_window * time_step_per_ms)\n\n # Write lookup tables\n self._write_exp_dist_lut(spec, mean_pre_timesteps)\n self._write_exp_dist_lut(spec, mean_post_timesteps)\n\n @staticmethod\n def _write_exp_dist_lut(spec, mean):\n \"\"\"\n :param .DataSpecificationGenerator spec:\n :param float mean:\n \"\"\"\n indices = numpy.arange(STDP_FIXED_POINT_ONE)\n inv_cdf = numpy.log(1.0 - indices/float(STDP_FIXED_POINT_ONE)) * -mean\n spec.write_array(\n inv_cdf.astype(numpy.uint16), data_type=DataType.UINT16)\n\n @property\n def synaptic_structure(self):\n \"\"\"\n The synaptic structure of the plastic part of the rows.\n\n :rtype: AbstractSynapseStructure\n \"\"\"\n return self.__synapse_structure\n\n @overrides(AbstractTimingDependence.get_parameter_names)\n def get_parameter_names(self):\n return self.__PARAM_NAMES"}}},{"rowIdx":2097,"cells":{"id":{"kind":"number","value":2097,"string":"2,097"},"label":{"kind":"string","value":"build transform"},"text":{"kind":"string","value":"# Copyright 2019 Open Source Robotics Foundation, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of the Willow Garage nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport pytest\nimport rclpy\n\nfrom tf2_ros.buffer import Buffer\nfrom geometry_msgs.msg import TransformStamped, PointStamped\n\nclass TestBuffer:\n def METHOD_NAME(self, target, source, rclpy_time):\n transform = TransformStamped()\n transform.header.frame_id = target\n transform.header.stamp = rclpy_time.to_msg()\n transform.child_frame_id = source\n transform.transform.translation.x = 42.0\n transform.transform.translation.y = -3.14\n transform.transform.translation.z = 0.0\n transform.transform.rotation.w = 1.0\n transform.transform.rotation.x = 0.0\n transform.transform.rotation.y = 0.0\n transform.transform.rotation.z = 0.0\n return transform\n\n def test_can_transform_valid_transform(self):\n buffer = Buffer()\n clock = rclpy.clock.Clock()\n rclpy_time = clock.now()\n transform = self.METHOD_NAME('foo', 'bar', rclpy_time)\n\n assert buffer.set_transform(transform, 'unittest') is None\n assert buffer.can_transform('foo', 'bar', rclpy_time)\n\n output = buffer.lookup_transform('foo', 'bar', rclpy_time)\n\n assert transform.child_frame_id == output.child_frame_id\n assert transform.transform.translation.x == output.transform.translation.x\n assert transform.transform.translation.y == output.transform.translation.y\n assert transform.transform.translation.z == output.transform.translation.z\n\n def test_await_transform_immediately_available(self):\n # wait for a transform that is already available to test short-cut code\n buffer = Buffer()\n clock = rclpy.clock.Clock()\n rclpy_time = clock.now()\n transform = self.METHOD_NAME('foo', 'bar', rclpy_time)\n\n buffer.set_transform(transform, 'unittest')\n\n coro = buffer.lookup_transform_async('foo', 'bar', rclpy_time)\n with pytest.raises(StopIteration) as excinfo:\n coro.send(None)\n\n assert transform == excinfo.value.value\n coro.close()\n\n def test_await_transform_full_immediately_available(self):\n # wait for a transform that is already available to test short-cut code\n buffer = Buffer()\n clock = rclpy.clock.Clock()\n rclpy_time = clock.now()\n transform = self.METHOD_NAME('foo', 'bar', rclpy_time)\n\n buffer.set_transform(transform, 'unittest')\n\n coro = buffer.lookup_transform_full_async('foo', rclpy_time, 'bar', rclpy_time, 'foo')\n with pytest.raises(StopIteration) as excinfo:\n coro.send(None)\n\n assert transform == excinfo.value.value\n coro.close()\n\n def test_await_transform_delayed(self):\n # wait for a transform that is not yet available\n buffer = Buffer()\n clock = rclpy.clock.Clock()\n rclpy_time = clock.now()\n transform = self.METHOD_NAME('foo', 'bar', rclpy_time)\n\n coro = buffer.lookup_transform_async('foo', 'bar', rclpy_time)\n coro.send(None)\n\n buffer.set_transform(transform, 'unittest')\n with pytest.raises(StopIteration) as excinfo:\n coro.send(None)\n\n assert transform == excinfo.value.value\n coro.close()\n\n def test_await_transform_full_delayed(self):\n # wait for a transform that is not yet available\n buffer = Buffer()\n clock = rclpy.clock.Clock()\n rclpy_time = 
clock.now()\n transform = self.METHOD_NAME('foo', 'bar', rclpy_time)\n\n coro = buffer.lookup_transform_full_async('foo', rclpy_time, 'bar', rclpy_time, 'foo')\n coro.send(None)\n\n buffer.set_transform(transform, 'unittest')\n with pytest.raises(StopIteration) as excinfo:\n coro.send(None)\n\n assert transform == excinfo.value.value\n coro.close()\n\n def test_buffer_non_default_cache(self):\n buffer = Buffer(cache_time=rclpy.duration.Duration(seconds=10.0))\n clock = rclpy.clock.Clock()\n rclpy_time = clock.now()\n transform = self.METHOD_NAME('foo', 'bar', rclpy_time)\n\n assert buffer.set_transform(transform, 'unittest') is None\n\n assert buffer.can_transform('foo', 'bar', rclpy_time)\n\n output = buffer.lookup_transform('foo', 'bar', rclpy_time)\n assert transform.child_frame_id == output.child_frame_id\n assert transform.transform.translation.x == output.transform.translation.x\n assert transform.transform.translation.y == output.transform.translation.y\n assert transform.transform.translation.z == output.transform.translation.z"}}},{"rowIdx":2098,"cells":{"id":{"kind":"number","value":2098,"string":"2,098"},"label":{"kind":"string","value":"get vertexai job client"},"text":{"kind":"string","value":"# Copyright 2022 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"CRMint's abstract worker dealing with Vertex AI.\"\"\"\n\nimport time\n\nimport google.auth\n\nfrom google.cloud import aiplatform\n\nfrom google.cloud.aiplatform_v1.types import job_state as js\nfrom google.cloud.aiplatform_v1.types import pipeline_state as ps\n\nfrom jobs.workers import worker\n\n\n_PIPELINE_COMPLETE_STATES = frozenset([\n ps.PipelineState.PIPELINE_STATE_SUCCEEDED,\n ps.PipelineState.PIPELINE_STATE_FAILED,\n ps.PipelineState.PIPELINE_STATE_CANCELLED,\n ps.PipelineState.PIPELINE_STATE_PAUSED])\n\n_JOB_COMPLETE_STATES = frozenset([\n js.JobState.JOB_STATE_SUCCEEDED,\n js.JobState.JOB_STATE_FAILED,\n js.JobState.JOB_STATE_CANCELLED,\n js.JobState.JOB_STATE_PAUSED])\n\n\nclass VertexAIWorker(worker.Worker):\n \"\"\"Worker that polls job status and respawns itself if the job is not done.\"\"\"\n\n def METHOD_NAME(self, location):\n api_endpoint = f'{location}-aiplatform.googleapis.com'\n client_options = {'api_endpoint': api_endpoint}\n return aiplatform.gapic.JobServiceClient(client_options=client_options)\n\n def _get_vertexai_pipeline_client(self, location):\n api_endpoint = f'{location}-aiplatform.googleapis.com'\n client_options = {'api_endpoint': api_endpoint}\n return aiplatform.gapic.PipelineServiceClient(client_options=client_options)\n\n def _get_vertexai_dataset_client(self, location):\n api_endpoint = f'{location}-aiplatform.googleapis.com'\n client_options = {'api_endpoint': api_endpoint}\n return aiplatform.gapic.DatasetServiceClient(client_options=client_options)\n\n def _get_vertexai_model_client(self, location):\n api_endpoint = f'{location}-aiplatform.googleapis.com'\n client_options = {'api_endpoint': api_endpoint}\n return 
aiplatform.gapic.ModelServiceClient(client_options=client_options)\n\n def _get_batch_prediction_job(self, job_client, job_name):\n return job_client.get_batch_prediction_job(name=job_name)\n\n def _get_training_pipeline(self, pipeline_client, pipeline_name):\n return pipeline_client.get_training_pipeline(name=pipeline_name)\n\n def _get_location_from_pipeline_name(self, pipeline_name):\n return pipeline_name.split('/')[3]\n\n def _get_location_from_job_name(self, job_name):\n return job_name.split('/')[3]\n\n def _get_project_id(self):\n _, project_id = google.auth.default()\n return project_id\n\n def _get_parent_resource(self, location):\n project_id = self._get_project_id()\n return f'projects/{project_id}/locations/{location}'\n\n def _wait_for_pipeline(self, pipeline):\n \"\"\"Waits for pipeline completion.\n\n It will relay to VertexAIWaiter if it takes too long.\n \"\"\"\n delay = 5\n waiting_time = 5\n time.sleep(delay)\n while pipeline.state not in _PIPELINE_COMPLETE_STATES:\n if waiting_time > 300: # Once 5 minute has passed, spawn VertexAIWaiter.\n self._enqueue(\n 'VertexAIWaiter', {\n 'id': pipeline.name,\n 'worker_class': 'VertexAITabularTrainer'\n }, 60)\n return None\n if delay < 30:\n delay = [5, 10, 15, 20, 30][int(waiting_time / 60)]\n time.sleep(delay)\n waiting_time += delay\n if pipeline.state == ps.PipelineState.PIPELINE_STATE_FAILED:\n raise worker.WorkerException(f'Training pipeline {pipeline.name} failed.')\n\n def _wait_for_job(self, job):\n \"\"\"Waits for batch prediction job completion.\n\n It will relay to VertexAIWaiter if it takes too long.\n \"\"\"\n delay = 5\n waiting_time = 5\n time.sleep(delay)\n while job.state not in _JOB_COMPLETE_STATES:\n if waiting_time > 300: # Once 5 minute has passed, spawn VertexAIWaiter.\n self._enqueue(\n 'VertexAIWaiter', {\n 'id': job.name,\n 'worker_class': 'VertexAIBatchPredictorToBQ'},\n 60)\n return None\n if delay < 30:\n delay = [5, 10, 15, 20, 30][int(waiting_time / 60)]\n time.sleep(delay)\n waiting_time += delay\n if job.state == js.JobState.JOB_STATE_FAILED:\n raise worker.WorkerException(f'Job {job.name} failed.')\n\n def _clean_up_datasets(self, dataset_client, project, region, display_name):\n parent = f'projects/{project}/locations/{region}'\n datasets = list(\n dataset_client.list_datasets({\n 'parent': parent,\n 'filter': f'display_name=\"{display_name}\"',\n 'order_by': 'create_time asc'}))\n configs = map(lambda x: (x.create_time, {'name': x.name}), datasets)\n sorted_configs = sorted(configs)\n for _, config in sorted_configs[:-1]:\n dataset_name = config['name']\n dataset_client.delete_dataset({'name': dataset_name})\n self.log_info(f'Deleted dataset: {dataset_name}')\n\n def _clean_up_training_pipelines(self, pipeline_client, project, region,\n display_name):\n parent = f'projects/{project}/locations/{region}'\n training_pipelines = list(\n pipeline_client.list_training_pipelines({\n 'parent': parent,\n 'filter': f'display_name=\"{display_name}\"'}))\n configs = map(\n lambda x: (x.create_time, {'state': x.state, 'name': x.name}),\n training_pipelines)\n sorted_configs = sorted(configs)\n for _, config in sorted_configs[:-1]:\n training_pipeline_name = config['name']\n if config['state'] in _PIPELINE_COMPLETE_STATES:\n pipeline_client.delete_training_pipeline(name=training_pipeline_name)\n else:\n pipeline_client.cancel_training_pipeline(\n name=training_pipeline_name, timeout=300)\n pipeline_client.delete_training_pipeline(name=training_pipeline_name)\n self.log_info(f'Deleted training pipeline: 
{training_pipeline_name}')\n\n def _clean_up_batch_predictions(self, job_client, project, region,\n display_name):\n parent = f'projects/{project}/locations/{region}'\n batch_predictions = list(\n job_client.list_batch_prediction_jobs({\n 'parent': parent,\n 'filter': f'display_name=\"{display_name}\"'}))\n configs = map(\n lambda x: (x.create_time, {'state': x.state, 'name': x.name}),\n batch_predictions)\n sorted_configs = sorted(configs)\n for _, config in sorted_configs[:-1]:\n batch_prediction_name = config['name']\n if config['state'] in _JOB_COMPLETE_STATES:\n job_client.delete_batch_prediction_job(name=batch_prediction_name)\n else:\n job_client.cancel_batch_prediction_job(\n name=batch_prediction_name, timeout=300)\n job_client.delete_batch_prediction_job(name=batch_prediction_name)\n self.log_info(f'Deleted batch prediction: {batch_prediction_name}')"}}},{"rowIdx":2099,"cells":{"id":{"kind":"number","value":2099,"string":"2,099"},"label":{"kind":"string","value":"get cluster log groups from boto3"},"text":{"kind":"string","value":"import json\nimport logging\n\nimport boto3\nimport utils\nfrom botocore.exceptions import ClientError\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef _dumps_json(obj):\n \"\"\"Dump obj to a JSON string.\"\"\"\n return json.dumps(obj, indent=2)\n\n\ndef METHOD_NAME(cluster_log_group_prefix):\n \"\"\"\n Get log groups with cluster log group prefix from boto3.\n\n Raises ClientError.\n \"\"\"\n try:\n log_groups = (\n boto3.client(\"logs\").describe_log_groups(logGroupNamePrefix=cluster_log_group_prefix).get(\"logGroups\")\n )\n LOGGER.info(\"Log groups: {0}\\n\".format(_dumps_json(log_groups)))\n return log_groups\n except ClientError as e:\n LOGGER.error(\"Unable to retrieve any log group with prefix {0}\\nError: {1}\".format(cluster_log_group_prefix, e))\n raise ClientError\n\n\ndef _get_log_stream_pages(log_client, log_group_name):\n \"\"\"\n Get paged list of log streams.\n\n Raises ClientError if the log group doesn't exist.\n \"\"\"\n next_token = None\n while True:\n kwargs = {\"logGroupName\": log_group_name}\n if next_token:\n kwargs.update({\"nextToken\": next_token})\n response = log_client.describe_log_streams(**kwargs)\n\n streams = response.get(\"logStreams\")\n LOGGER.info(\"Log streams for {group}:\\n{streams}\".format(group=log_group_name, streams=_dumps_json(streams)))\n\n yield streams\n\n next_token = response.get(\"nextToken\")\n if next_token is None:\n break\n\n\ndef get_log_streams(log_group_name):\n \"\"\"\n Get list of log streams.\n\n Raises ClientError if the log group doesn't exist.\n \"\"\"\n log_client = boto3.client(\"logs\")\n for stream_page in _get_log_stream_pages(log_client, log_group_name):\n for stream in stream_page:\n yield stream\n\n\ndef get_log_events(log_group_name, log_stream_name):\n \"\"\"\n Get log events for the given log_stream_name.\n\n Raises ClientError if the given log group or stream doesn't exist.\n \"\"\"\n logs_client = boto3.client(\"logs\")\n # get_log_events is not page-able using utils.paginate_boto3\n response = logs_client.get_log_events(\n logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True\n )\n prev_token = None\n next_token = response.get(\"nextForwardToken\")\n LOGGER.info(f\"Starting pagination of GetLogEvents for {log_group_name}/{log_stream_name} with {next_token}\")\n while next_token != prev_token:\n for event in response.get(\"events\"):\n LOGGER.info(f\"event from stream {log_group_name}/{log_stream_name}:\\n{json.dumps(event, indent=2)}\")\n yield 
event\n response = logs_client.get_log_events(\n logGroupName=log_group_name, logStreamName=log_stream_name, nextToken=next_token\n )\n prev_token = next_token\n next_token = response.get(\"nextForwardToken\")\n LOGGER.info(f\"Continuing pagination of GetLogEvents for {log_group_name}/{log_stream_name} with {next_token}\")\n\n\ndef get_ec2_instances():\n \"\"\"Iterate through ec2's describe_instances.\"\"\"\n for instance_page in utils.paginate_boto3(boto3.client(\"ec2\").describe_instances):\n for instance in instance_page.get(\"Instances\"):\n yield instance\n\n\ndef _get_log_group_for_stack(stack_name):\n \"\"\"Return a list of log groups belonging to the given stack.\"\"\"\n log_groups = []\n for resource in utils.get_cfn_resources(stack_name):\n if resource.get(\"ResourceType\") == \"AWS::Logs::LogGroup\":\n log_groups.append(resource.get(\"PhysicalResourceId\"))\n return log_groups\n\n\ndef get_cluster_log_groups(stack_name):\n \"\"\"Return list of PhysicalResourceIds for log groups created by cluster with given stack name.\"\"\"\n log_groups = []\n substack_phys_ids = utils.get_substacks(stack_name)\n for substack_phys_id in substack_phys_ids:\n log_groups.extend(_get_log_group_for_stack(substack_phys_id))\n return log_groups\n\n\ndef delete_log_group(log_group):\n \"\"\"Delete the given log group.\"\"\"\n try:\n boto3.client(\"logs\").delete_log_group(logGroupName=log_group)\n except ClientError as client_err:\n if client_err.response.get(\"Error\").get(\"Code\") == \"ResourceNotFoundException\":\n return # Log group didn't exist.\n LOGGER.warning(\n \"Error when deleting log group {log_group}: {msg}\".format(\n log_group=log_group, msg=client_err.response.get(\"Error\").get(\"Message\")\n )\n )\n\n\ndef delete_log_groups(log_groups):\n \"\"\"Delete the given log groups, if they exist.\"\"\"\n for log_group in log_groups:\n delete_log_group(log_group)"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":20,"numItemsPerPage":100,"numTotalItems":300000,"offset":2000,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NzMwMDY4Mywic3ViIjoiL2RhdGFzZXRzL2tvbnN0YW50Z3IvbWV0aG9kLW5hbWUtcHJlZGljdGlvbiIsImV4cCI6MTc1NzMwNDI4MywiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.90qK-e4pW3Km9hXqijfkFLWIJb26izLmY-pRPjOvJS5vsRfKxwNetR38kiM9yvm5WprYYnvjlXHchGOZkN1GCQ","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Dataset columns (as shown in the preview table):

    column  type    range
    id      int64   values 0 to 300k
    label   string  lengths 1 to 74 characters
    text    string  lengths 4k to 8k characters

The preview rows below are reproduced as id / label / text records.
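For orientation, here is a minimal sketch of what one record with this schema looks like in code. The `Row` dataclass, the field comments, and the example values are illustrative assumptions, not something this page defines.

# Hypothetical sketch of one record in the (id, label, text) schema above.
# The Row class and the example values are assumptions for illustration only.
from dataclasses import dataclass


@dataclass
class Row:
    id: int     # int64 row identifier, roughly 0 to 300k
    label: str  # target method name, 1 to 74 characters
    text: str   # Python source with the target name masked, roughly 4k to 8k characters


example = Row(id=2000, label="find", text="# xml.etree test for cElementTree ...")
print(example.label)  # -> "find"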
id: 2000
label: find
text:

# xml.etree test for cElementTree

import doctest
import sys

from test import test_support
from xml.etree import cElementTree as ET

SAMPLE_XML = """
<body>
  <tag>text</tag>
  <tag />
  <section>
    <tag>subtext</tag>
  </section>
</body>
"""

SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
  <tag>text</tag>
  <tag />
  <section>
    <tag>subtext</tag>
  </section>
</body>
"""

def sanity():
    """
    Import sanity.

    >>> from xml.etree import cElementTree
    """

def check_method(method):
    if not hasattr(method, '__call__'):
        print method, "not callable"

def serialize(ET, elem, encoding=None):
    import StringIO
    file = StringIO.StringIO()
    tree = ET.ElementTree(elem)
    if encoding:
        tree.write(file, encoding)
    else:
        tree.write(file)
    return file.getvalue()

def summarize(elem):
    return elem.tag

def summarize_list(seq):
    return map(summarize, seq)

def interface():
    """
    Test element tree interface.

    >>> element = ET.Element("tag", key="value")
    >>> tree = ET.ElementTree(element)

    Make sure all standard element methods exist.

    >>> check_method(element.append)
    >>> check_method(element.insert)
    >>> check_method(element.remove)
    >>> check_method(element.getchildren)
    >>> check_method(element.find)
    >>> check_method(element.findall)
    >>> check_method(element.findtext)
    >>> check_method(element.clear)
    >>> check_method(element.get)
    >>> check_method(element.set)
    >>> check_method(element.keys)
    >>> check_method(element.items)
    >>> check_method(element.getiterator)

    Basic method sanity checks.

    >>> serialize(ET, element) # 1
    '<tag key="value" />'
    >>> subelement = ET.Element("subtag")
    >>> element.append(subelement)
    >>> serialize(ET, element) # 2
    '<tag key="value"><subtag /></tag>'
    >>> element.insert(0, subelement)
    >>> serialize(ET, element) # 3
    '<tag key="value"><subtag /><subtag /></tag>'
    >>> element.remove(subelement)
    >>> serialize(ET, element) # 4
    '<tag key="value"><subtag /></tag>'
    >>> element.remove(subelement)
    >>> serialize(ET, element) # 5
    '<tag key="value" />'
    >>> element.remove(subelement)
    Traceback (most recent call last):
    ValueError: list.remove(x): x not in list
    >>> serialize(ET, element) # 6
    '<tag key="value" />'
    """

def METHOD_NAME():
    """
    Test find methods (including xpath syntax).

    >>> elem = ET.XML(SAMPLE_XML)
    >>> elem.find("tag").tag
    'tag'
    >>> ET.ElementTree(elem).find("tag").tag
    'tag'
    >>> elem.find("section/tag").tag
    'tag'
    >>> ET.ElementTree(elem).find("section/tag").tag
    'tag'
    >>> elem.findtext("tag")
    'text'
    >>> elem.findtext("tog")
    >>> elem.findtext("tog", "default")
    'default'
    >>> ET.ElementTree(elem).findtext("tag")
    'text'
    >>> elem.findtext("section/tag")
    'subtext'
    >>> ET.ElementTree(elem).findtext("section/tag")
    'subtext'
    >>> summarize_list(elem.findall("tag"))
    ['tag', 'tag']
    >>> summarize_list(elem.findall("*"))
    ['tag', 'tag', 'section']
    >>> summarize_list(elem.findall(".//tag"))
    ['tag', 'tag', 'tag']
    >>> summarize_list(elem.findall("section/tag"))
    ['tag']
    >>> summarize_list(elem.findall("section//tag"))
    ['tag']
    >>> summarize_list(elem.findall("section/*"))
    ['tag']
    >>> summarize_list(elem.findall("section//*"))
    ['tag']
    >>> summarize_list(elem.findall("section/.//*"))
    ['tag']
    >>> summarize_list(elem.findall("*/*"))
    ['tag']
    >>> summarize_list(elem.findall("*//*"))
    ['tag']
    >>> summarize_list(elem.findall("*/tag"))
    ['tag']
    >>> summarize_list(elem.findall("*/./tag"))
    ['tag']
    >>> summarize_list(elem.findall("./tag"))
    ['tag', 'tag']
    >>> summarize_list(elem.findall(".//tag"))
    ['tag', 'tag', 'tag']
    >>> summarize_list(elem.findall("././tag"))
    ['tag', 'tag']
    >>> summarize_list(ET.ElementTree(elem).findall("/tag"))
    ['tag', 'tag']
    >>> summarize_list(ET.ElementTree(elem).findall("./tag"))
    ['tag', 'tag']
    >>> elem = ET.XML(SAMPLE_XML_NS)
    >>> summarize_list(elem.findall("tag"))
    []
    >>> summarize_list(elem.findall("{http://effbot.org/ns}tag"))
    ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
    >>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag"))
    ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
    """

def parseliteral():
    r"""
    >>> element = ET.XML("<html><body>text</body></html>")
    >>> ET.ElementTree(element).write(sys.stdout)
    <html><body>text</body></html>
    >>> element = ET.fromstring("<html><body>text</body></html>")
    >>> ET.ElementTree(element).write(sys.stdout)
    <html><body>text</body></html>
    >>> print ET.tostring(element)
    <html><body>text</body></html>
    >>> print ET.tostring(element, "ascii")
    <?xml version='1.0' encoding='ascii'?>
    <html><body>text</body></html>
    >>> _, ids = ET.XMLID("<html><body>text</body></html>")
    >>> len(ids)
    0
    >>> _, ids = ET.XMLID("<html><body id='body'>text</body></html>")
    >>> len(ids)
    1
    >>> ids["body"].tag
    'body'
    """

def check_encoding(encoding):
    """
    >>> check_encoding("ascii")
    >>> check_encoding("us-ascii")
    >>> check_encoding("iso-8859-1")
    >>> check_encoding("iso-8859-15")
    >>> check_encoding("cp437")
    >>> #check_encoding("mac-roman")
    """
    ET.XML("<?xml version='1.0' encoding='%s'?><xml />" % encoding)

def bug_1534630():
    """
    >>> bob = ET.TreeBuilder()
    >>> e = bob.data("data")
    >>> e = bob.start("tag", {})
    >>> e = bob.end("tag")
    >>> e = bob.close()
    >>> serialize(ET, e)
    '<tag />'
    """

def test_main():
    from test import test_xml_etree_c
    test_support.run_doctest(test_xml_etree_c, verbosity=True)

if __name__ == '__main__':
    test_main()
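In the row above the masked identifier corresponds to the label "find". Assuming the label's space-separated tokens simply join with underscores to recover the original identifier (an assumption made for illustration, not documented on this page), restoring a sample is a one-line substitution:

# Hypothetical restoration of a masked sample; the token-joining rule is an assumption.
row_text = "def METHOD_NAME(): ..."          # stand-in for the full 'text' field shown above
label = "find"                                # the 'label' field of the same row
identifier = "_".join(label.split())          # -> "find"
restored = row_text.replace("METHOD_NAME", identifier)
print(restored)                               # -> "def find(): ..."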
id: 2001
label: visualise result
text:

# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT

from simpa import Tags
import simpa as sp
import numpy as np
from skimage.data import shepp_logan_phantom
from scipy.ndimage import zoom

from simpa_tests.manual_tests import ManualIntegrationTestClass

# FIXME temporary workaround for newest Intel architectures
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


class SegmentationLoaderTest(ManualIntegrationTestClass):

    def setup(self):
        self.path_manager = sp.PathManager()
        target_spacing = 1.0

        label_mask = shepp_logan_phantom()
        label_mask = np.digitize(label_mask, bins=np.linspace(0.0, 1.0, 11), right=True)
        label_mask = np.reshape(label_mask, (400, 1, 400))

        input_spacing = 0.2
        segmentation_volume_tiled = np.tile(label_mask, (1, 128, 1))
        segmentation_volume_mask = np.round(
            zoom(segmentation_volume_tiled, input_spacing / target_spacing, order=0)).astype(int)

        def segmentation_class_mapping():
            ret_dict = dict()
            ret_dict[0] = sp.TISSUE_LIBRARY.heavy_water()
            ret_dict[1] = sp.TISSUE_LIBRARY.blood()
            ret_dict[2] = sp.TISSUE_LIBRARY.epidermis()
            ret_dict[3] = sp.TISSUE_LIBRARY.muscle()
            ret_dict[4] = sp.TISSUE_LIBRARY.mediprene()
            ret_dict[5] = sp.TISSUE_LIBRARY.ultrasound_gel()
            ret_dict[6] = sp.TISSUE_LIBRARY.heavy_water()
            ret_dict[7] = (sp.MolecularCompositionGenerator()
                           .append(sp.MOLECULE_LIBRARY.oxyhemoglobin(0.01))
                           .append(sp.MOLECULE_LIBRARY.deoxyhemoglobin(0.01))
                           .append(sp.MOLECULE_LIBRARY.water(0.98))
                           .get_molecular_composition(sp.SegmentationClasses.COUPLING_ARTIFACT))
            ret_dict[8] = sp.TISSUE_LIBRARY.heavy_water()
            ret_dict[9] = sp.TISSUE_LIBRARY.heavy_water()
            ret_dict[10] = sp.TISSUE_LIBRARY.heavy_water()
            ret_dict[11] = sp.TISSUE_LIBRARY.heavy_water()
            return ret_dict

        self.settings = sp.Settings()
        self.settings[Tags.SIMULATION_PATH] = self.path_manager.get_hdf5_file_save_path()
        self.settings[Tags.VOLUME_NAME] = "SegmentationTest"
        self.settings[Tags.RANDOM_SEED] = 1234
        self.settings[Tags.WAVELENGTHS] = [700]
        self.settings[Tags.SPACING_MM] = target_spacing
        self.settings[Tags.DIM_VOLUME_X_MM] = 400 / (target_spacing / input_spacing)
        self.settings[Tags.DIM_VOLUME_Y_MM] = 128 / (target_spacing / input_spacing)
        self.settings[Tags.DIM_VOLUME_Z_MM] = 400 / (target_spacing / input_spacing)
        # self.settings[Tags.IGNORE_QA_ASSERTIONS] = True

        self.settings.set_volume_creation_settings({
            Tags.INPUT_SEGMENTATION_VOLUME: segmentation_volume_mask,
            Tags.SEGMENTATION_CLASS_MAPPING: segmentation_class_mapping(),
        })

        self.settings.set_optical_settings({
            Tags.OPTICAL_MODEL_NUMBER_PHOTONS: 1e7,
            Tags.OPTICAL_MODEL_BINARY_PATH: self.path_manager.get_mcx_binary_path(),
            Tags.ILLUMINATION_TYPE: Tags.ILLUMINATION_TYPE_MSOT_ACUITY_ECHO,
            Tags.LASER_PULSE_ENERGY_IN_MILLIJOULE: 50,
        })

        self.pipeline = [
            sp.SegmentationBasedVolumeCreationAdapter(self.settings),
            sp.MCXAdapter(self.settings)
        ]

    def perform_test(self):
        sp.simulate(self.pipeline, self.settings,
                    sp.RSOMExplorerP50(element_spacing_mm=2.0,
                                       number_elements_y=10,
                                       number_elements_x=20,
                                       device_position_mm=np.asarray([20, 10, 0])))

    def tear_down(self):
        os.remove(self.settings[Tags.SIMPA_OUTPUT_PATH])

    def METHOD_NAME(self, show_figure_on_screen=True, save_path=None):
        if show_figure_on_screen:
            save_path = None
        else:
            save_path = save_path + "SegmentationLoaderExample.png"
        sp.visualise_data(path_to_hdf5_file=self.path_manager.get_hdf5_file_save_path() + "/" + "SegmentationTest" + ".hdf5",
                          wavelength=700,
                          show_initial_pressure=True,
                          show_segmentation_map=True,
                          show_absorption=True,
                          show_fluence=True,
                          show_tissue_density=True,
                          show_speed_of_sound=True,
                          show_anisotropy=True,
                          show_scattering=True,
                          save_path=save_path,
                          log_scale=False)


if __name__ == "__main__":
    test = SegmentationLoaderTest()
    test.run_test(show_figure_on_screen=False)
id: 2002
label: set up
text:

# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import numpy as np
import pytest
from absl.testing import parameterized

from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
    ResNet50V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (
    ResNetV2Backbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor


class ResNetV2BackboneTest(TestCase):
    def METHOD_NAME(self):
        self.input_batch = np.ones(shape=(8, 224, 224, 3))

    def test_valid_call(self):
        model = ResNetV2Backbone(
            stackwise_filters=[64, 128, 256, 512],
            stackwise_blocks=[2, 2, 2, 2],
            stackwise_strides=[1, 2, 2, 2],
            include_rescaling=False,
        )
        model(self.input_batch)

    def test_valid_call_applications_model(self):
        model = ResNet50V2Backbone()
        model(self.input_batch)

    def test_valid_call_with_rescaling(self):
        model = ResNetV2Backbone(
            stackwise_filters=[64, 128, 256, 512],
            stackwise_blocks=[2, 2, 2, 2],
            stackwise_strides=[1, 2, 2, 2],
            include_rescaling=True,
        )
        model(self.input_batch)

    @pytest.mark.large  # Saving is slow, so mark these large.
    def test_saved_model(self):
        model = ResNetV2Backbone(
            stackwise_filters=[64, 128, 256, 512],
            stackwise_blocks=[2, 2, 2, 2],
            stackwise_strides=[1, 2, 2, 2],
            include_rescaling=False,
        )
        model_output = model(self.input_batch)
        save_path = os.path.join(
            self.get_temp_dir(), "resnet_v2_backbone.keras"
        )
        model.save(save_path)
        restored_model = keras.models.load_model(save_path)

        # Check we got the real object back.
        self.assertIsInstance(restored_model, ResNetV2Backbone)

        # Check that output matches.
        restored_output = restored_model(self.input_batch)
        self.assertAllClose(
            ops.convert_to_numpy(model_output),
            ops.convert_to_numpy(restored_output),
        )

    @pytest.mark.large  # Saving is slow, so mark these large.
    def test_saved_alias_model(self):
        model = ResNet50V2Backbone()
        model_output = model(self.input_batch)
        save_path = os.path.join(
            self.get_temp_dir(), "resnet_v2_backbone.keras"
        )
        model.save(save_path)
        restored_model = keras.models.load_model(save_path)

        # Check we got the real object back.
        # Note that these aliases serialized as the base class
        self.assertIsInstance(restored_model, ResNetV2Backbone)

        # Check that output matches.
        restored_output = restored_model(self.input_batch)
        self.assertAllClose(
            ops.convert_to_numpy(model_output),
            ops.convert_to_numpy(restored_output),
        )

    def test_feature_pyramid_inputs(self):
        model = ResNet50V2Backbone()
        backbone_model = get_feature_extractor(
            model,
            model.pyramid_level_inputs.values(),
            model.pyramid_level_inputs.keys(),
        )
        input_size = 256
        inputs = keras.Input(shape=[input_size, input_size, 3])
        outputs = backbone_model(inputs)
        levels = ["P2", "P3", "P4", "P5"]
        self.assertEquals(list(outputs.keys()), levels)
        self.assertEquals(
            outputs["P2"].shape,
            (None, input_size // 2**2, input_size // 2**2, 256),
        )
        self.assertEquals(
            outputs["P3"].shape,
            (None, input_size // 2**3, input_size // 2**3, 512),
        )
        self.assertEquals(
            outputs["P4"].shape,
            (None, input_size // 2**4, input_size // 2**4, 1024),
        )
        self.assertEquals(
            outputs["P5"].shape,
            (None, input_size // 2**5, input_size // 2**5, 2048),
        )

    @parameterized.named_parameters(
        ("one_channel", 1),
        ("four_channels", 4),
    )
    def test_application_variable_input_channels(self, num_channels):
        # ResNet50 model
        model = ResNetV2Backbone(
            stackwise_filters=[64, 128, 256, 512],
            stackwise_blocks=[3, 4, 6, 3],
            stackwise_strides=[1, 2, 2, 2],
            input_shape=(None, None, num_channels),
            include_rescaling=False,
        )
        self.assertEqual(model.output_shape, (None, None, None, 2048))
id: 2003
label: async neo4j driver
text:

# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import sys
from functools import wraps

import pytest
import pytest_asyncio

from neo4j import (
    AsyncGraphDatabase,
    GraphDatabase,
)
from neo4j.debug import watch

from . import env

# from neo4j.debug import watch
#
# watch("neo4j")


@pytest.fixture(scope="session")
def uri():
    return env.NEO4J_SERVER_URI


@pytest.fixture(scope="session")
def bolt_uri(uri):
    if env.NEO4J_SCHEME != "bolt":
        pytest.skip("Test requires bolt scheme")
    return uri


@pytest.fixture(scope="session")
def _forced_bolt_uri():
    return f"bolt://{env.NEO4J_HOST}:{env.NEO4J_PORT}"


@pytest.fixture(scope="session")
def neo4j_uri():
    if env.NEO4J_SCHEME != "neo4j":
        pytest.skip("Test requires neo4j scheme")
    return uri


@pytest.fixture(scope="session")
def _forced_neo4j_uri():
    return f"neo4j://{env.NEO4J_HOST}:{env.NEO4J_PORT}"


@pytest.fixture(scope="session")
def auth():
    return env.NEO4J_USER, env.NEO4J_PASS


@pytest.fixture
def driver(uri, auth):
    with GraphDatabase.driver(uri, auth=auth) as driver:
        yield driver


@pytest.fixture
def bolt_driver(bolt_uri, auth):
    with GraphDatabase.driver(bolt_uri, auth=auth) as driver:
        yield driver


@pytest.fixture
def neo4j_driver(neo4j_uri, auth):
    with GraphDatabase.driver(neo4j_uri, auth=auth) as driver:
        yield driver


@wraps(AsyncGraphDatabase.driver)
def get_async_driver(*args, **kwargs):
    return AsyncGraphDatabase.driver(*args, **kwargs)


@pytest_asyncio.fixture
async def async_driver(uri, auth):
    async with get_async_driver(uri, auth=auth) as driver:
        yield driver


@pytest_asyncio.fixture
async def async_bolt_driver(bolt_uri, auth):
    async with get_async_driver(bolt_uri, auth=auth) as driver:
        yield driver


@pytest_asyncio.fixture
async def METHOD_NAME(neo4j_uri, auth):
    async with get_async_driver(neo4j_uri, auth=auth) as driver:
        yield driver


@pytest.fixture
def _forced_bolt_driver(_forced_bolt_uri):
    with GraphDatabase.driver(_forced_bolt_uri, auth=auth) as driver:
        yield driver


@pytest.fixture
def _forced_neo4j_driver(_forced_neo4j_uri):
    with GraphDatabase.driver(_forced_neo4j_uri, auth=auth) as driver:
        yield driver


@pytest.fixture(scope="session")
def server_info(_forced_bolt_driver):
    return _forced_bolt_driver.get_server_info()


@pytest.fixture(scope="session")
def bolt_protocol_version(server_info):
    return server_info.protocol_version


def mark_requires_min_bolt_version(version="3.5"):
    return pytest.mark.skipif(
        env.NEO4J_VERSION < version,
        reason=f"requires server version '{version}' or higher, "
               f"found '{env.NEO4J_VERSION}'"
    )


def mark_requires_edition(edition):
    return pytest.mark.skipif(
        env.NEO4J_EDITION != edition,
        reason=f"requires server edition '{edition}', "
               f"found '{env.NEO4J_EDITION}'"
    )


@pytest.fixture
def session(driver):
    with driver.session() as session:
        yield session


@pytest.fixture
def bolt_session(bolt_driver):
    with bolt_driver.session() as session:
        yield session


@pytest.fixture
def neo4j_session(neo4j_driver):
    with neo4j_driver.session() as session:
        yield session


# async support for pytest-benchmark
# https://github.com/ionelmc/pytest-benchmark/issues/66
@pytest_asyncio.fixture
async def aio_benchmark(benchmark, event_loop):
    def _wrapper(func, *args, **kwargs):
        if asyncio.iscoroutinefunction(func):
            @benchmark
            def _():
                return event_loop.run_until_complete(func(*args, **kwargs))
        else:
            benchmark(func, *args, **kwargs)

    return _wrapper


@pytest.fixture
def watcher():
    with watch("neo4j", out=sys.stdout, colour=True):
        yield
id: 2004
label: recharge connection config
text:

import uuid
from typing import Any, Dict, Generator

import pydash
import pytest
import requests
from faker import Faker
from requests import Response
from sqlalchemy.orm import Session

from fides.api.db import session
from fides.api.models.connectionconfig import (
    AccessLevel,
    ConnectionConfig,
    ConnectionType,
)
from fides.api.models.datasetconfig import DatasetConfig
from fides.api.models.sql_models import Dataset as CtlDataset
from fides.api.util.saas_util import (
    load_config_with_replacement,
    load_dataset_with_replacement,
)
from tests.ops.test_helpers.saas_test_utils import poll_for_existence
from tests.ops.test_helpers.vault_client import get_secrets

secrets = get_secrets("recharge")


@pytest.fixture(scope="function")
def recharge_secrets(saas_config):
    return {
        "domain": pydash.get(saas_config, "recharge.domain") or secrets["domain"],
        "api_key": pydash.get(saas_config, "recharge.api_key") or secrets["api_key"],
    }


@pytest.fixture(scope="function")
def recharge_identity_email(saas_config):
    return (
        pydash.get(saas_config, "recharge.identity_email") or secrets["identity_email"]
    )


@pytest.fixture(scope="function")
def recharge_erasure_identity_email():
    return f"{uuid.uuid4().hex}@email.com"


@pytest.fixture
def recharge_config() -> Dict[str, Any]:
    return load_config_with_replacement(
        "data/saas/config/recharge_config.yml",
        "<instance_fides_key>",
        "recharge_instance",
    )


@pytest.fixture
def recharge_dataset() -> Dict[str, Any]:
    return load_dataset_with_replacement(
        "data/saas/dataset/recharge_dataset.yml",
        "<instance_fides_key>",
        "recharge_instance",
    )[0]


@pytest.fixture(scope="function")
def METHOD_NAME(
    db: session, recharge_config, recharge_secrets
) -> Generator:
    fides_key = recharge_config["fides_key"]
    connection_config = ConnectionConfig.create(
        db=db,
        data={
            "key": fides_key,
            "name": fides_key,
            "connection_type": ConnectionType.saas,
            "access": AccessLevel.write,
            "secrets": recharge_secrets,
            "saas_config": recharge_config,
        },
    )
    yield connection_config
    connection_config.delete(db)


@pytest.fixture
def recharge_dataset_config(
    db: Session,
    METHOD_NAME: ConnectionConfig,
    recharge_dataset: Dict[str, Any],
) -> Generator:
    fides_key = recharge_dataset["fides_key"]
    METHOD_NAME.name = fides_key
    METHOD_NAME.key = fides_key
    METHOD_NAME.save(db=db)

    ctl_dataset = CtlDataset.create_from_dataset_dict(db, recharge_dataset)

    dataset = DatasetConfig.create(
        db=db,
        data={
            "connection_config_id": METHOD_NAME.id,
            "fides_key": fides_key,
            "ctl_dataset_id": ctl_dataset.id,
        },
    )
    yield dataset
    dataset.delete(db=db)
    ctl_dataset.delete(db)


class RechargeTestClient:
    """Helper to call various Recharge data management requests"""

    def __init__(self, METHOD_NAME: ConnectionConfig):
        self.recharge_secrets = METHOD_NAME.secrets
        self.headers = {
            "X-Recharge-Access-Token": self.recharge_secrets["api_key"],
            "Content-Type": "application/json",
        }
        self.base_url = f"https://{self.recharge_secrets['domain']}"
        self.faker = Faker()
        self.first_name = self.faker.first_name()
        self.last_name = self.faker.last_name()
        self.street_address = self.faker.street_address()

    # 1: Creates, checks for existance and deletes customer
    def create_customer(self, email) -> Response:
        customer_body = {
            "first_name": self.first_name,
            "last_name": self.last_name,
            "email": email,
            "billing_address1": self.street_address,
            "billing_city": "New York City",
            "billing_province": "New York",
            "billing_country": "United States",
            "billing_first_name": self.first_name,
            "billing_last_name": self.last_name,
            "billing_zip": "10001",
        }
        customer_response: Response = requests.post(
            url=f"{self.base_url}/customers",
            json=customer_body,
            headers=self.headers,
        )
        assert customer_response.ok
        return customer_response

    def get_customer(self, email):
        customer_response: Response = requests.get(
            url=f"{self.base_url}/customers",
            params={"email": email},
            headers=self.headers,
        )
        assert customer_response.ok
        return customer_response.json()

    def delete_customer(self, customer_id):
        customer_response: Response = requests.delete(
            url=f"{self.base_url}/customers/{customer_id}", headers=self.headers
        )
        assert customer_response.ok

    # 2: Creates, checks for existance and deletes address
    def create_address(self, customer_id) -> Response:
        address_body = {
            "customer_id": customer_id,
            "address1": self.street_address,
            "address2": self.street_address,
            "city": "Los Angeles",
            "company": "Recharge",
            "country_code": "US",
            "country": "United States",
            "first_name": self.first_name,
            "last_name": self.last_name,
            "order_attributes": [{"name": "custom name", "value": "custom value"}],
            "phone": "5551234567",
            "province": "California",
            "zip": "90001",
        }
        address_response = requests.post(
            url=f"{self.base_url}/addresses",
            headers=self.headers,
            json=address_body,
        )
        assert address_response.ok
        return address_response

    def get_addresses(self, customer_id):
        address_response: Response = requests.get(
            url=f"{self.base_url}/addresses",
            params={"customer_id": customer_id},
            headers=self.headers,
        )
        assert address_response.ok
        return address_response.json()

    def delete_address(self, address_id):
        address_response: Response = requests.delete(
            url=f"{self.base_url}/addresses/{address_id}", headers=self.headers
        )
        assert address_response.ok


@pytest.fixture(scope="function")
def recharge_test_client(METHOD_NAME: RechargeTestClient) -> Generator:
    test_client = RechargeTestClient(
        METHOD_NAME=METHOD_NAME
    )
    yield test_client


@pytest.fixture(scope="function")
def recharge_erasure_data(
    recharge_test_client: RechargeTestClient, recharge_erasure_identity_email: str
) -> Generator:
    customer_response = recharge_test_client.create_customer(
        recharge_erasure_identity_email
    )
    error_message = f"customer with email {recharge_erasure_identity_email} could not be created in Recharge"
    poll_for_existence(
        recharge_test_client.get_customer,
        (recharge_erasure_identity_email,),
        error_message=error_message,
    )
    customer_id = customer_response.json()["customer"]["id"]

    address_response = recharge_test_client.create_address(customer_id)
    error_message = f"address for customer '{recharge_erasure_identity_email}' could not be created in Recharge"
    poll_for_existence(
        recharge_test_client.get_addresses,
        args=(customer_id,),
        error_message=error_message,
    )
    address_id = address_response.json()["address"]["id"]

    yield customer_response, address_response

    recharge_test_client.delete_address(address_id)
    recharge_test_client.delete_customer(customer_id)
2,005
post load parent
""" Faraday Penetration Test IDE Copyright (C) 2016 Infobyte LLC (https://faradaysec.com/) See the file 'doc/LICENSE' for the license information """ # Related third party imports from flask import Blueprint, abort, make_response, jsonify from filteralchemy import FilterSet, operators # pylint:disable=unused-import from marshmallow import fields, post_load, ValidationError from marshmallow.validate import OneOf, Range from sqlalchemy.orm.exc import NoResultFound # Local application imports from faraday.server.models import ( Host, Service, Workspace, db ) from faraday.server.api.base import ( AutoSchema, ReadWriteWorkspacedView, FilterSetMeta, FilterAlchemyMixin, BulkDeleteWorkspacedMixin, BulkUpdateWorkspacedMixin ) from faraday.server.schemas import ( MetadataSchema, MutableField, PrimaryKeyRelatedField, SelfNestedField, ) from faraday.server.utils.command import set_command_id services_api = Blueprint('services_api', __name__) class ServiceSchema(AutoSchema): _id = fields.Integer(attribute='id', dump_only=True) _rev = fields.String(default='', dump_only=True) owned = fields.Boolean(default=False) owner = PrimaryKeyRelatedField('username', dump_only=True, attribute='creator') # Port is loaded via ports port = fields.Integer(dump_only=True, required=True, validate=[Range(min=0, error="The value must be greater than or equal to 0")]) ports = MutableField(fields.Integer(required=True, validate=[Range(min=0, error="The value must be greater than or equal to 0")]), fields.Method(deserialize='load_ports'), required=True, attribute='port') status = fields.String(missing='open', validate=OneOf(Service.STATUSES), allow_none=False) parent = fields.Integer(attribute='host_id') # parent is not required for updates host_id = fields.Integer(attribute='host_id', dump_only=True) vulns = fields.Integer(attribute='vulnerability_count', dump_only=True) credentials = fields.Integer(attribute='credentials_count', dump_only=True) metadata = SelfNestedField(MetadataSchema()) type = fields.Function(lambda obj: 'Service', dump_only=True) summary = fields.String(dump_only=True) command_id = fields.Int(required=False, load_only=True) @staticmethod def load_ports(value): if not isinstance(value, list): raise ValidationError('ports must be a list') if len(value) != 1: raise ValidationError('ports must be a list with exactly one' 'element') port = value.pop() if isinstance(port, str): try: port = int(port) except ValueError as e: raise ValidationError('The value must be a number') from e if port > 65535 or port < 1: raise ValidationError('The value must be in the range [1-65535]') return str(port) @post_load def METHOD_NAME(self, data, **kwargs): """Gets the host_id from parent attribute. Pops it and tries to get a Host with that id in the corresponding workspace. """ host_id = data.pop('host_id', None) if self.context['updating']: if host_id is None: # Partial update? 
return data if 'object' in self.context: if host_id != self.context['object'].parent.id: raise ValidationError('Can\'t change service parent.') else: if any(host_id != obj.parent.id for obj in self.context['objects']): raise ValidationError('Can\'t change service parent.') else: if not host_id: raise ValidationError('Parent id is required when creating a service.') try: data['host'] = Host.query.join(Workspace).filter( Workspace.name == self.context['workspace_name'], Host.id == host_id ).one() except NoResultFound as e: raise ValidationError(f'Host with id {host_id} not found') from e return data class Meta: model = Service fields = ('id', '_id', 'status', 'parent', 'type', 'protocol', 'description', '_rev', 'owned', 'owner', 'credentials', 'vulns', 'name', 'version', '_id', 'port', 'ports', 'metadata', 'summary', 'host_id', 'command_id') class ServiceFilterSet(FilterSet): class Meta(FilterSetMeta): model = Service fields = ('id', 'host_id', 'protocol', 'name', 'port') default_operator = operators.Equal operators = (operators.Equal,) class ServiceView(FilterAlchemyMixin, ReadWriteWorkspacedView, BulkDeleteWorkspacedMixin, BulkUpdateWorkspacedMixin): route_base = 'services' model_class = Service schema_class = ServiceSchema count_extra_filters = [Service.status == 'open'] get_undefer = [Service.credentials_count, Service.vulnerability_count] get_joinedloads = [Service.credentials, Service.update_user] filterset_class = ServiceFilterSet def _envelope_list(self, objects, pagination_metadata=None): services = [] for service in objects: services.append({ 'id': service['_id'], 'key': service['_id'], 'value': service }) return { 'services': services, } def _perform_create(self, data, **kwargs): command_id = data.pop('command_id', None) port_number = data.get("port", "1") if not port_number.isdigit(): abort(make_response(jsonify(message="Invalid Port number"), 400)) obj = super()._perform_create(data, **kwargs) if command_id: set_command_id(db.session, obj, True, command_id) return obj ServiceView.register(services_api)
2,006
create mock svc record
from unittest import TestCase from tapiriik.services import Service, ServiceRecord, ServiceBase from tapiriik.services.interchange import Activity, ActivityType, ActivityStatistic, ActivityStatisticUnit, Waypoint, WaypointType, Lap, Location from datetime import datetime, timedelta import random import pytz from tapiriik.database import db class MockServiceA(ServiceBase): ID = "mockA" SupportedActivities = [ActivityType.Rowing] class MockServiceB(ServiceBase): ID = "mockB" SupportedActivities = [ActivityType.Rowing, ActivityType.Wheelchair] class TapiriikTestCase(TestCase): def assertActivitiesEqual(self, a, b): ''' compare activity records with more granular asserts ''' if a == b: return else: self.assertEqual(a.StartTime, b.StartTime) self.assertEqual(a.EndTime, b.EndTime) self.assertEqual(a.Type, b.Type) self.assertEqual(a.Stats.Distance, b.Stats.Distance) self.assertEqual(a.Name, b.Name) self.assertLapsListsEqual(a.Laps, b.Laps) def assertLapsListsEqual(self, lapsa, lapsb): self.assertEqual(len(lapsa), len(lapsb)) for idx in range(len(lapsa)): la = lapsa[idx] lb = lapsb[idx] self.assertLapsEqual(la, lb) def assertLapsEqual(self, la, lb): self.assertEqual(la.StartTime, lb.StartTime) self.assertEqual(la.EndTime, lb.EndTime) self.assertEqual(len(la.Waypoints), len(lb.Waypoints)) for idx in range(len(la.Waypoints)): wpa = la.Waypoints[idx] wpb = lb.Waypoints[idx] self.assertEqual(wpa.Timestamp.astimezone(pytz.utc), wpb.Timestamp.astimezone(pytz.utc)) self.assertEqual(wpa.Location.Latitude, wpb.Location.Latitude) self.assertEqual(wpa.Location.Longitude, wpb.Location.Longitude) self.assertEqual(wpa.Location.Altitude, wpb.Location.Altitude) self.assertEqual(wpa.Type, wpb.Type) self.assertEqual(wpa.HR, wpb.HR) self.assertEqual(wpa.Calories, wpb.Calories) self.assertEqual(wpa.Power, wpb.Power) self.assertEqual(wpa.Cadence, wpb.Cadence) self.assertEqual(wpa.Temp, wpb.Temp) self.assertEqual(wpa.Location, wpb.Location) self.assertEqual(wpa, wpb) class TestTools: def create_mock_user(): db.test.insert({"asd": "asdd"}) return {"_id": str(random.randint(1, 1000))} def METHOD_NAME(svc): return ServiceRecord({"Service": svc.ID, "_id": str(random.randint(1, 1000)), "ExternalID": str(random.randint(1, 1000))}) def create_mock_servicedata(svc, record=None): return {"ActivityID": random.randint(1, 1000), "Connection": record} def create_mock_servicedatacollection(svc, record=None): record = record if record else TestTools.METHOD_NAME(svc) return {record._id: TestTools.create_mock_servicedata(svc, record=record)} def create_blank_activity(svc=None, actType=ActivityType.Other, record=None): act = Activity() act.Type = actType if svc: record = record if record else TestTools.METHOD_NAME(svc) act.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svc, record=record) act.StartTime = datetime.now() act.EndTime = act.StartTime + timedelta(seconds=42) act.CalculateUID() return act def create_random_activity(svc=None, actType=ActivityType.Other, tz=False, record=None, withPauses=True, withLaps=True): ''' creates completely random activity with valid waypoints and data ''' act = TestTools.create_blank_activity(svc, actType, record=record) if tz is True: tz = pytz.timezone("America/Atikokan") act.TZ = tz elif tz is not False: act.TZ = tz if act.CountTotalWaypoints() > 0: raise ValueError("Waypoint list already populated") # this is entirely random in case the testing account already has events in it (API doesn't support delete, etc) act.StartTime = datetime(2011, 12, 13, 14, 15, 16) if tz is not 
False: if hasattr(tz, "localize"): act.StartTime = tz.localize(act.StartTime) else: act.StartTime = act.StartTime.replace(tzinfo=tz) act.EndTime = act.StartTime + timedelta(0, random.randint(60 * 5, 60 * 60)) # don't really need to upload 1000s of pts to test this... act.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=random.random() * 10000) act.Name = str(random.random()) paused = False waypointTime = act.StartTime backToBackPauses = False act.Laps = [] lap = Lap(startTime=act.StartTime) while waypointTime < act.EndTime: wp = Waypoint() if waypointTime == act.StartTime: wp.Type = WaypointType.Start wp.Timestamp = waypointTime wp.Location = Location(random.random() * 180 - 90, random.random() * 180 - 90, random.random() * 1000) # this is gonna be one intense activity if not (wp.HR == wp.Cadence == wp.Calories == wp.Power == wp.Temp == None): raise ValueError("Waypoint did not initialize cleanly") if svc.SupportsHR: wp.HR = float(random.randint(90, 180)) if svc.SupportsPower: wp.Power = float(random.randint(0, 1000)) if svc.SupportsCalories: wp.Calories = float(random.randint(0, 500)) if svc.SupportsCadence: wp.Cadence = float(random.randint(0, 100)) if svc.SupportsTemp: wp.Temp = float(random.randint(0, 100)) if withPauses and (random.randint(40, 50) == 42 or backToBackPauses) and not paused: # pause quite often wp.Type = WaypointType.Pause paused = True elif paused: paused = False wp.Type = WaypointType.Resume backToBackPauses = not backToBackPauses waypointTime += timedelta(0, int(random.random() + 9.5)) # 10ish seconds lap.Waypoints.append(wp) if waypointTime > act.EndTime: wp.Timestamp = act.EndTime wp.Type = WaypointType.End elif withLaps and wp.Timestamp < act.EndTime and random.randint(40, 60) == 42: # occasionally start new laps lap.EndTime = wp.Timestamp act.Laps.append(lap) lap = Lap(startTime=waypointTime) # Final lap lap.EndTime = act.EndTime act.Laps.append(lap) if act.CountTotalWaypoints() == 0: raise ValueError("No waypoints populated") act.CalculateUID() act.EnsureTZ() return act def create_mock_service(id): mock = MockServiceA() mock.ID = id Service._serviceMappings[id] = mock return mock def create_mock_services(): mockA = MockServiceA() mockB = MockServiceB() Service._serviceMappings["mockA"] = mockA Service._serviceMappings["mockB"] = mockB return (mockA, mockB)
2,007
get view frame from calib frame
import numpy as np import common.transformations.orientation as orient ## -- hardcoded hardware params -- eon_f_focal_length = 910.0 eon_d_focal_length = 650.0 tici_f_focal_length = 2648.0 tici_e_focal_length = tici_d_focal_length = 567.0 # probably wrong? magnification is not consistent across frame eon_f_frame_size = (1164, 874) eon_d_frame_size = (816, 612) tici_f_frame_size = tici_e_frame_size = tici_d_frame_size = (1928, 1208) # aka 'K' aka camera_frame_from_view_frame eon_fcam_intrinsics = np.array([ [eon_f_focal_length, 0.0, float(eon_f_frame_size[0])/2], [0.0, eon_f_focal_length, float(eon_f_frame_size[1])/2], [0.0, 0.0, 1.0]]) eon_intrinsics = eon_fcam_intrinsics # xx eon_dcam_intrinsics = np.array([ [eon_d_focal_length, 0.0, float(eon_d_frame_size[0])/2], [0.0, eon_d_focal_length, float(eon_d_frame_size[1])/2], [0.0, 0.0, 1.0]]) tici_fcam_intrinsics = np.array([ [tici_f_focal_length, 0.0, float(tici_f_frame_size[0])/2], [0.0, tici_f_focal_length, float(tici_f_frame_size[1])/2], [0.0, 0.0, 1.0]]) tici_dcam_intrinsics = np.array([ [tici_d_focal_length, 0.0, float(tici_d_frame_size[0])/2], [0.0, tici_d_focal_length, float(tici_d_frame_size[1])/2], [0.0, 0.0, 1.0]]) tici_ecam_intrinsics = tici_dcam_intrinsics # aka 'K_inv' aka view_frame_from_camera_frame eon_fcam_intrinsics_inv = np.linalg.inv(eon_fcam_intrinsics) eon_intrinsics_inv = eon_fcam_intrinsics_inv # xx tici_fcam_intrinsics_inv = np.linalg.inv(tici_fcam_intrinsics) tici_ecam_intrinsics_inv = np.linalg.inv(tici_ecam_intrinsics) FULL_FRAME_SIZE = tici_f_frame_size FOCAL = tici_f_focal_length fcam_intrinsics = tici_fcam_intrinsics W, H = FULL_FRAME_SIZE[0], FULL_FRAME_SIZE[1] # device/mesh : x->forward, y-> right, z->down # view : x->right, y->down, z->forward device_frame_from_view_frame = np.array([ [ 0., 0., 1.], [ 1., 0., 0.], [ 0., 1., 0.] ]) view_frame_from_device_frame = device_frame_from_view_frame.T def get_calib_from_vp(vp): vp_norm = normalize(vp) yaw_calib = np.arctan(vp_norm[0]) pitch_calib = -np.arctan(vp_norm[1]*np.cos(yaw_calib)) roll_calib = 0 return roll_calib, pitch_calib, yaw_calib # aka 'extrinsic_matrix' # road : x->forward, y -> left, z->up def get_view_frame_from_road_frame(roll, pitch, yaw, height): device_from_road = orient.rot_from_euler([roll, pitch, yaw]).dot(np.diag([1, -1, -1])) view_from_road = view_frame_from_device_frame.dot(device_from_road) return np.hstack((view_from_road, [[0], [height], [0]])) # aka 'extrinsic_matrix' def METHOD_NAME(roll, pitch, yaw, height): device_from_calib= orient.rot_from_euler([roll, pitch, yaw]) view_from_calib = view_frame_from_device_frame.dot(device_from_calib) return np.hstack((view_from_calib, [[0], [height], [0]])) def vp_from_ke(m): """ Computes the vanishing point from the product of the intrinsic and extrinsic matrices C = KE. 
The vanishing point is defined as lim x->infinity C (x, 0, 0, 1).T """ return (m[0, 0]/m[2, 0], m[1, 0]/m[2, 0]) def roll_from_ke(m): # note: different from calibration.h/RollAnglefromKE: i think that one's just wrong return np.arctan2(-(m[1, 0] - m[1, 1] * m[2, 0] / m[2, 1]), -(m[0, 0] - m[0, 1] * m[2, 0] / m[2, 1])) def normalize(img_pts, intrinsics=fcam_intrinsics): # normalizes image coordinates # accepts single pt or array of pts intrinsics_inv = np.linalg.inv(intrinsics) img_pts = np.array(img_pts) input_shape = img_pts.shape img_pts = np.atleast_2d(img_pts) img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0], 1)))) img_pts_normalized = img_pts.dot(intrinsics_inv.T) img_pts_normalized[(img_pts < 0).any(axis=1)] = np.nan return img_pts_normalized[:, :2].reshape(input_shape) def denormalize(img_pts, intrinsics=fcam_intrinsics, width=np.inf, height=np.inf): # denormalizes image coordinates # accepts single pt or array of pts img_pts = np.array(img_pts) input_shape = img_pts.shape img_pts = np.atleast_2d(img_pts) img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0], 1), dtype=img_pts.dtype))) img_pts_denormalized = img_pts.dot(intrinsics.T) if np.isfinite(width): img_pts_denormalized[img_pts_denormalized[:, 0] > width] = np.nan img_pts_denormalized[img_pts_denormalized[:, 0] < 0] = np.nan if np.isfinite(height): img_pts_denormalized[img_pts_denormalized[:, 1] > height] = np.nan img_pts_denormalized[img_pts_denormalized[:, 1] < 0] = np.nan return img_pts_denormalized[:, :2].reshape(input_shape) def device_from_ecef(pos_ecef, orientation_ecef, pt_ecef): # device from ecef frame # device frame is x -> forward, y-> right, z -> down # accepts single pt or array of pts input_shape = pt_ecef.shape pt_ecef = np.atleast_2d(pt_ecef) ecef_from_device_rot = orient.rotations_from_quats(orientation_ecef) device_from_ecef_rot = ecef_from_device_rot.T pt_ecef_rel = pt_ecef - pos_ecef pt_device = np.einsum('jk,ik->ij', device_from_ecef_rot, pt_ecef_rel) return pt_device.reshape(input_shape) def img_from_device(pt_device): # img coordinates from pts in device frame # first transforms to view frame, then to img coords # accepts single pt or array of pts input_shape = pt_device.shape pt_device = np.atleast_2d(pt_device) pt_view = np.einsum('jk,ik->ij', view_frame_from_device_frame, pt_device) # This function should never return negative depths pt_view[pt_view[:, 2] < 0] = np.nan pt_img = pt_view/pt_view[:, 2:3] return pt_img.reshape(input_shape)[:, :2]
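The comments in the sample above spell out the frame conventions (device: x forward, y right, z down; view: x right, y down, z forward) and the pinhole intrinsics matrix K. As a quick illustration of that projection pipeline, the following self-contained sketch rebuilds the tici front-camera K and the view-from-device rotation from the constants shown above and projects one arbitrary device-frame point to pixel coordinates (the point itself is made up):

import numpy as np

# Constants copied from the sample above (tici front camera, 1928x1208 frame, f = 2648).
K = np.array([[2648.0,    0.0, 964.0],
              [   0.0, 2648.0, 604.0],
              [   0.0,    0.0,   1.0]])
view_from_device = np.array([[0., 1., 0.],
                             [0., 0., 1.],
                             [1., 0., 0.]])

# Arbitrary point: 30 m ahead, 1 m to the right, 0.5 m below the camera (device frame).
pt_device = np.array([30.0, 1.0, 0.5])
pt_view = view_from_device @ pt_device   # now x->right, y->down, z->forward
pt_img = (K @ pt_view) / pt_view[2]      # perspective divide
print(pt_img[:2])                        # ~ [1052.3  648.1]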
2,008
test tuple contains
from collections import defaultdict import pytest from diofant import (Basic, Dict, FiniteSet, Integer, Matrix, Rational, Tuple, false, sympify, true) from diofant.abc import p, q, r, s, x, y, z from diofant.core.compatibility import is_sequence, iterable from diofant.core.containers import tuple_wrapper __all__ = () def test_Tuple(): t = (1, 2, 3, 4) st = Tuple(*t) assert set(sympify(t)) == set(st) assert len(t) == len(st) assert set(sympify(t[:2])) == set(st[:2]) assert isinstance(st[:], Tuple) assert st == Tuple(1, 2, 3, 4) assert st.func(*st.args) == st t2 = (p, q, r, s) st2 = Tuple(*t2) assert st2.atoms() == set(t2) assert st == st2.subs({p: 1, q: 2, r: 3, s: 4}) # issue sympy/sympy#5505 assert all(isinstance(arg, Basic) for arg in st.args) assert Tuple(p, 1).subs({p: 0}) == Tuple(0, 1) assert Tuple(p, Tuple(p, 1)).subs({p: 0}) == Tuple(0, Tuple(0, 1)) assert Tuple(t2) == Tuple(Tuple(*t2)) def METHOD_NAME(): t1, t2 = Tuple(1), Tuple(2) assert t1 in Tuple(1, 2, 3, t1, Tuple(t2)) assert t2 not in Tuple(1, 2, 3, t1, Tuple(t2)) def test_Tuple_concatenation(): assert Tuple(1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4) assert (1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4) assert Tuple(1, 2) + (3, 4) == Tuple(1, 2, 3, 4) pytest.raises(TypeError, lambda: Tuple(1, 2) + 3) pytest.raises(TypeError, lambda: 1 + Tuple(2, 3)) # the Tuple case in __radd__ is only reached when a subclass is involved class Tuple2(Tuple): def __radd__(self, other): return Tuple.__radd__(self, other + other) assert Tuple(1, 2) + Tuple2(3, 4) == Tuple(1, 2, 1, 2, 3, 4) assert Tuple2(1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4) def test_Tuple_equality(): assert (Tuple(1, 2) == (1, 2)) is True assert (Tuple(1, 2) != (1, 2)) is False assert (Tuple(1, 2) == (1, 3)) is False assert (Tuple(1, 2) != (1, 3)) is True assert (Tuple(1, 2) == Tuple(1, 2)) is True assert (Tuple(1, 2) != Tuple(1, 2)) is False assert (Tuple(1, 2) == Tuple(1, 3)) is False assert (Tuple(1, 2) != Tuple(1, 3)) is True def test_Tuple_comparision(): assert (Tuple(1, 3) >= Tuple(-10, 30)) is true assert (Tuple(1, 3) <= Tuple(-10, 30)) is false assert (Tuple(1, 3) >= Tuple(1, 3)) is true assert (Tuple(1, 3) <= Tuple(1, 3)) is true def test_Tuple_tuple_count(): assert Tuple(0, 1, 2, 3).tuple_count(4) == 0 assert Tuple(0, 4, 1, 2, 3).tuple_count(4) == 1 assert Tuple(0, 4, 1, 4, 2, 3).tuple_count(4) == 2 assert Tuple(0, 4, 1, 4, 2, 4, 3).tuple_count(4) == 3 def test_Tuple_index(): assert Tuple(4, 0, 1, 2, 3).index(4) == 0 assert Tuple(0, 4, 1, 2, 3).index(4) == 1 assert Tuple(0, 1, 4, 2, 3).index(4) == 2 assert Tuple(0, 1, 2, 4, 3).index(4) == 3 assert Tuple(0, 1, 2, 3, 4).index(4) == 4 pytest.raises(ValueError, lambda: Tuple(0, 1, 2, 3).index(4)) pytest.raises(ValueError, lambda: Tuple(4, 0, 1, 2, 3).index(4, 1)) pytest.raises(ValueError, lambda: Tuple(0, 1, 2, 3, 4).index(4, 1, 4)) def test_Tuple_mul(): assert Tuple(1, 2, 3)*2 == Tuple(1, 2, 3, 1, 2, 3) assert 2*Tuple(1, 2, 3) == Tuple(1, 2, 3, 1, 2, 3) assert Tuple(1, 2, 3)*Integer(2) == Tuple(1, 2, 3, 1, 2, 3) assert Integer(2)*Tuple(1, 2, 3) == Tuple(1, 2, 3, 1, 2, 3) pytest.raises(TypeError, lambda: Tuple(1, 2, 3)*Rational(1, 2)) pytest.raises(TypeError, lambda: Rational(1, 2)*Tuple(1, 2, 3)) def test_tuple_wrapper(): @tuple_wrapper def wrap_tuples_and_return(*t): return t assert wrap_tuples_and_return(p, 1) == (p, 1) assert wrap_tuples_and_return((p, 1)) == (Tuple(p, 1),) assert wrap_tuples_and_return(1, (p, 2), 3) == (1, Tuple(p, 2), 3) def test_iterable_is_sequence(): ordered = [[], (), Tuple(), Matrix([[]])] 
unordered = [set()] not_diofant_iterable = [{}, ''] assert all(is_sequence(i) for i in ordered) assert all(not is_sequence(i) for i in unordered) assert all(iterable(i) for i in ordered + unordered) assert all(not iterable(i) for i in not_diofant_iterable) assert all(iterable(i, exclude=None) for i in not_diofant_iterable) def test_Dict(): d = Dict({x: 1, y: 2, z: 3}) assert d[x] == 1 assert d[y] == 2 pytest.raises(KeyError, lambda: d[2]) assert len(d) == 3 assert set(d.keys()) == {x, y, z} assert set(d.values()) == {1, 2, 3} assert d.get(5, 'default') == 'default' assert x in d assert z in d assert 5 not in d assert d.has(x) assert d.has(1) # Diofant Basic .has method # Test input types # input - a python dict # input - items as args - Diofant style assert (Dict({x: 1, y: 2, z: 3}) == Dict((x, 1), (y, 2), (z, 3))) pytest.raises(TypeError, lambda: Dict(((x, 1), (y, 2), (z, 3)))) with pytest.raises(NotImplementedError): d[5] = 6 # assert immutability assert set(d.items()) == {Tuple(x, 1), Tuple(y, 2), Tuple(z, 3)} assert set(d) == {x, y, z} assert str(d) == '{x: 1, y: 2, z: 3}' assert repr(d) == ("Dict(Tuple(Symbol('x'), Integer(1)), " "Tuple(Symbol('y'), Integer(2)), " "Tuple(Symbol('z'), Integer(3)))") # Test creating a Dict from a Dict. d = Dict({x: 1, y: 2, z: 3}) assert d == Dict(d) # Test for supporting defaultdict d = defaultdict(int) assert d[x] == 0 assert d[y] == 0 assert d[z] == 0 assert Dict(d) d = Dict(d) assert len(d) == 3 assert set(d) == {x, y, z} assert set(d.values()) == {0} assert list(FiniteSet(*[Dict({x: 1}), Dict({y: 2})]))[0] == Dict({x: 1}) def test_eq_and_args(): # issue sympy/sympy#5788 args = [(1, 2), (2, 1)] for o in [Dict, Tuple, FiniteSet]: if o != Tuple: assert o(*args) == o(*reversed(args)) pair = [o(*args), o(*reversed(args))] rpair = reversed(pair) assert sorted(pair) == sorted(rpair) assert set(o(*args)) # doesn't fail
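The comments in the test above (for example the note about __radd__ only being reached when a subclass is involved) hint at how diofant's containers mimic their Python counterparts while remaining immutable Basic objects. A short usage sketch, assuming diofant is installed (the values are arbitrary):

from diofant import Dict, Tuple
from diofant.abc import x, y

t = Tuple(1, 2) + (3, 4)          # plain tuples are coerced on concatenation
assert t == Tuple(1, 2, 3, 4)
assert t.index(3) == 2

d = Dict({x: 1, y: 2})            # immutable, symbol-aware mapping
assert d[x] == 1 and y in d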
2,009
test object mutation
# stdlib from textwrap import dedent # third party from faker import Faker import pytest # syft absolute import syft from syft.client.client import SyftClient from syft.node.worker import Worker from syft.service.action.action_object import ActionObject from syft.service.action.action_permissions import ActionPermission from syft.service.code.user_code import UserCodeStatus from syft.service.context import ChangeContext from syft.service.request.request import ActionStoreChange from syft.service.request.request import ObjectMutation from syft.service.request.request import RequestStatus from syft.service.request.request import UserCodeStatusChange from syft.service.request.request_service import RequestService from syft.service.response import SyftError from syft.service.response import SyftSuccess from syft.service.settings.settings_service import SettingsService from syft.store.document_store import DocumentStore from syft.store.linked_obj import LinkedObject @pytest.fixture def request_service(document_store: DocumentStore): return RequestService(store=document_store) def get_ds_client(faker: Faker, root_client: SyftClient, guest_client: SyftClient): guest_email = faker.email() password = "mysecretpassword" result = root_client.register( name=faker.name(), email=guest_email, password=password, password_verify=password, ) assert isinstance(result, SyftSuccess) guest_client.login(email=guest_email, password=password) return guest_client def METHOD_NAME(worker: Worker): root_client = worker.root_client setting = root_client.api.services.settings.get() linked_obj = LinkedObject.from_obj(setting, SettingsService, node_uid=worker.id) original_name = setting.organization new_name = "Test Organization" object_mutation = ObjectMutation( linked_obj=linked_obj, attr_name="organization", match_type=True, value=new_name, ) change_context = ChangeContext( node=worker, approving_user_credentials=root_client.credentials.verify_key, ) result = object_mutation.apply(change_context) assert result.is_ok() setting = root_client.api.services.settings.get() assert setting.organization == new_name object_mutation.undo(context=change_context) setting = root_client.api.services.settings.get() assert setting.organization == original_name def test_action_store_change(faker: Faker, worker: Worker): root_client = worker.root_client dummy_data = [1, 2, 3] data = ActionObject.from_obj(dummy_data) action_obj = root_client.api.services.action.set(data) assert action_obj.get() == dummy_data ds_client = get_ds_client(faker, root_client, worker.guest_client) action_object_link = LinkedObject.from_obj( action_obj, node_uid=action_obj.syft_node_uid ) permission_change = ActionStoreChange( linked_obj=action_object_link, apply_permission_type=ActionPermission.READ, ) change_context = ChangeContext( node=worker, approving_user_credentials=root_client.credentials.verify_key, requesting_user_credentials=ds_client.credentials.verify_key, ) result = permission_change.apply(change_context) assert result.is_ok() action_obj_ptr = ds_client.api.services.action.get_pointer(action_obj.id) result = action_obj_ptr.get() assert result == dummy_data result = permission_change.undo(change_context) assert result.is_ok() result = action_obj_ptr.get() assert isinstance(result, SyftError) def test_user_code_status_change(faker: Faker, worker: Worker): root_client = worker.root_client dummy_data = [1, 2, 3] data = ActionObject.from_obj(dummy_data) action_obj = root_client.api.services.action.set(data) ds_client = get_ds_client(faker, root_client, 
worker.guest_client) @syft.syft_function( input_policy=syft.ExactMatch(data=action_obj), output_policy=syft.SingleExecutionExactOutput(), ) def simple_function(data): return sum(data) simple_function.code = dedent(simple_function.code) result = ds_client.code.submit(simple_function) assert isinstance(result, SyftSuccess) user_code = ds_client.code.get_all()[0] linked_obj = LinkedObject.from_obj(user_code, node_uid=worker.id) user_code_change = UserCodeStatusChange( value=UserCodeStatus.APPROVED, linked_obj=linked_obj ) change_context = ChangeContext( node=worker, approving_user_credentials=root_client.credentials.verify_key, requesting_user_credentials=ds_client.credentials.verify_key, ) result = user_code_change.apply(change_context) user_code = ds_client.code.get_all()[0] assert user_code.status.approved result = user_code_change.undo(change_context) assert result.is_ok() user_code = ds_client.code.get_all()[0] assert not user_code.status.approved def test_code_accept_deny(faker: Faker, worker: Worker): root_client = worker.root_client dummy_data = [1, 2, 3] data = ActionObject.from_obj(dummy_data) action_obj = root_client.api.services.action.set(data) ds_client = get_ds_client(faker, root_client, worker.guest_client) @syft.syft_function( input_policy=syft.ExactMatch(data=action_obj), output_policy=syft.SingleExecutionExactOutput(), ) def simple_function(data): return sum(data) simple_function.code = dedent(simple_function.code) result = ds_client.code.request_code_execution(simple_function) assert not isinstance(result, SyftError) request = root_client.requests.get_all()[0] result = request.accept_by_depositing_result(result=10) assert isinstance(result, SyftSuccess) request = root_client.requests.get_all()[0] assert request.status == RequestStatus.APPROVED result = ds_client.code.simple_function(data=action_obj) assert result.get() == 10 result = request.deny(reason="Function output needs differential privacy !!") assert isinstance(result, SyftSuccess) request = root_client.requests.get_all()[0] assert request.status == RequestStatus.REJECTED user_code = ds_client.code.get_all()[0] assert not user_code.status.approved result = ds_client.code.simple_function(data=action_obj) assert isinstance(result, SyftError) assert "UserCodeStatus.DENIED" in result.message
2,010
test head response doesnt support content
from pathlib import PurePosixPath from typing import Any, Optional import pytest from litestar import MediaType, get from litestar.datastructures import Cookie from litestar.exceptions import ImproperlyConfiguredException from litestar.response import Response from litestar.response.base import ASGIResponse from litestar.serialization import default_serializer, get_serializer from litestar.status_codes import ( HTTP_100_CONTINUE, HTTP_101_SWITCHING_PROTOCOLS, HTTP_102_PROCESSING, HTTP_103_EARLY_HINTS, HTTP_200_OK, HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED, HTTP_500_INTERNAL_SERVER_ERROR, ) from litestar.testing import create_test_client from litestar.types import Empty def test_response_headers() -> None: @get("/") def handler() -> Response: return Response(content="hello world", media_type=MediaType.TEXT, headers={"first": "123", "second": "456"}) with create_test_client(handler) as client: response = client.get("/") assert response.headers["first"] == "123" assert response.headers["second"] == "456" assert response.headers["content-length"] == "11" assert response.headers["content-type"] == "text/plain; charset=utf-8" def test_response_headers_do_not_lowercase_values() -> None: # reproduces: https://github.com/litestar-org/litestar/issues/693 @get("/") def handler() -> Response: return Response(content="hello world", media_type=MediaType.TEXT, headers={"foo": "BaR"}) with create_test_client(handler) as client: response = client.get("/") assert response.headers["foo"] == "BaR" @pytest.mark.parametrize("as_instance", [True, False]) def test_set_cookie(as_instance: bool) -> None: @get("/") def handler() -> Response: response = Response(content=None) if as_instance: response.set_cookie(Cookie(key="test", value="abc", max_age=60, expires=60, secure=True, httponly=True)) else: response.set_cookie(key="test", value="abc", max_age=60, expires=60, secure=True, httponly=True) assert len(response.cookies) == 1 return response with create_test_client(handler) as client: response = client.get("/") assert response.cookies.get("test") == "abc" def test_delete_cookie() -> None: @get("/create") def create_cookie_handler() -> Response: response = Response(content=None) response.set_cookie("test", "abc", max_age=60, expires=60, secure=True, httponly=True) assert len(response.cookies) == 1 return response @get("/delete") def delete_cookie_handler() -> Response: response = Response(content=None) response.delete_cookie( "test", "abc", ) assert len(response.cookies) == 1 return response with create_test_client(route_handlers=[create_cookie_handler, delete_cookie_handler]) as client: response = client.get("/create") assert response.cookies.get("test") == "abc" assert client.cookies.get("test") == "abc" response = client.get("/delete") assert response.cookies.get("test") is None # the commented out assert fails, because of the starlette test client's behaviour - which doesn't clear # cookies. 
@pytest.mark.parametrize( "media_type, expected, should_have_content_length", ((MediaType.TEXT, b"", False), (MediaType.HTML, b"", False), (MediaType.JSON, b"null", True)), ) def test_empty_response(media_type: MediaType, expected: bytes, should_have_content_length: bool) -> None: @get("/", media_type=media_type) def handler() -> None: return with create_test_client(handler) as client: response = client.get("/") assert response.content == expected assert response.headers["content-length"] == str(len(expected)) @pytest.mark.parametrize("status_code", (HTTP_204_NO_CONTENT, HTTP_304_NOT_MODIFIED)) def test_response_without_payload(status_code: int) -> None: @get("/") def handler() -> Response: return Response(b"", status_code=status_code) with create_test_client(handler) as client: response = client.get("/") assert "content-type" not in response.headers assert "content-length" not in response.headers @pytest.mark.parametrize( "status, body, should_raise", ( (HTTP_100_CONTINUE, None, False), (HTTP_101_SWITCHING_PROTOCOLS, None, False), (HTTP_102_PROCESSING, None, False), (HTTP_103_EARLY_HINTS, None, False), (HTTP_204_NO_CONTENT, None, False), (HTTP_100_CONTINUE, "1", True), (HTTP_101_SWITCHING_PROTOCOLS, "1", True), (HTTP_102_PROCESSING, "1", True), (HTTP_103_EARLY_HINTS, "1", True), (HTTP_204_NO_CONTENT, "1", True), ), ) def test_statuses_without_body(status: int, body: Optional[str], should_raise: bool) -> None: @get("/") def handler() -> Response: return Response(content=body, status_code=status) with create_test_client(handler) as client: response = client.get("/") if should_raise: assert response.status_code == HTTP_500_INTERNAL_SERVER_ERROR else: assert response.status_code == status assert "content-length" not in response.headers @pytest.mark.parametrize( "body, media_type, should_raise", ( ("", MediaType.TEXT, False), ("abc", MediaType.TEXT, False), (b"", MediaType.HTML, False), (b"abc", MediaType.HTML, False), ({"key": "value"}, MediaType.TEXT, True), ([1, 2, 3], MediaType.TEXT, True), ({"key": "value"}, MediaType.HTML, True), ([1, 2, 3], MediaType.HTML, True), ([], MediaType.HTML, False), ([], MediaType.TEXT, False), ({}, MediaType.HTML, False), ({}, MediaType.TEXT, False), ({"abc": "def"}, MediaType.JSON, False), (Empty, MediaType.JSON, True), ), ) def test_render_method(body: Any, media_type: MediaType, should_raise: bool) -> None: @get("/", media_type=media_type) def handler() -> Any: return body with create_test_client(handler) as client: response = client.get("/") if should_raise: assert response.status_code == HTTP_500_INTERNAL_SERVER_ERROR else: assert response.status_code == HTTP_200_OK def test_get_serializer() -> None: class Foo: pass foo_encoder = {Foo: lambda f: "it's a foo"} path_encoder = {PurePosixPath: lambda p: "it's a path"} class FooResponse(Response): type_encoders = foo_encoder assert get_serializer() is default_serializer assert get_serializer(type_encoders=foo_encoder)(Foo()) == "it's a foo" assert get_serializer(type_encoders=path_encoder)(PurePosixPath()) == "it's a path" assert get_serializer(FooResponse(None).type_encoders)(Foo()) == "it's a foo" assert ( get_serializer(FooResponse(None, type_encoders={Foo: lambda f: "foo"}).response_type_encoders)(Foo()) == "foo" ) def METHOD_NAME() -> None: with pytest.raises(ImproperlyConfiguredException): ASGIResponse(body=b"hello world", media_type=MediaType.TEXT, is_head_response=True)
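The tests above document how litestar's Response carries headers and cookies through to the client (including the note about the test client not clearing cookies). A minimal end-to-end sketch under the same assumptions (litestar installed; the route path, header and cookie values are arbitrary):

from litestar import Litestar, MediaType, get
from litestar.response import Response
from litestar.testing import TestClient

@get("/greet")
def greet() -> Response:
    # Attach a custom header and a cookie, as in the tests above.
    resp = Response(content="hello world", media_type=MediaType.TEXT, headers={"x-demo": "123"})
    resp.set_cookie(key="session", value="abc", max_age=60)
    return resp

app = Litestar(route_handlers=[greet])

with TestClient(app=app) as client:
    r = client.get("/greet")
    assert r.headers["x-demo"] == "123"
    assert r.cookies.get("session") == "abc"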
2,011
provisioning state
# coding=utf-8 # *** WARNING: this file was generated by pulumi. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs __all__ = [ 'GetOutboundEndpointResult', 'AwaitableGetOutboundEndpointResult', 'get_outbound_endpoint', 'get_outbound_endpoint_output', ] @pulumi.output_type class GetOutboundEndpointResult: """ Describes an outbound endpoint for a DNS resolver. """ def __init__(__self__, etag=None, id=None, location=None, name=None, METHOD_NAME=None, resource_guid=None, subnet=None, system_data=None, tags=None, type=None): if etag and not isinstance(etag, str): raise TypeError("Expected argument 'etag' to be a str") pulumi.set(__self__, "etag", etag) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if location and not isinstance(location, str): raise TypeError("Expected argument 'location' to be a str") pulumi.set(__self__, "location", location) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if METHOD_NAME and not isinstance(METHOD_NAME, str): raise TypeError("Expected argument 'provisioning_state' to be a str") pulumi.set(__self__, "provisioning_state", METHOD_NAME) if resource_guid and not isinstance(resource_guid, str): raise TypeError("Expected argument 'resource_guid' to be a str") pulumi.set(__self__, "resource_guid", resource_guid) if subnet and not isinstance(subnet, dict): raise TypeError("Expected argument 'subnet' to be a dict") pulumi.set(__self__, "subnet", subnet) if system_data and not isinstance(system_data, dict): raise TypeError("Expected argument 'system_data' to be a dict") pulumi.set(__self__, "system_data", system_data) if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def etag(self) -> str: """ ETag of the outbound endpoint. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> str: """ The geo-location where the resource lives """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> str: """ The name of the resource """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def METHOD_NAME(self) -> str: """ The current provisioning state of the outbound endpoint. This is a read-only property and any attempt to set this value will be ignored. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="resourceGuid") def resource_guid(self) -> str: """ The resourceGuid property of the outbound endpoint resource. """ return pulumi.get(self, "resource_guid") @property @pulumi.getter def subnet(self) -> 'outputs.SubResourceResponse': """ The reference to the subnet used for the outbound endpoint. 
""" return pulumi.get(self, "subnet") @property @pulumi.getter(name="systemData") def system_data(self) -> 'outputs.SystemDataResponse': """ Metadata pertaining to creation and last modification of the resource. """ return pulumi.get(self, "system_data") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> str: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type") class AwaitableGetOutboundEndpointResult(GetOutboundEndpointResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetOutboundEndpointResult( etag=self.etag, id=self.id, location=self.location, name=self.name, METHOD_NAME=self.METHOD_NAME, resource_guid=self.resource_guid, subnet=self.subnet, system_data=self.system_data, tags=self.tags, type=self.type) def get_outbound_endpoint(dns_resolver_name: Optional[str] = None, outbound_endpoint_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOutboundEndpointResult: """ Gets properties of an outbound endpoint for a DNS resolver. :param str dns_resolver_name: The name of the DNS resolver. :param str outbound_endpoint_name: The name of the outbound endpoint for the DNS resolver. :param str resource_group_name: The name of the resource group. The name is case insensitive. """ __args__ = dict() __args__['dnsResolverName'] = dns_resolver_name __args__['outboundEndpointName'] = outbound_endpoint_name __args__['resourceGroupName'] = resource_group_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:network/v20220701:getOutboundEndpoint', __args__, opts=opts, typ=GetOutboundEndpointResult).value return AwaitableGetOutboundEndpointResult( etag=pulumi.get(__ret__, 'etag'), id=pulumi.get(__ret__, 'id'), location=pulumi.get(__ret__, 'location'), name=pulumi.get(__ret__, 'name'), METHOD_NAME=pulumi.get(__ret__, 'provisioning_state'), resource_guid=pulumi.get(__ret__, 'resource_guid'), subnet=pulumi.get(__ret__, 'subnet'), system_data=pulumi.get(__ret__, 'system_data'), tags=pulumi.get(__ret__, 'tags'), type=pulumi.get(__ret__, 'type')) @_utilities.lift_output_func(get_outbound_endpoint) def get_outbound_endpoint_output(dns_resolver_name: Optional[pulumi.Input[str]] = None, outbound_endpoint_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetOutboundEndpointResult]: """ Gets properties of an outbound endpoint for a DNS resolver. :param str dns_resolver_name: The name of the DNS resolver. :param str outbound_endpoint_name: The name of the outbound endpoint for the DNS resolver. :param str resource_group_name: The name of the resource group. The name is case insensitive. """ ...
2,012
is sequence finished
from typing import Any, Callable, Optional import torch import torch.distributed as dist import torch.nn as nn try: from transformers.generation_logits_process import ( LogitsProcessorList, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ) except ImportError: from transformers.generation import ( LogitsProcessorList, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ) def prepare_logits_processor( top_k: Optional[int] = None, top_p: Optional[float] = None, temperature: Optional[float] = None, ) -> LogitsProcessorList: processor_list = LogitsProcessorList() if temperature is not None and temperature != 1.0: processor_list.append(TemperatureLogitsWarper(temperature)) if top_k is not None and top_k != 0: processor_list.append(TopKLogitsWarper(top_k)) if top_p is not None and top_p < 1.0: processor_list.append(TopPLogitsWarper(top_p)) return processor_list def METHOD_NAME(unfinished_sequences: torch.Tensor) -> bool: if dist.is_initialized() and dist.get_world_size() > 1: # consider DP unfinished_sequences = unfinished_sequences.clone() dist.all_reduce(unfinished_sequences) return unfinished_sequences.max() == 0 def sample( model: nn.Module, input_ids: torch.Tensor, max_length: int, early_stopping: bool = False, eos_token_id: Optional[int] = None, pad_token_id: Optional[int] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, temperature: Optional[float] = None, prepare_inputs_fn: Optional[Callable[[torch.Tensor, Any], dict]] = None, update_model_kwargs_fn: Optional[Callable[[dict, Any], dict]] = None, **model_kwargs ) -> torch.Tensor: if input_ids.size(1) >= max_length: return input_ids logits_processor = prepare_logits_processor(top_k, top_p, temperature) unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) for _ in range(input_ids.size(1), max_length): model_inputs = ( prepare_inputs_fn(input_ids, **model_kwargs) if prepare_inputs_fn is not None else {"input_ids": input_ids} ) outputs = model(**model_inputs) next_token_logits = outputs["logits"][:, -1, :] # pre-process distribution next_token_logits = logits_processor(input_ids, next_token_logits) # sample probs = torch.softmax(next_token_logits, dim=-1, dtype=torch.float) next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) # finished sentences should have their next token be a padding token if eos_token_id is not None: if pad_token_id is None: raise ValueError( "If `eos_token_id` is defined, make sure that `pad_token_id` is defined." 
) next_tokens = next_tokens * unfinished_sequences + pad_token_id * ( 1 - unfinished_sequences ) # update generated ids, model inputs for next step input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) if update_model_kwargs_fn is not None: model_kwargs = update_model_kwargs_fn(outputs, **model_kwargs) # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: unfinished_sequences = unfinished_sequences.mul( (next_tokens != eos_token_id).long() ) # stop when each sentence is finished if early_stopping=True if early_stopping and METHOD_NAME(unfinished_sequences): break return input_ids def generate( model: nn.Module, input_ids: torch.Tensor, max_length: int, num_beams: int = 1, do_sample: bool = True, early_stopping: bool = False, eos_token_id: Optional[int] = None, pad_token_id: Optional[int] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, temperature: Optional[float] = None, prepare_inputs_fn: Optional[Callable[[torch.Tensor, Any], dict]] = None, update_model_kwargs_fn: Optional[Callable[[dict, Any], dict]] = None, **model_kwargs ) -> torch.Tensor: """Generate token sequence. The returned sequence is input_ids + generated_tokens. Args: model (nn.Module): model input_ids (torch.Tensor): input sequence max_length (int): max length of the returned sequence num_beams (int, optional): number of beams. Defaults to 1. do_sample (bool, optional): whether to do sample. Defaults to True. early_stopping (bool, optional): if True, the sequence length may be smaller than max_length due to finding eos. Defaults to False. eos_token_id (Optional[int], optional): end of sequence token id. Defaults to None. pad_token_id (Optional[int], optional): pad token id. Defaults to None. top_k (Optional[int], optional): the number of highest probability vocabulary tokens to keep for top-k-filtering. Defaults to None. top_p (Optional[float], optional): If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. Defaults to None. temperature (Optional[float], optional): The value used to module the next token probabilities. Defaults to None. prepare_inputs_fn (Optional[Callable[[torch.Tensor, Any], dict]], optional): Function to preprocess model inputs. Arguments of this function should be input_ids and model_kwargs. Defaults to None. update_model_kwargs_fn (Optional[Callable[[dict, Any], dict]], optional): Function to update model_kwargs based on outputs. Arguments of this function should be outputs and model_kwargs. Defaults to None. """ is_greedy_gen_mode = (num_beams == 1) and do_sample is False is_sample_gen_mode = (num_beams == 1) and do_sample is True is_beam_gen_mode = (num_beams > 1) and do_sample is False if is_greedy_gen_mode: # run greedy search raise NotImplementedError elif is_sample_gen_mode: # run sample return sample( model, input_ids, max_length, early_stopping=early_stopping, eos_token_id=eos_token_id, pad_token_id=pad_token_id, top_k=top_k, top_p=top_p, temperature=temperature, prepare_inputs_fn=prepare_inputs_fn, update_model_kwargs_fn=update_model_kwargs_fn, **model_kwargs ) elif is_beam_gen_mode: raise NotImplementedError else: raise ValueError("Unsupported generation mode")
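The generate docstring above describes the sampling knobs (top_k, top_p, temperature, early stopping on eos). A usage sketch, assuming the module above is saved as generation_utils.py and that a small causal LM such as gpt2 can be loaded through transformers (the model choice and prompt are arbitrary):

from transformers import AutoModelForCausalLM, AutoTokenizer

from generation_utils import generate  # hypothetical module name for the code above

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer("Hello, my name is", return_tensors="pt").input_ids
output = generate(
    model,
    input_ids,
    max_length=30,
    do_sample=True,               # only the sampling path is implemented above
    early_stopping=True,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.eos_token_id,
    top_k=50,
    top_p=0.95,
    temperature=0.7,
)
print(tokenizer.decode(output[0], skip_special_tokens=True))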
2,013
test trotter hamiltonian scalar mul
"""Test Trotter Hamiltonian methods from `qibo/core/hamiltonians.py`.""" import numpy as np import pytest from qibo import hamiltonians from qibo.backends import NumpyBackend from qibo.quantum_info import random_hermitian, random_statevector from .utils import random_complex @pytest.mark.parametrize("nqubits", [3, 4]) @pytest.mark.parametrize("model", ["TFIM", "XXZ", "Y", "MaxCut"]) def test_trotter_hamiltonian_to_dense(backend, nqubits, model): """Test that Trotter Hamiltonian dense form agrees with normal Hamiltonian.""" local_ham = getattr(hamiltonians, model)(nqubits, dense=False, backend=backend) target_ham = getattr(hamiltonians, model)(nqubits, backend=backend) final_ham = local_ham.dense backend.assert_allclose(final_ham.matrix, target_ham.matrix, atol=1e-15) def METHOD_NAME(backend, nqubits=3): """Test multiplication of Trotter Hamiltonian with scalar.""" local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend) target_ham = 2 * hamiltonians.TFIM(nqubits, h=1.0, backend=backend) local_dense = (2 * local_ham).dense backend.assert_allclose(local_dense.matrix, target_ham.matrix) local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend) local_dense = (local_ham * 2).dense backend.assert_allclose(local_dense.matrix, target_ham.matrix) def test_trotter_hamiltonian_scalar_add(backend, nqubits=4): """Test addition of Trotter Hamiltonian with scalar.""" local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend) target_ham = 2 + hamiltonians.TFIM(nqubits, h=1.0, backend=backend) local_dense = (2 + local_ham).dense backend.assert_allclose(local_dense.matrix, target_ham.matrix) local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend) local_dense = (local_ham + 2).dense backend.assert_allclose(local_dense.matrix, target_ham.matrix) def test_trotter_hamiltonian_scalar_sub(backend, nqubits=3): """Test subtraction of Trotter Hamiltonian with scalar.""" local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend) target_ham = 2 - hamiltonians.TFIM(nqubits, h=1.0, backend=backend) local_dense = (2 - local_ham).dense backend.assert_allclose(local_dense.matrix, target_ham.matrix) target_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) - 2 local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend) local_dense = (local_ham - 2).dense backend.assert_allclose(local_dense.matrix, target_ham.matrix) def test_trotter_hamiltonian_operator_add_and_sub(backend, nqubits=3): """Test addition and subtraction between Trotter Hamiltonians.""" local_ham1 = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend) local_ham2 = hamiltonians.TFIM(nqubits, h=0.5, dense=False, backend=backend) local_ham = local_ham1 + local_ham2 target_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) + hamiltonians.TFIM( nqubits, h=0.5, backend=backend ) dense = local_ham.dense backend.assert_allclose(dense.matrix, target_ham.matrix) local_ham = local_ham1 - local_ham2 target_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) - hamiltonians.TFIM( nqubits, h=0.5, backend=backend ) dense = local_ham.dense backend.assert_allclose(dense.matrix, target_ham.matrix) @pytest.mark.parametrize("nqubits,normalize", [(3, False), (4, False)]) def test_trotter_hamiltonian_matmul(backend, nqubits, normalize): """Test Trotter Hamiltonian expectation value.""" local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend) dense_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) state = 
backend.cast(random_complex((2**nqubits,))) trotter_ev = local_ham.expectation(state, normalize) target_ev = dense_ham.expectation(state, normalize) backend.assert_allclose(trotter_ev, target_ev) state = random_complex((2**nqubits,)) trotter_ev = local_ham.expectation(state, normalize) target_ev = dense_ham.expectation(state, normalize) backend.assert_allclose(trotter_ev, target_ev) trotter_matmul = local_ham @ state target_matmul = dense_ham @ state backend.assert_allclose(trotter_matmul, target_matmul) def test_trotter_hamiltonian_three_qubit_term(backend): """Test creating ``TrotterHamiltonian`` with three qubit term.""" from scipy.linalg import expm from qibo.hamiltonians.terms import HamiltonianTerm numpy_backend = NumpyBackend() m1 = random_hermitian(2**3, backend=numpy_backend) m2 = random_hermitian(2**2, backend=numpy_backend) m3 = random_hermitian(2**1, backend=numpy_backend) terms = [ HamiltonianTerm(m1, 0, 1, 2), HamiltonianTerm(m2, 2, 3), HamiltonianTerm(m3, 1), ] m1 = backend.cast(m1, dtype=m1.dtype) m2 = backend.cast(m2, dtype=m2.dtype) m3 = backend.cast(m3, dtype=m3.dtype) ham = hamiltonians.SymbolicHamiltonian(backend=backend) ham.terms = terms # Test that the `TrotterHamiltonian` dense matrix is correct eye = np.eye(2, dtype=complex) eye = backend.cast(eye, dtype=eye.dtype) mm1 = np.kron(m1, eye) mm2 = np.kron(np.kron(eye, eye), m2) mm3 = np.kron(np.kron(eye, m3), np.kron(eye, eye)) target_ham = hamiltonians.Hamiltonian(4, mm1 + mm2 + mm3, backend=backend) backend.assert_allclose(ham.matrix, target_ham.matrix) dt = 1e-2 initial_state = random_statevector(2**4, backend=backend) circuit = ham.circuit(dt=dt) final_state = backend.execute_circuit(circuit, np.copy(initial_state)) mm1 = backend.to_numpy(mm1) mm2 = backend.to_numpy(mm2) mm3 = backend.to_numpy(mm3) u = [expm(-0.5j * dt * (mm1 + mm3)), expm(-0.5j * dt * mm2)] u = backend.cast(u) target_state = np.dot(u[1], np.dot(u[0], initial_state)) target_state = np.dot(u[0], np.dot(u[1], target_state)) backend.assert_allclose(final_state, target_state) def test_old_trotter_hamiltonian_errors(): """Check errors when creating the deprecated ``TrotterHamiltonian`` object.""" with pytest.raises(NotImplementedError): h = hamiltonians.TrotterHamiltonian() with pytest.raises(NotImplementedError): h = hamiltonians.TrotterHamiltonian.from_symbolic(0, 1)
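The tests above check that scalar algebra on the Trotter (non-dense) form densifies to the same matrix as the dense Hamiltonian. A small standalone sketch of that check, assuming qibo is installed (3 qubits keeps the 8x8 matrices cheap to build):

import numpy as np
from qibo import hamiltonians

dense_ham = hamiltonians.TFIM(3, h=1.0)               # full 8x8 matrix
local_ham = hamiltonians.TFIM(3, h=1.0, dense=False)  # sum of local two-qubit terms

# Scalar multiplication should commute with densification.
np.testing.assert_allclose((2 * local_ham).dense.matrix,
                           (2 * dense_ham).matrix, atol=1e-12)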
2,014
test given recognizer result then one is
import pytest from presidio_anonymizer.entities import InvalidParamException, RecognizerResult @pytest.mark.parametrize( # fmt: off "start, end", [ (0, 10), (2, 8), (0, 8), (0, 10), ], # fmt: on ) def test_given_recognizer_results_then_one_contains_another(start, end): first = create_recognizer_result("entity", 0, 0, 10) second = create_recognizer_result("entity", 0, start, end) assert first.contains(second) @pytest.mark.parametrize( # fmt: off "start, end", [ (4, 10), (5, 11), (0, 5), (0, 6), ], # fmt: on ) def test_given_recognizer_result_then_they_do_not_contain_one_another(start, end): first = create_recognizer_result("entity", 0, 5, 10) second = create_recognizer_result("entity", 0, start, end) assert not first.contains(second) def test_given_recognizer_results_with_same_indices_then_indices_are_equal(): first = create_recognizer_result("entity", 0, 0, 10) second = create_recognizer_result("entity", 0, 0, 10) assert first.equal_indices(second) @pytest.mark.parametrize( # fmt: off "start, end", [ (4, 10), (5, 11), (0, 5), (0, 6), ], # fmt: on ) def test_given_recognizer_results_with_different_indices_then_indices_are_not_equal( start, end ): first = create_recognizer_result("entity", 0, 5, 10) second = create_recognizer_result("entity", 0, start, end) assert not first.equal_indices(second) @pytest.mark.parametrize( # fmt: off "start, end, err", [ ("0", 10, "Invalid parameter value for start. Expecting 'number', but got 'string'."), (0, "10", "Invalid parameter value for end. Expecting 'number', but got 'string'."), ], # fmt: on ) def test_given_invalid_string_start_instead_of_int_then_we_fail(start, end, err): with pytest.raises(InvalidParamException, match=err): create_recognizer_result("bla", 0.2, start, end) def test_given_identical_recognizer_results_then_they_are_equal(): first = create_recognizer_result("bla", 0.2, 0, 10) second = create_recognizer_result("bla", 0.2, 0, 10) assert first == second @pytest.mark.parametrize( # fmt: off "entity_type, score, start, end", [ ("bla", 0.2, 4, 10), ("changed", 0.2, 0, 10), ("bla", 0.2, 0, 11), ("bla", 0.3, 0, 10), ], # fmt: on ) def test_given_different_recognizer_result_then_they_are_not_equal( entity_type, score, start, end ): first = create_recognizer_result("bla", 0.2, 0, 10) second = create_recognizer_result(entity_type, score, start, end) assert first != second def test_given_recognizer_result_then_their_hash_is_equal(): first = create_recognizer_result("entity", 0, 0, 10) second = create_recognizer_result("entity", 0, 0, 10) assert first.__hash__() == second.__hash__() @pytest.mark.parametrize( # fmt: off "entity_type, score, start, end", [ ("bla", 0.2, 4, 10), ("changed", 0.2, 0, 10), ("bla", 0.2, 0, 11), ("bla", 0.3, 0, 10), ], # fmt: on ) def test_given_different_recognizer_results_then_hash_is_not_equal( entity_type, score, start, end ): first = create_recognizer_result("bla", 0.2, 0, 10) second = create_recognizer_result(entity_type, score, start, end) assert first.__hash__() != second.__hash__() @pytest.mark.parametrize( # fmt: off "entity_type, score, start, end", [ ("bla", 0.2, 0, 10), ("changed", 0.2, 2, 10), ("bla", 0.3, 0, 11), ("bla", 0.1, 0, 10), ], # fmt: on ) def test_given_recognizer_results_with_conflicting_indices_then_there_is_a_conflict( entity_type, score, start, end ): first = create_recognizer_result("bla", 0.2, 2, 10) second = create_recognizer_result(entity_type, score, start, end) assert first.has_conflict(second) @pytest.mark.parametrize( # fmt: off "entity_type, score, start, end", [ ("bla", 0.2, 3, 10), 
("changed", 0.1, 2, 10), ("bla", 0.3, 0, 9), ], # fmt: on ) def test_given_recognizer_results_with_no_conflicting_indices_then_there_is_no_conflict( entity_type, score, start, end ): first = create_recognizer_result("bla", 0.2, 2, 10) second = create_recognizer_result(entity_type, score, start, end) assert not first.has_conflict(second) @pytest.mark.parametrize( # fmt: off "request_json, result_text", [ ({}, "Invalid input, result must contain start",), ({ "end": 32, "score": 0.8, "entity_type": "NUMBER" }, "Invalid input, result must contain start",), ({ "start": 28, "score": 0.8, "entity_type": "NUMBER" }, "Invalid input, result must contain end",), ({ "start": 28, "end": 32, "entity_type": "NUMBER" }, "Invalid input, analyzer result must contain score",), ({ "start": 28, "end": 32, "score": 0.8, }, "Invalid input, result must contain entity_type",), ], # fmt: on ) def test_given_json_for_creating_recognizer_result_without_text_then_creation_fails( request_json, result_text ): with pytest.raises(InvalidParamException) as e: RecognizerResult.from_json(request_json) assert result_text == e.value.err_msg def test_given_valid_json_for_creating_recognizer_result_then_creation_is_successful(): data = create_recognizer_result("NUMBER", 0.8, 0, 32) assert data.start == 0 assert data.end == 32 assert data.score == 0.8 assert data.entity_type == "NUMBER" @pytest.mark.parametrize( # fmt: off "start, end", [ (4, 10), (4, 9), (0, 2), (5, 9), ], # fmt: on ) def test_given_recognizer_results_then_one_is_greater_then_another(start, end): first = create_recognizer_result("entity", 0, 5, 10) second = create_recognizer_result("entity", 0, start, end) assert first.__gt__(second) @pytest.mark.parametrize( # fmt: off "start, end", [ (5, 10), (6, 12), (6, 7), ], # fmt: on ) def METHOD_NAME(start, end): first = create_recognizer_result("entity", 0, 5, 10) second = create_recognizer_result("entity", 0, start, end) assert not first.__gt__(second) def test_given_endpoint_larger_then_start_point_then_we_fail(): with pytest.raises(InvalidParamException) as e: create_recognizer_result("entity", 0, 10, 0) assert ( e.value.err_msg == "Invalid input, start index '10' " "must be smaller than end index '0'" ) def test_given_endpoint_equal_to_start_point_then_we_succeed(): assert create_recognizer_result("entity", 0, 0, 0) @pytest.mark.parametrize( # fmt: off "start, end", [ (-1, 10), (6, -12), (-2, -2), ], # fmt: on ) def test_given_negative_start_or_endpoint_then_we_fail(start, end): with pytest.raises( InvalidParamException, match="Invalid input, result start and end must be positive", ): create_recognizer_result("entity", 0, start, end) def create_recognizer_result(entity_type: str, score: float, start: int, end: int): data = {"entity_type": entity_type, "score": score, "start": start, "end": end} return RecognizerResult.from_json(data)
2,015
dict has value
# -*- coding: utf-8 -*- # # widgets.py - Mycodo core utils # # Copyright (C) 2015-2020 Kyle T. Gabriel <[email protected]> # # This file is part of Mycodo # # Mycodo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Mycodo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Mycodo. If not, see <http://www.gnu.org/licenses/>. # # Contact at kylegabriel.com import logging import os from mycodo.config import PATH_WIDGETS from mycodo.config import PATH_WIDGETS_CUSTOM from mycodo.utils.modules import load_module_from_file logger = logging.getLogger("mycodo.utils.widgets") def parse_widget_information(exclude_custom=False): """Parses the variables assigned in each Widget and return a dictionary of IDs and values.""" def METHOD_NAME(dict_inp, widget_cus, key, force_type=None): if (key in widget_cus.WIDGET_INFORMATION and (widget_cus.WIDGET_INFORMATION[key] or widget_cus.WIDGET_INFORMATION[key] == 0)): if force_type == 'list': if isinstance(widget_cus.WIDGET_INFORMATION[key], list): dict_inp[widget_cus.WIDGET_INFORMATION['widget_name_unique']][key] = \ widget_cus.WIDGET_INFORMATION[key] else: dict_inp[widget_cus.WIDGET_INFORMATION['widget_name_unique']][key] = \ [widget_cus.WIDGET_INFORMATION[key]] else: dict_inp[widget_cus.WIDGET_INFORMATION['widget_name_unique']][key] = \ widget_cus.WIDGET_INFORMATION[key] return dict_inp excluded_files = [ '__init__.py', '__pycache__', 'base_widget.py', 'custom_widgets', 'examples', 'tmp_widgets' ] widget_paths = [PATH_WIDGETS] if not exclude_custom: widget_paths.append(PATH_WIDGETS_CUSTOM) dict_widgets = {} for each_path in widget_paths: real_path = os.path.realpath(each_path) for each_file in os.listdir(real_path): if each_file in excluded_files: continue full_path = f"{real_path}/{each_file}" widget_custom, status = load_module_from_file(full_path, 'widgets') if not widget_custom or not hasattr(widget_custom, 'WIDGET_INFORMATION'): continue # Populate dictionary of widget information if widget_custom.WIDGET_INFORMATION['widget_name_unique'] in dict_widgets: logger.error(f"Error: Cannot add widget modules because it does not have " f"a unique name: {widget_custom.WIDGET_INFORMATION['widget_name_unique']}") else: dict_widgets[widget_custom.WIDGET_INFORMATION['widget_name_unique']] = {} dict_widgets[widget_custom.WIDGET_INFORMATION['widget_name_unique']]['file_path'] = full_path dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_name') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_library') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'no_class') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_height') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_width') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'message') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'url_datasheet', force_type='list') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'url_manufacturer', force_type='list') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'url_product_purchase', force_type='list') dict_widgets = METHOD_NAME(dict_widgets, 
widget_custom, 'url_additional', force_type='list') # Dependencies dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'dependencies_module') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'dependencies_message') # Which form options to display and whether each option is enabled dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'options_enabled') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'options_disabled') # Misc dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'period') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'endpoints') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'execute_at_creation') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'execute_at_modification') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'execute_at_deletion') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'generate_page_variables') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'custom_options_message') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'custom_options') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'custom_commands_message') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'custom_commands') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_head') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_title_bar') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_body') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_configure_options') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_js') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_js_ready') dict_widgets = METHOD_NAME(dict_widgets, widget_custom, 'widget_dashboard_js_ready_end') return dict_widgets
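A short usage sketch of the parser above. It is hedged in that the set of keys present per widget depends on what each module declares in WIDGET_INFORMATION:

from mycodo.utils.widgets import parse_widget_information

# Only the built-in widget modules; leave exclude_custom at its default (False)
# to also scan PATH_WIDGETS_CUSTOM.
widgets = parse_widget_information(exclude_custom=True)

for unique_name, info in widgets.items():
    # 'file_path' is always set; other keys exist only if the widget module
    # declares them in WIDGET_INFORMATION.
    print(unique_name, info.get("widget_name"), info["file_path"])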
2,016
run container
import pytest import docker import os from time import sleep from subprocess import Popen, PIPE from sinspqa import LOGS_PATH, is_containerized from sinspqa.sinsp import SinspStreamerBuilder def pytest_addoption(parser): parser.addoption('--no-kmod', action='store_true', default=False, help='Skip tests with kernel module') parser.addoption('--no-ebpf', action='store_true', default=False, help='Skip tests with eBPF') parser.addoption('--no-modern', action='store_true', default=False, help='Skip tests with modern eBPF') def pytest_collection_modifyitems(config, items): no_kmod = config.getoption('--no-kmod') no_ebpf = config.getoption('--no-ebpf') no_modern = config.getoption('--no-modern') if not no_kmod and not no_ebpf and not no_modern: # We are not skipping any tests return skip_kmod = pytest.mark.skip( reason='Skipping tests with kernel module driver') skip_ebpf = pytest.mark.skip(reason='Skipping tests with eBPF driver') skip_modern = pytest.mark.skip( reason='Skipping tests with modern eBPF driver') for item in items: if no_kmod: for kw in item.keywords: if 'kmod' in kw: item.add_marker(skip_kmod) break if no_ebpf: for kw in item.keywords: if 'ebpf' in kw: item.add_marker(skip_ebpf) break if no_modern: for kw in item.keywords: if 'modern_bpf' in kw: item.add_marker(skip_modern) break @pytest.fixture(scope="session", autouse=True) def check_root(): assert os.geteuid() == 0, 'e2e tests need to be run as root' @pytest.fixture(scope="session", autouse=True) def docker_client(): """ Create a docker client to be used by the tests. Returns: A docker.DockerClient object created from the environment the tests run on. """ return docker.from_env() def wait_container_running(container: docker.models.containers.Container, additional_wait: int = 0, retries: int = 5): success = False for _ in range(retries): container.reload() if container.status == 'running': success = True break sleep(0.5) if not success: raise TimeoutError if additional_wait: sleep(additional_wait) def METHOD_NAME(docker_client: docker.client.DockerClient, name: str, container: dict): image = container['image'] args = container.get('args', '') privileged = container.get('privileged', False) mounts = container.get('mounts', []) environment = container.get('env', {}) user = container.get('user', '') pid_mode = container.get('pid_mode', '') network_mode = container.get('network_mode', '') additional_wait = container.get('init_wait', 0) post_validation = container.get('post_validation', None) stop_signal = container.get('signal', None) handle = docker_client.containers.run( image, args, name=name, detach=True, privileged=privileged, mounts=mounts, environment=environment, user=user, pid_mode=pid_mode, network_mode=network_mode, ) post = { 'validation': post_validation, 'signal': stop_signal } try: wait_container_running(handle, additional_wait) except TimeoutError: print(f'{name} failed to start, the test will fail') return (handle, post) def teardown_container(name, container, validation, stop_signal): if stop_signal: container.kill(stop_signal) # The stop command is issued regardless of the kill command to ensure # the container stops container.stop() logs = container.logs().decode('utf-8') if logs: with open(os.path.join(LOGS_PATH, f'{name}.log'), 'w') as f: f.write(logs) result = '' if validation: try: validation(container) except AssertionError as e: result = f'{name}: {e}' container.remove() return result @pytest.fixture(scope="function") def run_containers(request, docker_client: docker.client.DockerClient): """ Runs 
containers, dumps their logs and cleans'em up """ containers = {} post = {} for name, container in request.param.items(): handle, post_validation = METHOD_NAME(docker_client, name, container) containers[name] = handle post[name] = post_validation yield containers success = True errors = [] for name, container in containers.items(): validation = post[name]['validation'] stop_signal = post[name]['signal'] result = teardown_container(name, container, validation, stop_signal) if result != '': errors.append(result) success = False assert success, '\n'.join(errors) @pytest.fixture(scope='function') def sinsp(request, docker_client: docker.client.DockerClient): """ Runs an instance of sinsp-example, either in a container or as a regular process """ if is_containerized(): container = request.param handle, post = METHOD_NAME(docker_client, 'sinsp', container) yield SinspStreamerBuilder() \ .setContainerized(True) \ .setSinsp(handle) \ .setTimeout(10) \ .build() validation = container.get('post_validation', None) stop_signal = container.get('signal', None) result = teardown_container( 'sinsp', handle, validation, stop_signal) assert result == '', result else: process = request.param args = process['args'] args.insert(0, process['path']) env = os.environ.copy() additional_wait = process.get('init_wait', 0) for k, v in process['env'].items(): env[k] = v process = Popen(args, env=env, stdout=PIPE, universal_newlines=True) if additional_wait: sleep(additional_wait) reader = SinspStreamerBuilder() \ .setContainerized(False) \ .setSinsp(process) \ .setTimeout(10) \ .build() yield reader reader.stop() process.terminate() process.wait() assert process.returncode == 0, f'sinsp-example terminated with code {process.returncode}' def pytest_html_report_title(report): report.title = "sinsp e2e tests" def dump_logs(pytest_html, extra): """ Finds all logs dumped to LOGS_PATH and makes them available through the auto-generated report """ for file in os.listdir(LOGS_PATH): full_path = os.path.join(LOGS_PATH, file) if not os.path.isfile(full_path): continue with open(full_path, 'r', errors='replace') as f: logs = f.read() extra.append(pytest_html.extras.text(logs, name=file)) # Remove file so it doesn't bleed to following tests os.remove(full_path) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): pytest_html = item.config.pluginmanager.getplugin("html") outcome = yield report = outcome.get_result() extra = getattr(report, "extra", []) if report.when == "teardown": dump_logs(pytest_html, extra) report.extra = extra
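The container-running helper above consumes a plain dict. A sketch of the fields it reads, where the image and values are placeholders rather than part of the actual suite:

# Fields read by the container-running helper above; every key except 'image'
# falls back to a default via container.get(...). Values here are placeholders.
container_spec = {
    'image': 'busybox:latest',
    'args': 'sleep 60',
    'privileged': False,
    'mounts': [],               # docker.types.Mount objects
    'env': {'EXAMPLE_VAR': '1'},
    'user': '',
    'pid_mode': '',
    'network_mode': '',
    'init_wait': 0,             # extra seconds to wait after the container is running
    'post_validation': None,    # callable run at teardown; may raise AssertionError
    'signal': None,             # signal sent with kill() before stop(), e.g. 'SIGINT'
}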
2,017
test initialise with list kwarg
# -*- coding: utf-8 -*- ########################################################################### # Copyright (c), The AiiDA team. All rights reserved. # # This file is part of the AiiDA code. # # # # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### # pylint: disable=redefined-outer-name """Tests for :class:`aiida.orm.nodes.data.list.List` class.""" import pytest from aiida.common.exceptions import ModificationNotAllowed from aiida.orm import List, load_node @pytest.fixture def listing(): return ['a', 2, True] @pytest.fixture def int_listing(): return [2, 1, 3] def test_creation(): """Test the creation of an empty ``List`` node.""" node = List() assert len(node) == 0 with pytest.raises(IndexError): node[0] # pylint: disable=pointless-statement def test_mutability(): """Test list's mutability before and after storage.""" node = List() node.append(5) node.store() # Test all mutable calls are now disallowed with pytest.raises(ModificationNotAllowed): node.append(5) with pytest.raises(ModificationNotAllowed): node.extend([5]) with pytest.raises(ModificationNotAllowed): node.insert(0, 2) with pytest.raises(ModificationNotAllowed): node.remove(5) with pytest.raises(ModificationNotAllowed): node.pop() with pytest.raises(ModificationNotAllowed): node.sort() with pytest.raises(ModificationNotAllowed): node.reverse() def test_store_load(listing): """Test load_node on just stored object.""" node = List(listing) node.store() node_loaded = load_node(node.pk) assert node.get_list() == node_loaded.get_list() def test_special_methods(listing): """Test the special methods of the ``List`` class.""" node = List(listing) # __getitem__ for i, value in enumerate(listing): assert node[i] == value # __setitem__ node[0] = 'b' assert node[0] == 'b' # __delitem__ del node[0] assert node.get_list() == listing[1:] # __len__ assert len(node) == 2 def test_equality(listing): """Test equality comparison for ``List`` nodes.""" different_list = ['I', 'am', 'different'] node = List(listing) different_node = List(different_list) clone = List(listing) # Test equality comparison with Python base type assert node == listing assert node != different_list # Test equality comparison with other `BaseType` nodes assert node == clone assert node != different_node def test_append(listing): """Test the ``List.append()`` method.""" def do_checks(node): assert len(node) == 1 assert node[0] == 4 node = List() node.append(4) do_checks(node) # Try the same after storing node.store() do_checks(node) node = List(listing) node.append('more') assert node[-1] == 'more' def test_extend(listing): """Test extend() member function.""" def do_checks(node, lst): assert len(node) == len(lst) # Do an element wise comparison for lst_el, node_el in zip(lst, node): assert lst_el == node_el node = List() node.extend(listing) do_checks(node, listing) # Further extend node.extend(listing) do_checks(node, listing * 2) # Now try after storing node.store() do_checks(node, listing * 2) def test_insert(listing): """Test the ``List.insert()`` method.""" node = List(listing) node.insert(1, 'new') assert node[1] == 'new' assert len(node) == 4 def test_remove(listing): """Test the ``List.remove()`` method.""" node = List(listing) node.remove(1) listing.remove(1) assert node.get_list() == listing with pytest.raises(ValueError, 
match=r'list.remove\(x\): x not in list'): node.remove('non-existent') def test_pop(listing): """Test the ``List.pop()`` method.""" node = List(listing) node.pop() assert node.get_list() == listing[:-1] def test_index(listing): """Test the ``List.index()`` method.""" node = List(listing) assert node.index(True) == listing.index(True) def test_count(listing): """Test the ``List.count()`` method.""" node = List(listing) for value in listing: assert node.count(value) == listing.count(value) def test_sort(listing, int_listing): """Test the ``List.sort()`` method.""" node = List(int_listing) node.sort() int_listing.sort() assert node.get_list() == int_listing node = List(listing) with pytest.raises(TypeError, match=r"'<' not supported between instances of 'int' and 'str'"): node.sort() def test_reverse(listing): """Test the ``List.reverse()`` method.""" node = List(listing) node.reverse() listing.reverse() assert node.get_list() == listing def METHOD_NAME(listing): """Test that the ``List`` node can be initialized with the ``list`` keyword argument for backwards compatibility.""" node = List(listing) assert node.get_list() == listing
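A minimal sketch of the List node behaviour these tests exercise (it assumes a configured AiiDA profile, which the real tests get from fixtures):

from aiida.orm import List, load_node

node = List(['a', 2, True])
node.append('more')           # still mutable before storing
node.store()                  # after this, append/extend/insert/... raise ModificationNotAllowed

loaded = load_node(node.pk)
assert loaded.get_list() == ['a', 2, True, 'more']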
2,018
test validate slug and generate if needed
from datetime import timedelta from decimal import Decimal import pytest from django.core.exceptions import ValidationError from django.utils import timezone from graphql.error import GraphQLError from ....product.models import Category from ..validators import ( clean_seo_fields, validate_end_is_after_start, validate_one_of_args_is_in_query, validate_price_precision, validate_slug_and_generate_if_needed, ) @pytest.mark.parametrize( "value, currency", [ (Decimal("1.1200"), "USD"), (Decimal("1.12"), "USD"), (Decimal("1"), "USD"), (Decimal("1"), "ISK"), (Decimal("1.00"), "ISK"), (Decimal("5.12"), None), (Decimal("1000"), "USD"), ], ) def test_validate_price_precision(value, currency): # when result = validate_price_precision(value, currency) # then assert result is None @pytest.mark.parametrize( "value, currency", [ (Decimal("1.1212"), "USD"), (Decimal("1.128"), "USD"), (Decimal("1.1"), "ISK"), (Decimal("1.11"), "ISK"), (Decimal("5.123"), None), ], ) def test_validate_price_precision_raise_error(value, currency): with pytest.raises(ValidationError): validate_price_precision(value, currency) def test_validate_end_is_after_start_raise_error(): start_date = timezone.now() + timedelta(days=365) end_date = timezone.now() - timedelta(days=365) with pytest.raises(ValidationError) as error: validate_end_is_after_start(start_date, end_date) assert error.value.message == "End date cannot be before the start date." def test_validate_one_of_args_is_in_query(): assert validate_one_of_args_is_in_query("arg1", "present", "arg2", None) is None def test_validate_one_of_args_is_in_query_false_args(): with pytest.raises(GraphQLError) as error: validate_one_of_args_is_in_query("arg1", None, "arg2", "") assert ( error.value.message == "At least one of arguments is required: 'arg1', 'arg2'." ) def test_validate_one_of_args_is_in_query_more_than_one_true(): with pytest.raises(GraphQLError) as error: validate_one_of_args_is_in_query( "arg1", "present", "arg2", "present", "arg3", "present" ) assert ( error.value.message == "Argument 'arg1' cannot be combined with 'arg2', 'arg3'" ) def test_validate_one_of_args_is_in_query_single_arg(): assert validate_one_of_args_is_in_query("arg1", "present") is None def test_validate_one_of_args_is_in_query_single_arg_absent(): with pytest.raises(GraphQLError) as error: validate_one_of_args_is_in_query("arg1", None) is None assert error.value.message == "At least one of arguments is required: 'arg1'." 
def test_clean_seo_fields(): title = "lady title" description = "fantasy description" data = {"seo": {"title": title, "description": description}} clean_seo_fields(data) assert data["seo_title"] == title assert data["seo_description"] == description def test_clean_seo_fields_accepts_null(): data = {"seo": None} clean_seo_fields(data) assert not data @pytest.mark.parametrize( "cleaned_input", [ {"slug": None, "name": "test"}, {"slug": "", "name": "test"}, {"slug": ""}, {"slug": None}, ], ) def test_validate_slug_and_generate_if_needed_raises_errors(category, cleaned_input): with pytest.raises(ValidationError): validate_slug_and_generate_if_needed(category, "name", cleaned_input) @pytest.mark.parametrize( "cleaned_input", [{"slug": "test-slug"}, {"slug": "test-slug", "name": "test"}] ) def test_validate_slug_and_generate_if_needed_not_raises_errors( category, cleaned_input ): validate_slug_and_generate_if_needed(category, "name", cleaned_input) @pytest.mark.parametrize( "cleaned_input", [ {"slug": None, "name": "test"}, {"slug": "", "name": "test"}, ], ) def METHOD_NAME(cleaned_input): # given category = Category(name="test") previous_slug_value = cleaned_input["slug"] # when validate_slug_and_generate_if_needed(category, "name", cleaned_input) # then assert previous_slug_value != cleaned_input["slug"] assert cleaned_input["slug"] == cleaned_input["name"] @pytest.mark.parametrize( "cleaned_input", [ {"slug": ""}, {"slug": None}, {"slug": "test-slug"}, {"slug": "test-slug", "name": "test"}, ], ) def test_validate_slug_and_generate_if_needed_slug_not_changed(cleaned_input): # given category = Category(name="test") previous_slug_value = cleaned_input["slug"] # when validate_slug_and_generate_if_needed(category, "name", cleaned_input) # then assert cleaned_input["slug"] == previous_slug_value
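A small sketch of the validators exercised above; the absolute import path is assumed from the test's relative imports:

from decimal import Decimal
from django.core.exceptions import ValidationError
# Import path assumed from the test's relative imports:
from saleor.graphql.core.validators import (
    validate_one_of_args_is_in_query,
    validate_price_precision,
)

validate_price_precision(Decimal("1.12"), "USD")    # returns None: 2 decimal places fit USD
try:
    validate_price_precision(Decimal("1.1"), "ISK")  # ISK allows no decimal places
except ValidationError:
    pass

# Passes when exactly one of the named arguments carries a value:
validate_one_of_args_is_in_query("arg1", "present", "arg2", None)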
2,019
get
# Copyright (c) Meta Platforms, Inc. and affiliates. from .request import Broker from .vocabulary import ThreatExchange as t from .vocabulary import ThreatExchangeMember as tem from .errors import pytxAttributeError class ThreatExchangeMember(object): _URL = t.URL + t.VERSION + t.THREAT_EXCHANGE_MEMBERS _internal = [ "_access_token", ] _fields = [ tem.ID, tem.NAME, tem.EMAIL, ] _default_fields = [ tem.ID, tem.NAME, tem.EMAIL, ] _unique = [] def __init__(self, **kwargs): """ Initialize the object. Set the _access_token and any attributes that were provided. """ for name, value in kwargs.items(): self.__setattr__(name, value) def __getattr__(self, attr): """ Get an attribute. If the attribute does not exist, return None """ if attr not in self._fields and attr not in self._internal: raise pytxAttributeError("%s is not a valid attribute" % attr) try: return object.__getattribute__(self, attr) except: return None def METHOD_NAME(self, attr): """ Wrapper around __getattr__ making it easier to use the vocabulary to get class attributes. :param attr: The name of the attribute to get. :type attr: str """ return self.__getattr__(attr) @classmethod def _get_generator( cls, url, to_dict=False, params=None, retries=None, headers=None, proxies=None, verify=None, ): """ Send the GET request and return a generator. :param url: The URL to send the GET request to. :type url: str :param to_dict: Return a dictionary instead of an instantiated class. :type to_dict: bool :param params: The GET parameters to send in the request. :type params: dict :param retries: Number of retries to fetch a page before stopping. :type retries: int :param headers: header info for requests. :type headers: dict :param proxies: proxy info for requests. :type proxies: dict :param verify: verify info for requests. :type verify: bool, str :returns: Generator, dict (using json.loads()) """ if not params: params = dict() members = Broker.METHOD_NAME( url, params=params, retries=retries, headers=headers, proxies=proxies, verify=verify, ).METHOD_NAME(t.DATA, []) total = len(members) if total == t.MIN_TOTAL: yield None else: for member in members: if to_dict: yield member else: yield Broker.get_new(cls, member) @classmethod def objects( cls, full_response=False, dict_generator=False, retries=None, headers=None, proxies=None, verify=None, ): """ Get a list of Threat Exchange Members :param full_response: Return the full response instead of the generator. Takes precedence over dict_generator. :type full_response: bool :param dict_generator: Return a dictionary instead of an instantiated object. :type dict_generator: bool :param retries: Number of retries to fetch a page before stopping. :type retries: int :param headers: header info for requests. :type headers: dict :param proxies: proxy info for requests. :type proxies: dict :param verify: verify info for requests. :type verify: bool, str :returns: Generator, dict (using json.loads()) """ if full_response: return Broker.METHOD_NAME( cls._URL, retries=retries, headers=headers, proxies=proxies, verify=verify, ) else: return cls._get_generator( cls._URL, to_dict=dict_generator, retries=retries, headers=headers, proxies=proxies, verify=verify, ) def to_dict(self): """ Convert this object into a dictionary. :returns: dict """ d = dict((n, getattr(self, n, None)) for n in self._fields) return d
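A hedged usage sketch based on the class's own docstrings; it assumes pytx is configured with a valid access token and that ThreatExchangeMember is importable from the package root:

from pytx import ThreatExchangeMember  # import path assumed from the package layout

# objects() yields members from the threat_exchange_members endpoint, or a single
# None when the response is empty.
for member in ThreatExchangeMember.objects():
    if member is None:
        break
    print(member.to_dict())   # {'id': ..., 'name': ..., 'email': ...}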
2,020
combine frontiers
import itertools import random import sys import time import unittest import backend as F import dgl import networkx as nx import numpy as np import scipy.sparse as sp from utils import parametrize_idtype np.random.seed(42) def toset(x): # F.zerocopy_to_numpy may return a int return set(F.zerocopy_to_numpy(x).tolist()) @parametrize_idtype def test_bfs(idtype, n=100): def _bfs_nx(g_nx, src): edges = nx.bfs_edges(g_nx, src) layers_nx = [set([src])] edges_nx = [] frontier = set() edge_frontier = set() for u, v in edges: if u in layers_nx[-1]: frontier.add(v) edge_frontier.add(g.edge_ids(int(u), int(v))) else: layers_nx.append(frontier) edges_nx.append(edge_frontier) frontier = set([v]) edge_frontier = set([g.edge_ids(u, v)]) # avoids empty successors if len(frontier) > 0 and len(edge_frontier) > 0: layers_nx.append(frontier) edges_nx.append(edge_frontier) return layers_nx, edges_nx a = sp.random(n, n, 3 / n, data_rvs=lambda n: np.ones(n)) g = dgl.from_scipy(a).astype(idtype) g_nx = g.to_networkx() src = random.choice(range(n)) layers_nx, _ = _bfs_nx(g_nx, src) layers_dgl = dgl.bfs_nodes_generator(g, src) assert len(layers_dgl) == len(layers_nx) assert all(toset(x) == y for x, y in zip(layers_dgl, layers_nx)) g_nx = nx.random_tree(n, seed=42) g = dgl.from_networkx(g_nx).astype(idtype) src = 0 _, edges_nx = _bfs_nx(g_nx, src) edges_dgl = dgl.bfs_edges_generator(g, src) assert len(edges_dgl) == len(edges_nx) assert all(toset(x) == y for x, y in zip(edges_dgl, edges_nx)) @parametrize_idtype def test_topological_nodes(idtype, n=100): a = sp.random(n, n, 3 / n, data_rvs=lambda n: np.ones(n)) b = sp.tril(a, -1).tocoo() g = dgl.from_scipy(b).astype(idtype) layers_dgl = dgl.topological_nodes_generator(g) adjmat = g.adj_external(transpose=True) def tensor_topo_traverse(): n = g.num_nodes() mask = F.copy_to(F.ones((n, 1)), F.cpu()) degree = F.spmm(adjmat, mask) while F.reduce_sum(mask) != 0.0: v = F.astype((degree == 0.0), F.float32) v = v * mask mask = mask - v frontier = F.copy_to(F.nonzero_1d(F.squeeze(v, 1)), F.cpu()) yield frontier degree -= F.spmm(adjmat, v) layers_spmv = list(tensor_topo_traverse()) assert len(layers_dgl) == len(layers_spmv) assert all(toset(x) == toset(y) for x, y in zip(layers_dgl, layers_spmv)) DFS_LABEL_NAMES = ["forward", "reverse", "nontree"] @parametrize_idtype def test_dfs_labeled_edges(idtype, example=False): dgl_g = dgl.graph([]).astype(idtype) dgl_g.add_nodes(6) dgl_g.add_edges([0, 1, 0, 3, 3], [1, 2, 2, 4, 5]) dgl_edges, dgl_labels = dgl.dfs_labeled_edges_generator( dgl_g, [0, 3], has_reverse_edge=True, has_nontree_edge=True ) dgl_edges = [toset(t) for t in dgl_edges] dgl_labels = [toset(t) for t in dgl_labels] g1_solutions = [ # edges labels [[0, 1, 1, 0, 2], [0, 0, 1, 1, 2]], [[2, 2, 0, 1, 0], [0, 1, 0, 2, 1]], ] g2_solutions = [ # edges labels [[3, 3, 4, 4], [0, 1, 0, 1]], [[4, 4, 3, 3], [0, 1, 0, 1]], ] def METHOD_NAME(sol): es, ls = zip(*sol) es = [ set(i for i in t if i is not None) for t in itertools.zip_longest(*es) ] ls = [ set(i for i in t if i is not None) for t in itertools.zip_longest(*ls) ] return es, ls for sol_set in itertools.product(g1_solutions, g2_solutions): es, ls = METHOD_NAME(sol_set) if es == dgl_edges and ls == dgl_labels: break else: assert False if __name__ == "__main__": test_bfs(idtype="int32") test_topological_nodes(idtype="int32") test_dfs_labeled_edges(idtype="int32")
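A minimal sketch of the BFS generator the test compares against NetworkX, with the graph built the same way as in the DFS test:

import dgl

# Graph built the same way as in the DFS test: edges 0->1, 1->2 and 0->2.
g = dgl.graph([])
g.add_nodes(3)
g.add_edges([0, 1, 0], [1, 2, 2])

# Each yielded item is a tensor of node IDs forming one BFS layer from node 0.
for layer in dgl.bfs_nodes_generator(g, 0):
    print(layer)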
2,021
test edit post belongs to thread and forum
from guardian.shortcuts import assign_perm

from kitsune.forums.tests import ForumFactory, ForumTestCase, PostFactory, ThreadFactory
from kitsune.sumo.tests import get, post
from kitsune.users.tests import GroupFactory, UserFactory


class BelongsTestCase(ForumTestCase):
    """
    Mixing and matching thread, forum, and post data in URLs should fail.
    """

    def test_posts_thread_belongs_to_forum(self):
        """Posts view - redirect if thread does not belong to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        r = get(self.client, "forums.posts", args=[f.slug, t.id])
        self.assertEqual(200, r.status_code)
        u = r.redirect_chain[0][0]
        assert u.endswith(t.get_absolute_url())

    def test_reply_thread_belongs_to_forum(self):
        """Reply action - thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        r = post(self.client, "forums.reply", {}, args=[f.slug, t.id])
        self.assertEqual(404, r.status_code)

    def test_locked_thread_belongs_to_forum(self):
        """Lock action - thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        u = UserFactory()
        # Give the user the permission to lock threads.
        g = GroupFactory()
        g.user_set.add(u)
        assign_perm("forums.lock_forum_thread", g, f)
        assign_perm("forums.lock_forum_thread", g, t.forum)
        self.client.login(username=u.username, password="testpass")
        r = post(self.client, "forums.lock_thread", {}, args=[f.slug, t.id])
        self.assertEqual(404, r.status_code)

    def test_sticky_thread_belongs_to_forum(self):
        """Sticky action - thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        u = UserFactory()
        # Give the user the permission to sticky threads.
        g = GroupFactory()
        g.user_set.add(u)
        assign_perm("forums.sticky_forum_thread", g, f)
        assign_perm("forums.sticky_forum_thread", g, t.forum)
        self.client.login(username=u.username, password="testpass")
        r = post(self.client, "forums.sticky_thread", {}, args=[f.slug, t.id])
        self.assertEqual(404, r.status_code)

    def test_edit_thread_belongs_to_forum(self):
        """Edit thread action - thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        u = t.creator
        self.client.login(username=u.username, password="testpass")
        r = get(self.client, "forums.edit_thread", args=[f.slug, t.id])
        self.assertEqual(404, r.status_code)

    def test_delete_thread_belongs_to_forum(self):
        """Delete thread action - thread belongs to forum."""
        f = ForumFactory()
        t = ThreadFactory()  # Thread belongs to a different forum
        u = UserFactory()
        # Give the user the permission to delete threads.
        g = GroupFactory()
        g.user_set.add(u)
        assign_perm("forums.delete_forum_thread", g, f)
        assign_perm("forums.delete_forum_thread", g, t.forum)
        self.client.login(username=u.username, password="testpass")
        r = get(self.client, "forums.delete_thread", args=[f.slug, t.id])
        self.assertEqual(404, r.status_code)

    def METHOD_NAME(self):
        # Edit post action - post belongs to thread and thread belongs
        # to forum.
        f = ForumFactory()
        t = ThreadFactory(forum=f)
        # Post belongs to a different forum and thread.
        p = PostFactory()
        u = p.author
        self.client.login(username=u.username, password="testpass")
        # Post isn't in the passed forum:
        r = get(self.client, "forums.edit_post", args=[f.slug, p.thread.id, p.id])
        self.assertEqual(404, r.status_code)
        # Post isn't in the passed thread:
        r = get(self.client, "forums.edit_post", args=[p.thread.forum.slug, t.id, p.id])
        self.assertEqual(404, r.status_code)

    def test_delete_post_belongs_to_thread_and_forum(self):
        # Delete post action - post belongs to thread and thread
        # belongs to forum.
        f = ForumFactory()
        t = ThreadFactory(forum=f)
        # Post belongs to a different forum and thread.
        p = PostFactory()
        u = p.author
        # Give the user the permission to delete posts.
        g = GroupFactory()
        g.user_set.add(u)
        assign_perm("forums.delete_forum_thread_post", g, f)
        assign_perm("forums.delete_forum_thread_post", g, p.thread.forum)
        self.client.login(username=u.username, password="testpass")
        # Post isn't in the passed forum:
        r = get(self.client, "forums.delete_post", args=[f.slug, p.thread.id, p.id])
        self.assertEqual(404, r.status_code)
        # Post isn't in the passed thread:
        r = get(self.client, "forums.delete_post", args=[p.thread.forum.slug, t.id, p.id])
        self.assertEqual(404, r.status_code)
2,022
test stock rule buy payment mode
# Copyright 2013-2015 Tecnativa - Pedro M. Baeza
# Copyright 2017 Tecnativa - Vicent Cubells
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

from odoo import fields
from odoo.tests import Form

from odoo.addons.account_payment_purchase.tests.test_account_payment_purchase import (
    TestAccountPaymentPurchase,
)


class TestAccountPaymentPurchaseStock(TestAccountPaymentPurchase):
    def test_purchase_stock_order_invoicing(self):
        self.purchase.onchange_partner_id()
        self.purchase.button_confirm()
        picking = self.purchase.picking_ids[0]
        picking.action_confirm()
        picking.move_lines.write({"quantity_done": 1.0})
        picking.button_validate()
        invoice = self.env["account.move"].create(
            {"partner_id": self.partner.id, "move_type": "in_invoice"}
        )
        with Form(invoice) as inv:
            inv.purchase_id = self.purchase
        self.assertEqual(
            self.purchase.invoice_ids[0].payment_mode_id, self.payment_mode
        )

    def test_picking_from_purchase_order_invoicing(self):
        # Test payment mode
        stockable_product = self.env["product.product"].create(
            {"name": "Test stockable product", "type": "product"}
        )
        self.purchase.order_line[0].product_id = stockable_product
        self.purchase.button_confirm()
        picking = self.purchase.picking_ids[0]
        picking.action_confirm()
        picking.move_lines.write({"quantity_done": 1.0})
        picking.button_validate()
        invoice = self.env["account.move"].create(
            {"partner_id": self.partner.id, "move_type": "in_invoice"}
        )
        invoice.purchase_id = self.purchase
        invoice._onchange_purchase_auto_complete()
        self.assertEqual(invoice.payment_mode_id, self.payment_mode)
        purchase2 = self.purchase.copy()
        payment_mode2 = self.payment_mode.copy()
        purchase2.payment_mode_id = payment_mode2
        purchase2.button_confirm()
        picking = purchase2.picking_ids[0]
        picking.action_confirm()
        picking.move_lines.write({"quantity_done": 1.0})
        picking.button_validate()
        invoice.purchase_id = purchase2
        result = invoice._onchange_purchase_auto_complete()
        self.assertEqual(
            result and result.get("warning", {}).get("title", False), "Warning"
        )

    def test_picking_from_purchase_order_invoicing_bank(self):
        # Test partner_bank
        stockable_product = self.env["product.product"].create(
            {"name": "Test stockable product", "type": "product"}
        )
        self.purchase.order_line[0].product_id = stockable_product
        self.purchase.supplier_partner_bank_id = self.bank
        self.purchase.button_confirm()
        picking = self.purchase.picking_ids[0]
        picking.action_confirm()
        picking.move_lines.write({"quantity_done": 1.0})
        picking.button_validate()
        invoice = self.env["account.move"].create(
            {"partner_id": self.partner.id, "move_type": "in_invoice"}
        )
        invoice.purchase_id = self.purchase
        invoice._onchange_purchase_auto_complete()
        self.assertEqual(invoice.partner_bank_id, self.bank)
        purchase2 = self.purchase.copy()
        purchase2.supplier_partner_bank_id = self.bank2
        purchase2.button_confirm()
        picking = purchase2.picking_ids[0]
        picking.action_confirm()
        picking.move_lines.write({"quantity_done": 1.0})
        picking.button_validate()
        invoice.purchase_id = purchase2
        result = invoice._onchange_purchase_auto_complete()
        self.assertEqual(
            result and result.get("warning", {}).get("title", False), "Warning"
        )

    def METHOD_NAME(self):
        route = self.env.ref("purchase_stock.route_warehouse0_buy")
        rule = self.env["stock.rule"].search([("route_id", "=", route.id)], limit=1)
        rule._run_buy(
            procurements=[
                (
                    self.env["procurement.group"].Procurement(
                        self.mto_product,
                        1,
                        self.mto_product.uom_id,
                        self.env["stock.location"].search([], limit=1),
                        "Procurement order test",
                        "Test",
                        rule.company_id,
                        {
                            "company_id": rule.company_id,
                            "date_planned": fields.Datetime.now(),
                        },
                    ),
                    rule,
                )
            ]
        )
        purchase = self.env["purchase.order"].search([("origin", "=", "Test")])
        self.assertEqual(purchase.payment_mode_id, self.payment_mode)
2,023
init vars
# Copyright 2019 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Regret Matching Approximate Nash Solver.""" from absl import logging # pylint:disable=unused-import import numpy as np class Solver(object): """Regret-matching Solver.""" def __init__(self, optimism=True, discount=False, rnd_init=False, seed=None, **kwargs): """Ctor.""" del kwargs self.num_players = None self.lrs = None self.optimism = optimism self.discount = discount self.rnd_init = rnd_init self.has_aux = True self.aux_errors = [] self.seed = seed self.random = np.random.RandomState(seed) def METHOD_NAME(self, num_strats, num_players): """Initialize solver parameters.""" self.num_players = num_players if self.rnd_init: init_dist = self.random.rand(num_strats) else: init_dist = np.ones(num_strats) init_dist /= init_dist.sum() init_regret = np.zeros(num_strats) return (init_dist, init_regret) def record_aux_errors(self, grads): """Record errors for the auxiliary variables.""" grad_regret = grads[1] self.aux_errors.append([np.linalg.norm(grad_regret)]) def compute_gradients(self, params, payoff_matrices): """Compute and return gradients (and exploitabilities) for all parameters. Args: params: tuple of params (dist, regret), see regmatch.gradients payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action Returns: tuple of gradients (grad_dist, grad_regret), see ate.gradients unregularized exploitability (stochastic estimate) solver exploitability (stochastic estimate) - NaN """ return gradients(*params, payoff_matrices) def exploitability(self, params, payoff_matrices): """Regret matching does not minimize any exploitability so return NaN. Args: params: tuple of params (dist,) payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action Returns: np.NaN """ del params del payoff_matrices return np.NaN def update(self, params, grads, t): """Update cumulative regret and strategy (dist). Args: params: tuple of variables to be updated (dist, regret) grads: tuple of variable gradients (grad_dist, grad_regret) t: int, solver iteration (not used) Returns: new_params: tuple of update params (new_dist, new_regret) """ dist, regret = params regret_delta = grads[1] if self.discount: gamma = t / float(t + 1) else: gamma = 1 new_regret = gamma * regret + regret_delta new_clipped_regrets = np.clip(new_regret + self.optimism * regret_delta, 0., np.inf) if np.sum(new_clipped_regrets) > 0: new_dist = new_clipped_regrets / new_clipped_regrets.sum() else: new_dist = np.ones_like(dist) / dist.size new_params = (new_dist, new_regret) return new_params def gradients(dist, regret, payoff_matrices): """Computes regret delta to be added to regret in update. Args: dist: 1-d np.array, current estimate of nash distribution regret: 1-d np.array (same shape as dist), current estimate of regrets payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action Returns: deltas w.r.t. 
(dist, regret) as tuple unregularized exploitability (stochastic estimate) solver exploitability (stochastic estimate) - NaN """ del regret nabla = payoff_matrices[0].dot(dist) utility = nabla.dot(dist) grad_dist = np.NaN * np.ones_like(dist) grad_regret = nabla - utility unreg_exp = np.max(nabla) - nabla.dot(dist) return (grad_dist, grad_regret), unreg_exp, np.NaN
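A worked one-step example of the regret-matching update defined above. Solver and gradients are the definitions from this module (import path omitted), and the numbers follow directly from the formulas in gradients() and update():

import numpy as np

# Solver and gradients are the definitions above (module import path omitted here).
# Symmetric 2-action game; gradients() only reads payoff_matrices[0].
payoff_matrices = np.array([[[0.0, 1.0], [1.0, 0.0]],
                            [[0.0, 1.0], [1.0, 0.0]]])
dist = np.array([0.9, 0.1])      # current strategy
regret = np.zeros(2)             # cumulative regret starts at zero

(grad_dist, grad_regret), unreg_exp, _ = gradients(dist, regret, payoff_matrices)
# nabla = payoff_matrices[0] @ dist = [0.1, 0.9], utility = 0.18
# grad_regret = nabla - utility = [-0.08, 0.72], unreg_exp = 0.9 - 0.18 = 0.72

solver = Solver(optimism=False, discount=False)
new_dist, new_regret = solver.update((dist, regret), (grad_dist, grad_regret), t=0)
# Only action 1 has positive clipped regret, so new_dist = [0.0, 1.0]
# and new_regret = [-0.08, 0.72].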
2,024
callback wazuhdb response
# Copyright (C) 2015-2022, Wazuh Inc. # Created by Wazuh, Inc. <[email protected]>. # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 import functools import hashlib import json import logging import socket import sqlite3 import time from wazuh_testing.tools import GLOBAL_DB_PATH, WAZUH_DB_SOCKET_PATH from wazuh_testing.tools.monitoring import wazuh_pack, wazuh_unpack from wazuh_testing.tools.services import control_service def METHOD_NAME(item): if isinstance(item, tuple): data, response = item return response.decode() def mock_db(func): """Decorator used in any function that needs to mock a wazuh db This function will execute `func` after stopping wazuh-modulesd and wazuh-db. After that, it will start the daemons again Args: func (callable): function that will mock the cve.db Example: @vd.mock__db def mock_agent_status(request, agent_id, agent_status): """ @functools.wraps(func) def magic(*args, **kwargs): control_service('stop', daemon='wazuh-modulesd') func(*args, **kwargs) control_service('start', daemon='wazuh-modulesd') return magic def mock_agent( agent_id, name="centos8-agent", ip="127.0.0.1", register_ip="127.0.0.1", internal_key="", os_name="CentOS Linux", os_version="7.1", os_major="7", os_minor="1", os_codename="centos-8", os_build="4.18.0-147.8.1.el8_1.x86_64", os_platform="#1 SMP Thu Apr 9 13:49:54 UTC 2020", os_uname="x86_64", os_arch="x86_64", version="4.2", config_sum="", merged_sum="", manager_host="centos-8", node_name="node01", date_add="1612942494", last_keepalive="253402300799", group="", sync_status="synced", connection_status="active", client_key_secret=None): create_agent_query = f'''global sql INSERT OR REPLACE INTO AGENT (id, name, ip, register_ip, internal_key, os_name, os_version, os_major, os_minor, os_codename, os_build, os_platform, os_uname, os_arch, version, config_sum, merged_sum, manager_host, node_name, date_add, last_keepalive, "group", sync_status, connection_status) VALUES ( {agent_id}, "{name}", "{ip}", "{register_ip}", "{internal_key}", "{os_name}", "{os_version}", "{os_major}", "{os_minor}", "{os_codename}", "{os_build}", "{os_platform}", "{os_uname}", "{os_arch}", "{version}", "{config_sum}", "{merged_sum}", "{manager_host}", "{node_name}", "{date_add}", "{last_keepalive}", "{group}", "{sync_status}", "{connection_status}") ''' try: query_wdb(create_agent_query) except sqlite3.IntegrityError: logging.error("Failed to mock agent in database!") def load_db(db_path): """Load a database in db_path Args: db_path (str): path to the database """ conn = sqlite3.connect(db_path) cursor = conn.cursor() return conn, cursor @mock_db def run_query(db_query, db_path=GLOBAL_DB_PATH): """Method used to run sqlite queries on wazuh databases This function will execute the sqlite3 query `db_query` in `db_path` database. Args: db_query (string): sqlite3 valid query db_path (string): path to the database where the query will be run """ conn, _ = load_db(db_path) try: with conn: conn.execute(db_query) finally: conn.close() def get_query_result(query, db_path=GLOBAL_DB_PATH): """Return the result of a query in a specified DB Args: db_path (str): path to the database query (str): SQL query. (SELECT * ..) 
Returns: result (List[list]): each row is the query result row and each column is the query field value """ global cursor, db try: db, cursor = load_db(db_path) cursor.execute(query) records = cursor.fetchall() result = [] for row in records: result.append(', '.join([f'{item}' for item in row])) return result finally: cursor.close() db.close() def query_wdb(command): """Make queries to wazuh-db using the wdb socket. Args: command (str): wazuh-db command alias. For example `global get-agent-info 000`. Returns: list: Query response data """ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(WAZUH_DB_SOCKET_PATH) data = [] try: sock.send(wazuh_pack(len(command)) + command.encode()) rcv = sock.recv(4) if len(rcv) == 4: data_len = wazuh_unpack(rcv) data = sock.recv(data_len).decode() # Remove response header and cast str to list of dictionaries # From --> 'ok [ {data1}, {data2}...]' To--> [ {data1}, data2}...] if len(data.split()) > 1 and data.split()[0] == 'ok': data = json.loads(' '.join(data.split(' ')[1:])) finally: sock.close() return data def clean_agents_from_db(): """ Clean agents from DB """ command = 'global sql DELETE FROM agent WHERE id != 0' try: query_wdb(command) except Exception: raise Exception('Unable to clean agents') def clean_groups_from_db(): """ Clean groups table from global.db """ command = 'global sql DELETE FROM "group"' try: query_wdb(command) except Exception: raise Exception('Unable to clean groups table.') def clean_belongs(): """ Clean belong table from global.db """ command = 'global sql DELETE FROM belongs' try: query_wdb(command) except Exception: raise Exception('Unable to clean belongs table.') def insert_agent_in_db(id=1, name='TestAgent', ip='any', registration_time=0, connection_status=0, disconnection_time=0): """ Write agent in global.db """ insert_command = f'global insert-agent {{"id":{id},"name":"{name}","ip":"{ip}","date_add":{registration_time}}}' update_command = f'global sql UPDATE agent SET connection_status = "{connection_status}",\ disconnection_time = "{disconnection_time}" WHERE id = {id};' try: query_wdb(insert_command) query_wdb(update_command) except Exception: raise Exception(f"Unable to add agent {id}") # Insert agents into DB and assign them into a group def insert_agent_into_group(total_agents): for i in range(total_agents): id = i + 1 name = 'Agent-test' + str(id) date = time.time() command = f'global insert-agent {{"id":{id},"name":"{name}","date_add":{date}}}' results = query_wdb(command) assert results == 'ok' command = f'''global set-agent-groups {{"mode":"append","sync_status":"syncreq", "source":"remote","data":[{{"id":{id},"groups":["Test_group{id}"]}}]}}''' results = query_wdb(command) assert results == 'ok' def remove_agent(agent_id): """Function that wraps the needed queries to remove an agent. Args: agent_id(int): Unique identifier of an agent """ data = query_wdb(f"global delete-agent {agent_id}").split() assert data[0] == 'ok', f"Unable to remove agent {agent_id} - {data[1]}" def calculate_global_hash(): """Function that calculates and retrieves the actual global groups hash. Returns: str: Actual global groups hash. """ GET_GROUP_HASH = '''global sql SELECT group_hash FROM agent WHERE id > 0 AND group_hash IS NOT NULL ORDER BY id''' result = query_wdb(GET_GROUP_HASH) group_hashes = [item['group_hash'] for item in result] return hashlib.sha1("".join(group_hashes).encode()).hexdigest()
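A short sketch of the wdb helper above; the first command is the docstring's own example, and the exact import path of the module is not shown here:

# query_wdb is the helper defined above (its module import path is not shown in this snippet).

# The docstring's own example command, sent over the wdb socket:
agent_info = query_wdb('global get-agent-info 000')
print(agent_info)

# Raw SQL against global.db goes through the same channel:
rows = query_wdb('global sql SELECT id, name FROM agent WHERE id != 0')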
2,025
autogenerate
#!/usr/bin/env python3 """ manager.py - Script which acts as the user interface for schema management. """ import argparse import json import os from schema_parser import LDAPSchemaParser from generator import SchemaGenerator localdir = os.path.dirname(os.path.abspath(__file__)) def generate(infile, schema_type=None, out_file=None): """Function generates the LDAP schema definitions from the JSON data Args: schema_type (str): The schema type to be generated (opendj) """ fp = open(infile, 'r') json_text = fp.read() fp.close() gen = SchemaGenerator(json_text) if schema_type == 'opendj': schema_str = gen.generate_ldif() else: schema_str = gen.generate_schema() if out_file: with open(out_file, 'w') as w: w.write(schema_str) else: print(schema_str) def METHOD_NAME(): """Function that generates the LDAP schemas for OpenDJ from the gluu_schema.json and custom_schema.json and puts them in their respective folders. """ opendj_folder = os.path.join(os.path.dirname(localdir), 'static/opendj/') fp = open(os.path.join(localdir, 'gluu_schema.json'), 'r') gluu_json = fp.read() fp.close() gen = SchemaGenerator(gluu_json) with open(os.path.join(opendj_folder, '101-ox.ldif'), 'w') as f: f.write(gen.generate_ldif()) fp = open(os.path.join(localdir, 'custom_schema.json'), 'r') custom_json = fp.read() fp.close() gen = SchemaGenerator(custom_json) with open(os.path.join(opendj_folder, '77-customAttributes.ldif'), 'w') \ as f: f.write(gen.generate_ldif()) def run_tests(): """Function that runs the unit tests of the scripts in this package. """ # TODO pass def make_json(filename): """Function that parses the input schema file and generates JSON. """ parser = LDAPSchemaParser(filename) definitions = parser.parse() schema_dict = {} objectclasses = [] attributetypes = [] for obj in definitions['objectClasses']: obcl = {} props = ['oid', 'names', 'desc', 'must', 'may', 'sup', 'x_origin'] for prop in props: if hasattr(obj, prop): if getattr(obj, prop): obcl[prop] = getattr(obj, prop) # obcl['obsolete'] = obj.obsolete if obj.kind == 0: obcl['kind'] = 'STRUCTURAL' elif obj.kind == 1: obcl['kind'] = 'ABSTRACT' elif obj.kind == 2: obcl['kind'] = 'AUXILIARY' objectclasses.append(obcl) for att in definitions['attributeTypes']: attype = {} props = ['oid', 'names', 'desc', 'equality', 'substr', 'ordering', 'syntax', 'x_origin'] for prop in props: if hasattr(att, prop): if getattr(att, prop): attype[prop] = getattr(att, prop) # attype['no_user_mod'] = att.no_user_mod # attype['single_value'] = att.single_value # attype['obsolete'] = att.obsolete attributetypes.append(attype) schema_dict['objectClasses'] = objectclasses schema_dict['attributeTypes'] = attributetypes schema_dict['oidMacros'] = definitions['oidMacros'] print(json.dumps(schema_dict, indent=4, sort_keys=True)) def make_schema_docs(): schema = os.path.join(localdir, 'gluu_schema.json') f = open(schema) json_string = f.read() f.close() data = json.loads(json_string) objClasses = data['objectClasses'] attTypes = data['attributeTypes'] docs = '' for obj_class in objClasses: docs += "\n\n## {}".format(" (or) ".join(obj_class['names'])) if 'desc' in obj_class: docs += "\n_{}_".format(obj_class['desc'].encode('utf-8')) for obj_attr in obj_class['may']: attr_docs_added = False for attr_type in attTypes: if obj_attr in attr_type['names']: docs += "\n* __{}__".format(" (or) ".join(attr_type['names'])) if 'desc' in attr_type: docs += ": {}".format(attr_type['desc'].encode('utf-8')) attr_docs_added = True break if not attr_docs_added: docs += "\n* __{}__".format(obj_attr) 
print(docs) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( "action", help="the action you want to perform.", choices=["autogenerate", "generate", "makejson", "makedocs", "test"]) parser.add_argument( "--type", help="the schema type you want to generate", choices=["opendj"]) parser.add_argument( "--filename", help="the input file for various actions") args = parser.parse_args() if args.action == 'generate': if args.filename: generate(args.filename, args.type) else: print("No JSON Input. Specify a JSON file with --filename") elif args.action == 'test': run_tests() elif args.action == 'makejson': if args.filename: make_json(args.filename) else: print("No Schema Input. Specify schema file with --filename") elif args.action == 'autogenerate': METHOD_NAME() elif args.action == 'makedocs': make_schema_docs()
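A hedged sketch of driving the script above, both as a library call and through its argparse CLI; the JSON and LDIF file names are the ones the autogenerate path already uses:

# Library-style call into the script above (run with manager.py on the path).
generate('gluu_schema.json', schema_type='opendj', out_file='101-ox.ldif')

# Equivalent CLI invocations defined by the argparse block at the bottom:
#   python manager.py generate --type opendj --filename gluu_schema.json
#   python manager.py autogenerate
#   python manager.py makejson --filename <existing LDAP schema file>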
2,026
concat
# Copyright (c) 2023 zfit import numpy as np import tensorflow as tf import zfit.z.numpy as znp SWITCH_ON = True def is_tensor(x): return tf.is_tensor(x) def has_tensor(x): return any(tf.is_tensor(t) for t in tf.nest.flatten(x)) def allclose_anyaware(x, y, rtol=1e-5, atol=1e-8): """Tests if x and y are close by first testing equality (with numpy), then within the limits. The prepended equality test allow for ANY objects to compare positively if the x and y have the shape (1, n) with n arbitrary Args: x: y: rtol: atol: Returns: """ if not SWITCH_ON or has_tensor([x, y]): return znp.all(znp.less_equal(znp.abs(x - y), znp.abs(y) * rtol + atol)) else: x = np.array(x) y = np.array(y) if any(ar.dtype == object for ar in (x, y)): from zfit.core.space import LimitRangeDefinition equal = [] for x1, y1 in zip(x[0], y[0]): if isinstance(x1, LimitRangeDefinition) or isinstance( y1, LimitRangeDefinition ): equal.append(x1 < y1 or x1 > y1) else: equal.append(np.allclose(x1, y1, rtol=rtol, atol=atol)) allclose = np.array(equal)[None, :] else: allclose = np.allclose(x, y, rtol=rtol, atol=atol) return allclose def broadcast_to(input, shape): if not SWITCH_ON or is_tensor(input): return tf.broadcast_to(input, shape) else: return np.broadcast_to(input, shape) def expand_dims(input, axis): if not SWITCH_ON or has_tensor(input): return znp.expand_dims(input, axis) else: return np.expand_dims(input, axis) def reduce_prod(input_tensor, axis=None, keepdims=None): if not SWITCH_ON or has_tensor(input_tensor): return znp.prod(input_tensor, axis, keepdims=keepdims) else: if keepdims is None: return np.prod(input_tensor, axis) else: return np.prod(input_tensor, axis, keepdims=keepdims) def equal(x, y): if not SWITCH_ON or is_tensor(x) or is_tensor(y): return znp.equal(x, y) else: return np.equal(x, y) def reduce_all(input_tensor, axis=None): if not SWITCH_ON or has_tensor(input_tensor): if axis is None: input_tensor = [ znp.reshape(ar, (-1,)) for ar in tf.nest.flatten(input_tensor) ] return znp.all(input_tensor, axis) else: out = np.all(input_tensor, axis) if out.shape == (1,): out = out[0] return out def reduce_any(input_tensor, axis=None): if not SWITCH_ON or has_tensor(input_tensor): if axis is None: input_tensor = [ znp.reshape(ar, (-1,)) for ar in tf.nest.flatten(input_tensor) ] return znp.any(input_tensor, axis) else: out = np.any(input_tensor, axis) if out.shape == (1,): out = out[0] return out def logical_and(x, y): if not SWITCH_ON or has_tensor(x) or has_tensor(y): return znp.logical_and(x, y) else: return np.logical_and(x, y) def logical_or(x, y): if not SWITCH_ON or has_tensor(x) or has_tensor(y): return znp.logical_or(x, y) else: return np.logical_or(x, y) def less_equal(x, y): if not SWITCH_ON or has_tensor(x) or has_tensor(y): return znp.less_equal(x, y) else: return np.less_equal(x, y) def greater_equal(x, y): if not SWITCH_ON or has_tensor(x) or has_tensor(y): return znp.greater_equal(x, y) else: return np.greater_equal(x, y) def gather(x, indices=None, axis=None): if not SWITCH_ON or has_tensor(x): return tf.gather(x, indices=indices, axis=axis) else: return np.take(x, indices=indices, axis=axis) def METHOD_NAME(values, axis): if not SWITCH_ON or has_tensor(values): return znp.concatenate(values, axis=axis) else: return np.concatenate(values, axis=axis) def _try_convert_numpy(tensorlike): if hasattr(tensorlike, "numpy"): tensorlike = tensorlike.numpy() if not isinstance(tensorlike, np.ndarray): from zfit.util.exception import CannotConvertToNumpyError raise CannotConvertToNumpyError( f"Cannot 
convert {tensorlike} to a Numpy array. This may be because the" f" object is a Tensor and the function is called in Graph mode (e.g. in" f"a `z.function` decorated function.\n" f"If this error appears and is not understandable, it is most likely a bug." f" Please open an issue on Github." ) return tensorlike
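A small sketch of the dispatch behaviour of allclose_anyaware (defined above): with plain NumPy inputs it stays in NumPy, and as soon as a tensor is involved it switches to the TensorFlow path:

import numpy as np
import tensorflow as tf

# allclose_anyaware is the helper defined above.
x = np.array([[1.0, 2.0]])
y = np.array([[1.0, 2.0000001]])

print(allclose_anyaware(x, y))                # NumPy inputs: plain np.allclose path
print(allclose_anyaware(tf.constant(x), y))   # tensor involved: TensorFlow ops path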
2,027
test rst
# Copyright 2023 Iguazio # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast from textwrap import dedent import pytest import yaml from mlrun.runtimes import funcdoc from tests.conftest import tests_root_directory def load_rst_cases(name): with open(tests_root_directory / "runtimes" / name) as fp: data = yaml.load(fp) for i, case in enumerate(data): name = case.get("name", "") tid = f"{i} - {name}" yield pytest.param(case["text"], case["expected"], id=tid) @pytest.mark.parametrize("text, expected", load_rst_cases("rst_cases.yml")) def METHOD_NAME(text, expected): doc, params, ret = funcdoc.parse_rst(text) assert expected["doc"].strip() == doc.strip(), "doc" assert expected["params"] == params, "params" assert expected["ret"] == ret, "ret" def is_ast_func(obj): return isinstance(obj, ast.FunctionDef) def ast_func(code): funcs = [s for s in ast.parse(code).body if is_ast_func(s)] assert len(funcs) == 1, f"{len(funcs)} functions in:\n{code}" return funcs[0] def eval_func(code): out = {} exec(code, None, out) funcs = [obj for obj in out.values() if callable(obj)] assert len(funcs) == 1, f"more than one function in:\n{code}" return funcs[0] info_handlers = [ (funcdoc.func_info, eval_func), (funcdoc.ast_func_info, ast_func), ] def load_info_cases(): with open(tests_root_directory / "runtimes" / "info_cases.yml") as fp: cases = yaml.load(fp) for case in cases: for info_fn, conv in info_handlers: obj = conv(case["code"]) tid = f'{case["id"]}-{info_fn.__name__}' expected = case["expected"].copy() # No line info in evaled functions if info_fn is funcdoc.func_info: expected["lineno"] = -1 yield pytest.param(info_fn, obj, expected, id=tid) @pytest.mark.parametrize("info_fn, obj, expected", load_info_cases()) def test_func_info(info_fn, obj, expected): out = info_fn(obj) assert expected == out find_handlers_code = """ def dec(n): return n - 1 # mlrun:handler def inc(n): return n + 1 """ find_handlers_expected = [ { "name": "inc", "doc": "", "return": funcdoc.param_dict(), "params": [funcdoc.param_dict("n")], "lineno": 6, "has_varargs": False, "has_kwargs": False, }, ] def test_find_handlers(): funcs = funcdoc.find_handlers(find_handlers_code) assert funcs == find_handlers_expected ast_code_cases = [ "{'x': 1, 'y': 2}", "dict(x=1, y=2)", "{}", "[1, 2]", "[]", "(1, 2)", "()", "{1, 2}", "set()", "Point(1, 2)", "3", "'hello'", "None", ] @pytest.mark.parametrize("expr", ast_code_cases) def test_ast_code(expr): node = ast.parse(expr).body[0].value code = funcdoc.ast_code(node) assert expr == code def test_ast_none(): code = """ def fn() -> None: pass """ fn: ast.FunctionDef = ast.parse(dedent(code)).body[0] funcdoc.ast_func_info(fn) @pytest.mark.parametrize( "func_code,expected_has_varargs,expected_has_kwargs", [ ( """ def fn(p1,p2,*args,**kwargs) -> None: pass """, True, True, ), ( """ def fn(p1,p2,*args) -> None: pass """, True, False, ), ( """ def fn(p1,p2,**kwargs) -> None: pass """, False, True, ), ( """ def fn(p1,p2) -> None: pass """, False, False, ), ( """ def fn(p1,p2,**something) -> None: pass """, False, 
True, ), ], ) def test_ast_func_info_with_kwargs_and_args( func_code, expected_has_varargs, expected_has_kwargs ): fn: ast.FunctionDef = ast.parse(dedent(func_code)).body[0] func_info = funcdoc.ast_func_info(fn) assert func_info["has_varargs"] == expected_has_varargs assert func_info["has_kwargs"] == expected_has_kwargs def test_ast_compound(): param_types = [] with open(f"{tests_root_directory}/runtimes/arc.txt") as fp: code = fp.read() # collect the types of the function parameters # assumes each param is in a new line for simplicity for line in code.splitlines()[3:15]: if ":" not in line: param_types.append(None) continue param_type = line[line.index(":") + 1 :] if "=" in param_type: param_type = param_type[: param_type.index("=")] param_type = param_type[:-1].strip() param_types.append(param_type) fn = ast_func(code) info = funcdoc.ast_func_info(fn) for i, param in enumerate(info["params"]): if i in (4, 8): continue assert ( param["type"] == param_types[i] ), f"param at index {i} has a bad type value. param: {param}" underscore_code = """ def info(message): _log('INFO', message) def warning(message): _log('WARNING', message) def _log(level, message): print(f'{level} - {message}') """ def test_ignore_underscore(): funcs = funcdoc.find_handlers(underscore_code) names = {fn["name"] for fn in funcs} assert {"info", "warning"} == names, "names" def test_annotate_mod(): code = """ import mlrun def handler(data: mlrun.DataItem): ... """ handlers = funcdoc.find_handlers(dedent(code)) param = handlers[0]["params"][0] assert param["type"] == "DataItem"
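The tests above exercise MLRun's funcdoc helpers, which walk a function's AST to recover its parameters and annotations. A minimal, self-contained sketch of that idea using only the standard-library ast module (an illustration, not MLRun's actual implementation; MLRun's own helper additionally strips the module prefix from the type, as the last assertion shows):

import ast
from textwrap import dedent

def param_info(code):
    # Find the first function definition and describe its positional parameters.
    fn = next(n for n in ast.parse(dedent(code)).body if isinstance(n, ast.FunctionDef))
    return [
        # ast.unparse needs Python 3.9+
        {"name": a.arg, "type": ast.unparse(a.annotation) if a.annotation else None}
        for a in fn.args.args
    ]

print(param_info("""
def handler(data: mlrun.DataItem, limit: int = 10):
    ...
"""))
# [{'name': 'data', 'type': 'mlrun.DataItem'}, {'name': 'limit', 'type': 'int'}]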
2,028
pause
"""Support for audio output The `audioio` module contains classes to provide access to audio IO. All classes change hardware state and should be deinitialized when they are no longer needed if the program continues after use. To do so, either call :py:meth:`!deinit` or use a context manager. See :ref:`lifetime-and-contextmanagers` for more info. For more information on working with this module, refer to the `CircuitPython Essentials Learn Guide <https://learn.adafruit.com/circuitpython-essentials/circuitpython-audio-out>`_. Since CircuitPython 5, `RawSample` and `WaveFile` are moved to :mod:`audiocore`, and `Mixer` is moved to :mod:`audiomixer`. For compatibility with CircuitPython 4.x, some builds allow the items in `audiocore` to be imported from `audioio`. This will be removed for all boards in a future build of CircuitPython.""" from __future__ import annotations from typing import Optional import circuitpython_typing import microcontroller class AudioOut: """Output an analog audio signal""" def __init__( self, left_channel: microcontroller.Pin, *, right_channel: Optional[microcontroller.Pin] = None, quiescent_value: int = 0x8000, ) -> None: """Create a AudioOut object associated with the given pin(s). This allows you to play audio signals out on the given pin(s). :param ~microcontroller.Pin left_channel: The pin to output the left channel to :param ~microcontroller.Pin right_channel: The pin to output the right channel to :param int quiescent_value: The output value when no signal is present. Samples should start and end with this value to prevent audible popping. Simple 8ksps 440 Hz sin wave:: import audiocore import audioio import board import array import time import math # Generate one period of sine wav. length = 8000 // 440 sine_wave = array.array("H", [0] * length) for i in range(length): sine_wave[i] = int(math.sin(math.pi * 2 * i / length) * (2 ** 15) + 2 ** 15) dac = audioio.AudioOut(board.SPEAKER) sine_wave = audiocore.RawSample(sine_wave, sample_rate=8000) dac.play(sine_wave, loop=True) time.sleep(1) dac.stop() Playing a wave file from flash:: import board import audioio import digitalio # Required for CircuitPlayground Express speaker_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE) speaker_enable.switch_to_output(value=True) data = open("cplay-5.1-16bit-16khz.wav", "rb") wav = audiocore.WaveFile(data) a = audioio.AudioOut(board.A0) print("playing") a.play(wav) while a.playing: pass print("stopped")""" ... def deinit(self) -> None: """Deinitialises the AudioOut and releases any hardware resources for reuse.""" ... def __enter__(self) -> AudioOut: """No-op used by Context Managers.""" ... def __exit__(self) -> None: """Automatically deinitializes the hardware when exiting a context. See :ref:`lifetime-and-contextmanagers` for more info.""" ... def play( self, sample: circuitpython_typing.AudioSample, *, loop: bool = False ) -> None: """Plays the sample once when loop=False and continuously when loop=True. Does not block. Use `playing` to block. Sample must be an `audiocore.WaveFile`, `audiocore.RawSample`, `audiomixer.Mixer` or `audiomp3.MP3Decoder`. The sample itself should consist of 16 bit samples. Microcontrollers with a lower output resolution will use the highest order bits to output. For example, the SAMD21 has a 10 bit DAC that ignores the lowest 6 bits when playing 16 bit samples.""" ... def stop(self) -> None: """Stops playback and resets to the start of the sample.""" ... playing: bool """True when an audio sample is being output even if `paused`. 
(read-only)""" def METHOD_NAME(self) -> None: """Stops playback temporarily while remembering the position. Use `resume` to resume playback.""" ... def resume(self) -> None: """Resumes sample playback after :py:func:`pause`.""" ... paused: bool """True when playback is paused. (read-only)"""
2,029
test rename fields check log remove only
from __future__ import annotations import logging from typing import TYPE_CHECKING import pytest from bentoml._internal.configuration.helpers import flatten_dict from bentoml._internal.configuration.helpers import is_valid_ip_address from bentoml._internal.configuration.helpers import load_config_file from bentoml._internal.configuration.helpers import rename_fields from bentoml.exceptions import BentoMLConfigException if TYPE_CHECKING: from pathlib import Path from _pytest.logging import LogCaptureFixture def test_flatten_dict(): assert dict(flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})) == { "a": 1, "b.c": 2, "b.d.e": 3, } assert dict( flatten_dict({"runners": {"iris_clf": {"nvidia.com/gpu": [0, 1]}}}) ) == {'runners.iris_clf."nvidia.com/gpu"': [0, 1]} assert dict(flatten_dict({"a": 1, "b": 2}, sep="_")) == {"a": 1, "b": 2} def test_rename_fields_field_in_dict(): # If given field is in the dictionary, it will be renamed d = {"a": 1, "b": 2} rename_fields(d, "a", "x") assert "a" not in d assert "x" in d assert d["x"] == 1 assert d["b"] == 2 def test_rename_fields_field_not_in_dict(): # If given field is not in the dictionary, nothing will happen d = {"a": 1, "b": 2} rename_fields(d, "c", "d") assert "a" in d assert "b" in d assert d["a"] == 1 assert d["b"] == 2 def test_rename_fields_remove_only(): # If given field is in the dictionary, and remove_only is True, it will be removed. d = {"a": 1, "b": 2} rename_fields(d, "a", remove_only=True) assert "a" not in d rename_fields(d, "b", remove_only=True) assert len(d) == 0 def test_rename_fields_check_log(caplog: LogCaptureFixture): d = {"api_server.port": 5000} with caplog.at_level(logging.WARNING): rename_fields(d, "api_server.port", "api_server.http.port") assert ( "Field 'api_server.port' is deprecated and has been renamed to 'api_server.http.port'" in caplog.text ) assert "api_server.http.port" in d and d["api_server.http.port"] == 5000 def METHOD_NAME(caplog: LogCaptureFixture): d = {"api_server.port": 5000} with caplog.at_level(logging.WARNING): rename_fields(d, "api_server.port", remove_only=True) assert "Field 'api_server.port' is deprecated and will be removed." in caplog.text assert len(d) == 0 def test_rename_fields_exception(): # If no replace_with field is given, an AssertionError will be raised d = {"api_server.port": 5000} with pytest.raises(AssertionError, match="'replace_with' must be provided."): rename_fields(d, "api_server.port") with pytest.raises(AssertionError, match="'replace_with' must be provided."): rename_fields(d, "api_server.port", remove_only=False) # If the given dictionary is not flattened, a ValueError will be raised d = {"a": 1, "b": {"c": 2}} with pytest.raises(ValueError, match="Given dictionary is not flattened. *"): rename_fields(d, "b.c", "b.d.c") # If the given dictionary is not flattened + no replace_with field is given, a ValueError will be raised d = {"a": 1, "b": {"c": 2}} with pytest.raises(ValueError, match="Given dictionary is not flattened. *"): rename_fields(d, "b.c") def test_valid_load_config_file(tmp_path: Path): config = tmp_path / "configuration.yaml" config.write_text("api_server:\n port: 5000") assert load_config_file(config.__fspath__()) == {"api_server": {"port": 5000}} def test_invalid_load_config_file(): with pytest.raises(BentoMLConfigException) as e: load_config_file("/tmp/nonexistent.yaml") assert "Configuration file /tmp/nonexistent.yaml not found." 
in str(e.value) with pytest.raises(BentoMLConfigException) as e: load_config_file("\\tmp\\invalid.yaml") assert "Configuration file \\tmp\\invalid.yaml not found." in str(e.value) def test_valid_ip_address(): assert is_valid_ip_address("0.0.0.0") assert is_valid_ip_address("192.192.192.192") assert is_valid_ip_address("255.255.255.255") def test_invalid_ip_address(): assert not is_valid_ip_address("asdfadsf:143") assert not is_valid_ip_address("asdfadsf") assert not is_valid_ip_address("0.0.0.0.0") assert not is_valid_ip_address("0.0.0.") assert not is_valid_ip_address(".0.0.0") assert not is_valid_ip_address("x.0.0.0") assert not is_valid_ip_address("255.255.255.256") assert not is_valid_ip_address("255.255.256.255") assert not is_valid_ip_address("255.256.255.255") assert not is_valid_ip_address("256.255.255.255") assert not is_valid_ip_address("256.256.256.256") assert not is_valid_ip_address("")
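For reference, a simplified reimplementation of the dict-flattening behaviour the first test checks (illustrative only; it does not reproduce BentoML's quoting of keys that contain dots, e.g. the nvidia.com/gpu case):

from typing import Any, Iterator

def flatten(d: dict, parent: str = "", sep: str = ".") -> Iterator[tuple[str, Any]]:
    # Walk nested dicts and yield ("a.b.c", value) pairs.
    for key, value in d.items():
        path = f"{parent}{sep}{key}" if parent else str(key)
        if isinstance(value, dict):
            yield from flatten(value, path, sep)
        else:
            yield path, value

assert dict(flatten({"a": 1, "b": {"c": 2, "d": {"e": 3}}})) == {
    "a": 1,
    "b.c": 2,
    "b.d.e": 3,
}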
2,030
scale
#!/usr/bin/env python ############################################################################ # # MODULE: r.out.kde # AUTHOR(S): Anna Petrasova # # PURPOSE: # COPYRIGHT: (C) 2013 - 2019 by the GRASS Development Team # # This program is free software under the GNU General Public # License (>=v2). Read the file COPYING that comes with GRASS # for details. # ############################################################################# # %module # % description: Exports raster with variable transparency into an image file # % keyword: raster # % keyword: kernel density # % keyword: visualization # % keyword: transparency # % keyword: heatmap # %end # %option G_OPT_R_INPUT # % description: Raster map to be rendered with semi-transparency # %end # %option G_OPT_R_INPUT # % key: background # % description: Background raster map # %end # %option G_OPT_F_OUTPUT # % description: Rendered output file # %end # %option # % key: method # % type: string # % options: linear,logistic # % description: Method to scale transparency # %end import os import tempfile import atexit import shutil from math import exp import grass.script as gscript TMPRAST = [] TMPDIR = tempfile.mkdtemp() def cleanup(): gscript.run_command( "g.remove", name=",".join(TMPRAST), flags="f", type="raster", quiet=True ) shutil.rmtree(TMPDIR) def main(rinput, background, output, method): try: from PIL import Image except ImportError: gscript.fatal("Cannot import PIL." " Please install the Python pillow package.") if "@" in rinput: rinput = rinput.split("@")[0] suffix = "_" + os.path.basename(gscript.tempfile(False)) tmpname = rinput + suffix gscript.run_command("g.copy", raster=[rinput, tmpname]) TMPRAST.append(tmpname) gscript.run_command("r.colors", map=tmpname, color="grey") reg = gscript.region() width = reg["cols"] height = reg["rows"] fg_out = os.path.join(TMPDIR, "foreground.png") bg_out = os.path.join(TMPDIR, "background.png") intensity_tmp = os.path.join(TMPDIR, "intensity.png") gscript.run_command( "d.mon", start="cairo", output=fg_out, width=width, height=height, bgcolor="black", ) gscript.run_command("d.rast", map=rinput) gscript.run_command("d.mon", stop="cairo") # background gscript.run_command( "d.mon", start="cairo", output=bg_out, width=width, height=height ) gscript.run_command("d.rast", map=background) gscript.run_command("d.mon", stop="cairo") # greyscale gscript.run_command( "d.mon", start="cairo", output=intensity_tmp, width=width, height=height ) gscript.run_command("d.rast", map=tmpname) gscript.run_command("d.mon", stop="cairo") # put together with transparency foreground = Image.open(fg_out) background = Image.open(bg_out) intensity = Image.open(intensity_tmp) foreground = foreground.convert("RGBA") data_f = foreground.getdata() data_i = intensity.getdata() newData = [] for i in range(len(data_f)): intens = data_i[i][0] if intens == 0: newData.append((data_f[i][0], data_f[i][1], data_f[i][2], 0)) else: newData.append( ( data_f[i][0], data_f[i][1], data_f[i][2], METHOD_NAME(0, 255, intens, method), ) ) foreground.putdata(newData) background.paste(foreground, (0, 0), foreground) background.save(output) def METHOD_NAME(cmin, cmax, intens, method): # scale to 0 - 1 val = (intens - cmin) / float((cmax - cmin)) if method == "logistic": val = 1.0 / (1 + exp(-10 * (val - 0.5))) val *= 255 return int(val) if __name__ == "__main__": options, flags = gscript.parser() rinput = options["input"] bg = options["background"] output = options["output"] method = options["method"] atexit.register(cleanup) main(rinput, bg, 
output, method)
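The scaling helper at the bottom of the script maps pixel intensity to an alpha value either linearly or through a logistic curve; a standalone snippet showing how the two methods differ at a few intensities (same arithmetic as the helper above):

from math import exp

def scale(cmin, cmax, intens, method):
    # Normalise to 0..1, optionally pass through a logistic curve, rescale to 0..255.
    val = (intens - cmin) / float(cmax - cmin)
    if method == "logistic":
        val = 1.0 / (1 + exp(-10 * (val - 0.5)))
    return int(val * 255)

for intens in (0, 64, 128, 192, 255):
    print(intens, scale(0, 255, intens, "linear"), scale(0, 255, intens, "logistic"))
# Linear ramps evenly; logistic keeps low intensities nearly transparent and
# saturates quickly above the midpoint.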
2,031
bctester
#!/usr/bin/env python3 # Copyright 2014 BitPay Inc. # Copyright 2016-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file LICENSE or http://www.opensource.org/licenses/mit-license.php. """Test framework for defi utils. Runs automatically during `make check`. Can also be run manually.""" import argparse # import binascii # TODO: (temp) it's used in bctest import configparser # import difflib # TODO: (temp) it's used in bctest import json import logging import os import pprint # import subprocess # TODO: (temp) it's used in bctest import sys def main(): config = configparser.ConfigParser() config.optionxform = str config.read_file( open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8") ) env_conf = dict(config.items("environment")) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("-v", "--verbose", action="store_true") args = parser.parse_args() verbose = args.verbose if verbose: level = logging.DEBUG else: level = logging.ERROR formatter = "%(asctime)s - %(levelname)s - %(message)s" # Add the format/level to the logger logging.basicConfig(format=formatter, level=level) METHOD_NAME( os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "defi-util-test.json", env_conf, ) def METHOD_NAME(testDir, input_basename, buildenv): """Loads and parses the input file, runs all tests and reports results""" input_filename = os.path.join(testDir, input_basename) raw_data = open(input_filename, encoding="utf8").read() input_data = json.loads(raw_data) failed_testcases = [] for testObj in input_data: try: bctest(testDir, testObj, buildenv) logging.info("PASSED: " + testObj["description"]) except Exception: logging.info("FAILED: " + testObj["description"]) failed_testcases.append(testObj["description"]) if failed_testcases: error_message = "FAILED_TESTCASES:\n" error_message += pprint.pformat(failed_testcases, width=400) logging.error(error_message) sys.exit(1) else: sys.exit(0) def bctest(testDir, testObj, buildenv): """Runs a single test, comparing output and RC to expected output and RC. Raises an error if input can't be read, executable fails, or output/RC are not as expected. Error is caught by bctester() and reported. 
""" return # # Get the exec names and arguments # TODO: (temp) disable functional tests # execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"]) # execargs = testObj['args'] # execrun = [execprog] + execargs # # # Read the input data (if there is any) # stdinCfg = None # inputData = None # if "input" in testObj: # filename = os.path.join(testDir, testObj["input"]) # inputData = open(filename, encoding="utf8").read() # stdinCfg = subprocess.PIPE # # # Read the expected output data (if there is any) # outputFn = None # outputData = None # outputType = None # if "output_cmp" in testObj: # outputFn = testObj['output_cmp'] # outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare) # try: # outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read() # except: # logging.error("Output file " + outputFn + " can not be opened") # raise # if not outputData: # logging.error("Output data missing for " + outputFn) # raise Exception # if not outputType: # logging.error("Output file %s does not have a file extension" % outputFn) # raise Exception # # # Run the test # proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) # try: # outs = proc.communicate(input=inputData) # except OSError: # logging.error("OSError, Failed to execute " + execprog) # raise # # if outputData: # data_mismatch, formatting_mismatch = False, False # # Parse command output and expected output # try: # a_parsed = parse_output(outs[0], outputType) # except Exception as e: # logging.error('Error parsing command output as %s: %s' % (outputType, e)) # raise # try: # b_parsed = parse_output(outputData, outputType) # except Exception as e: # logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e)) # raise # # Compare data # if a_parsed != b_parsed: # logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")") # data_mismatch = True # # Compare formatting # if outs[0] != outputData: # error_message = "Output formatting mismatch for " + outputFn + ":\n" # error_message += "".join(difflib.context_diff(outputData.splitlines(True), # outs[0].splitlines(True), # fromfile=outputFn, # tofile="returned")) # logging.error(error_message) # formatting_mismatch = True # # assert not data_mismatch and not formatting_mismatch # # # Compare the return code to the expected return code # wantRC = 0 # if "return_code" in testObj: # wantRC = testObj['return_code'] # if proc.returncode != wantRC: # logging.error("Return code mismatch for " + outputFn) # raise Exception # # if "error_txt" in testObj: # want_error = testObj["error_txt"] # # Compare error text # # TODO: ideally, we'd compare the strings exactly and also assert # # That stderr is empty if no errors are expected. However, defi-tx # # emits DISPLAY errors when running as a windows application on # # linux through wine. Just assert that the expected error text appears # # somewhere in stderr. # if want_error not in outs[1]: # logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip()) # raise Exception # # def parse_output(a, fmt): # """Parse the output according to specified format. 
# # Raise an error if the output can't be parsed.""" # if fmt == 'json': # json: compare parsed data # return json.loads(a) # elif fmt == 'hex': # hex: parse and compare binary data # return binascii.a2b_hex(a.strip()) # else: # raise NotImplementedError("Don't know how to compare %s" % fmt) if __name__ == "__main__": main()
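The (currently disabled) bctest body reads test definitions from defi-util-test.json; judging from the field names in the commented-out code, one entry is shaped roughly like this (the values themselves are made up for illustration):

example_testcase = {
    "description": "round-trip a raw transaction",  # hypothetical
    "exec": "defi-tx",                              # hypothetical binary name under BUILDDIR/src
    "args": ["-json", "-create"],                   # hypothetical arguments
    "input": "some-input-file.hex",                 # optional, read from testDir
    "output_cmp": "expected-output.json",           # optional, extension selects the comparison
    "return_code": 0,                               # optional, defaults to 0
    "error_txt": "",                                # optional, substring expected on stderr
}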
2,032
group hashes
import time import uuid import pytest from sentry.event_manager import _save_aggregate from sentry.eventstore.models import Event from sentry.grouping.result import CalculatedHashes from sentry.models import Group, GroupHash from sentry.testutils.pytest.fixtures import django_db_all @pytest.fixture def fast_save(default_project, task_runner): def inner(last_frame): data = {"timestamp": time.time(), "type": "error"} evt = Event( default_project.id, uuid.uuid4().hex, data=data, ) with task_runner(): return _save_aggregate( evt, hashes=CalculatedHashes( hashes=["a" * 32, "b" * 32], hierarchical_hashes=["c" * 32, "d" * 32, "e" * 32, last_frame * 32], tree_labels=[ [ { "function": "foo", "package": "", "is_sentinel": False, "is_prefix": False, "datapath": "", } ], [ { "function": "bar", "package": "", "is_sentinel": False, "is_prefix": False, "datapath": "", } ], [ { "function": "baz", "package": "", "is_sentinel": False, "is_prefix": False, "datapath": "", } ], [ { "function": "bam", "package": "", "is_sentinel": False, "is_prefix": False, "datapath": "", } ], ], ), release=None, metadata={}, received_timestamp=0, level=10, culprit="", ) return inner def METHOD_NAME(group_id): return {gh.hash for gh in GroupHash.objects.filter(group_id=group_id)} def _assoc_hash(group, hash): gh = GroupHash.objects.get_or_create(project=group.project, hash=hash)[0] assert gh.group is None or gh.group.id != group.id gh.group = group gh.save() @django_db_all def test_move_all_events(default_project, fast_save): group_info = fast_save("f") assert group_info.is_new assert not group_info.is_regression new_group_info = fast_save("f") assert not new_group_info.is_new assert not new_group_info.is_regression assert new_group_info.group.id == group_info.group.id _assoc_hash(group_info.group, "a" * 32) _assoc_hash(group_info.group, "b" * 32) assert METHOD_NAME(group_info.group.id) == {"a" * 32, "b" * 32, "c" * 32} assert Group.objects.get(id=new_group_info.group.id).title == "foo" # simulate split operation where all events of group are moved into a more specific hash GroupHash.objects.filter(group=group_info.group).delete() GroupHash.objects.create(project=default_project, hash="f" * 32, group_id=group_info.group.id) new_group_info = fast_save("f") assert not new_group_info.is_new assert not new_group_info.is_regression assert new_group_info.group.id == group_info.group.id assert {g.hash for g in GroupHash.objects.filter(group=group_info.group)} == { # one hierarchical hash associated # no flat hashes associated when sorting into split group! 
"f" * 32, } assert Group.objects.get(id=new_group_info.group.id).title == "bam" new_group_info = fast_save("g") assert new_group_info.is_new assert not new_group_info.is_regression assert new_group_info.group.id != group_info.group.id assert METHOD_NAME(new_group_info.group.id) == {"c" * 32} assert Group.objects.get(id=new_group_info.group.id).title == "foo" @django_db_all def test_partial_move(default_project, fast_save): group_info = fast_save("f") assert group_info.is_new assert not group_info.is_regression new_group_info = fast_save("g") assert not new_group_info.is_new assert not new_group_info.is_regression assert new_group_info.group.id == group_info.group.id assert METHOD_NAME(group_info.group.id) == {"c" * 32} # simulate split operation where event "f" of group is moved into a more specific hash group2 = Group.objects.create(project=default_project) f_hash = GroupHash.objects.create(project=default_project, hash="f" * 32, group_id=group2.id) new_group_info = fast_save("f") assert not new_group_info.is_new assert not new_group_info.is_regression assert new_group_info.group.id == group2.id assert METHOD_NAME(new_group_info.group.id) == { # one hierarchical hash associated # no flat hashes associated when sorting into split group! "f" * 32, } new_group_info = fast_save("g") assert not new_group_info.is_new assert not new_group_info.is_regression assert new_group_info.group.id == group_info.group.id assert METHOD_NAME(new_group_info.group.id) == { "c" * 32, } f_hash.delete() new_group_info = fast_save("f") assert not new_group_info.is_new assert not new_group_info.is_regression assert new_group_info.group.id == group_info.group.id
2,033
event
# # This file is part of pretix (Community Edition). # # Copyright (C) 2014-2020 Raphael Michel and contributors # Copyright (C) 2020-2021 rami.io GmbH and contributors # # This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General # Public License as published by the Free Software Foundation in version 3 of the License. # # ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are # applicable granting you additional permissions and placing additional restrictions on your usage of this software. # Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive # this file, see <https://pretix.eu/about/en/license>. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License along with this program. If not, see # <https://www.gnu.org/licenses/>. # from datetime import datetime, time from zoneinfo import ZoneInfo import pytest from django_scopes import scope from pretix.base.models import Event, Organizer from pretix.base.reldate import RelativeDate, RelativeDateWrapper TOKYO = ZoneInfo('Asia/Tokyo') BERLIN = ZoneInfo('Europe/Berlin') @pytest.fixture def METHOD_NAME(): o = Organizer.objects.create(name='Dummy', slug='dummy') METHOD_NAME = Event.objects.create( organizer=o, name='Dummy', slug='dummy', date_from=datetime(2017, 12, 27, 5, 0, 0, tzinfo=TOKYO), presale_start=datetime(2017, 12, 1, 5, 0, 0, tzinfo=TOKYO), plugins='pretix.plugins.banktransfer' ) METHOD_NAME.settings.timezone = "Asia/Tokyo" return METHOD_NAME @pytest.mark.django_db def test_absolute_date(METHOD_NAME): d = datetime(2017, 12, 25, 5, 0, 0, tzinfo=TOKYO) rdw = RelativeDateWrapper(d) assert rdw.datetime(METHOD_NAME) == d assert rdw.to_string() == d.isoformat() @pytest.mark.django_db def test_relative_date_without_time(METHOD_NAME): rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='date_from', minutes_before=None)) assert rdw.datetime(METHOD_NAME).astimezone(TOKYO) == datetime(2017, 12, 26, 5, 0, 0, tzinfo=TOKYO) assert rdw.to_string() == 'RELDATE/1/-/date_from/' @pytest.mark.django_db def test_relative_date_other_base_point(METHOD_NAME): with scope(organizer=METHOD_NAME.organizer): rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_start', minutes_before=None)) assert rdw.datetime(METHOD_NAME) == datetime(2017, 11, 30, 5, 0, 0, tzinfo=TOKYO) assert rdw.to_string() == 'RELDATE/1/-/presale_start/' # presale_end is unset, defaults to date_from rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_end', minutes_before=None)) assert rdw.datetime(METHOD_NAME) == datetime(2017, 12, 26, 5, 0, 0, tzinfo=TOKYO) assert rdw.to_string() == 'RELDATE/1/-/presale_end/' # subevent base se = METHOD_NAME.subevents.create(name="SE1", date_from=datetime(2017, 11, 27, 5, 0, 0, tzinfo=TOKYO)) rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='date_from', minutes_before=None)) assert rdw.datetime(se) == datetime(2017, 11, 26, 5, 0, 0, tzinfo=TOKYO) # presale_start is unset on subevent, default to event rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_start', 
minutes_before=None)) assert rdw.datetime(se) == datetime(2017, 11, 30, 5, 0, 0, tzinfo=TOKYO) # presale_end is unset on all, default to date_from of subevent rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=None, base_date_name='presale_end', minutes_before=None)) assert rdw.datetime(se) == datetime(2017, 11, 26, 5, 0, 0, tzinfo=TOKYO) @pytest.mark.django_db def test_relative_date_in_minutes(METHOD_NAME): rdw = RelativeDateWrapper(RelativeDate(days_before=0, time=None, base_date_name='date_from', minutes_before=60)) assert rdw.to_string() == 'RELDATE/minutes/60/date_from/' assert rdw.datetime(METHOD_NAME) == datetime(2017, 12, 27, 4, 0, 0, tzinfo=TOKYO) @pytest.mark.django_db def test_relative_date_with_time(METHOD_NAME): rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=time(8, 5, 13), base_date_name='date_from', minutes_before=None)) assert rdw.to_string() == 'RELDATE/1/08:05:13/date_from/' assert rdw.datetime(METHOD_NAME) == datetime(2017, 12, 26, 8, 5, 13, tzinfo=TOKYO) @pytest.mark.django_db def test_relative_date_with_time_around_dst(METHOD_NAME): METHOD_NAME.settings.timezone = "Europe/Berlin" METHOD_NAME.date_from = datetime(2020, 3, 29, 18, 0, 0, tzinfo=BERLIN) rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=time(18, 0, 0), base_date_name='date_from', minutes_before=None)) assert rdw.to_string() == 'RELDATE/1/18:00:00/date_from/' assert rdw.datetime(METHOD_NAME) == datetime(2020, 3, 28, 18, 0, 0, tzinfo=BERLIN) rdw = RelativeDateWrapper(RelativeDate(days_before=0, time=time(2, 30, 0), base_date_name='date_from', minutes_before=None)) assert rdw.to_string() == 'RELDATE/0/02:30:00/date_from/' assert rdw.datetime(METHOD_NAME) == datetime(2020, 3, 29, 2, 30, 0, tzinfo=BERLIN) METHOD_NAME.date_from = datetime(2020, 10, 25, 18, 0, 0, tzinfo=BERLIN) rdw = RelativeDateWrapper(RelativeDate(days_before=1, time=time(18, 0, 0), base_date_name='date_from', minutes_before=None)) assert rdw.to_string() == 'RELDATE/1/18:00:00/date_from/' assert rdw.datetime(METHOD_NAME) == datetime(2020, 10, 24, 18, 0, 0, tzinfo=BERLIN) rdw = RelativeDateWrapper(RelativeDate(days_before=0, time=time(2, 30, 0), base_date_name='date_from', minutes_before=None)) assert rdw.to_string() == 'RELDATE/0/02:30:00/date_from/' assert rdw.datetime(METHOD_NAME) == datetime(2020, 10, 25, 2, 30, 0, tzinfo=BERLIN) def test_unserialize(): d = datetime(2017, 12, 25, 10, 0, 0, tzinfo=TOKYO) rdw = RelativeDateWrapper.from_string(d.isoformat()) assert rdw.data == d rdw = RelativeDateWrapper.from_string('RELDATE/1/-/date_from/') assert rdw.data == RelativeDate(days_before=1, time=None, base_date_name='date_from', minutes_before=None) rdw = RelativeDateWrapper.from_string('RELDATE/1/18:05:13/date_from/') assert rdw.data == RelativeDate(days_before=1, time=time(18, 5, 13), base_date_name='date_from', minutes_before=None) rdw = RelativeDateWrapper.from_string('RELDATE/minutes/60/date_from/') assert rdw.data == RelativeDate(days_before=0, time=None, base_date_name='date_from', minutes_before=60)
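The semantics being tested (N days before a base date, at a fixed wall-clock time in the event's timezone) can be reproduced with plain standard-library arithmetic; a small illustration for the DST case above, mirroring the expected value in the test rather than pretix's implementation:

from datetime import datetime, time, timedelta
from zoneinfo import ZoneInfo

BERLIN = ZoneInfo("Europe/Berlin")
date_from = datetime(2020, 3, 29, 18, 0, tzinfo=BERLIN)  # DST starts on this day

# "1 day before date_from, at 18:00 local time"
base_day = date_from.date() - timedelta(days=1)
resolved = datetime.combine(base_day, time(18, 0), tzinfo=BERLIN)

assert resolved == datetime(2020, 3, 28, 18, 0, tzinfo=BERLIN)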
2,034
test set owner
from django.test import TestCase from django.utils import timezone from ...categories.models import Category from ...users.test import create_test_user from ..models import Post, Thread, ThreadParticipant from ..participants import ( add_participants, has_participants, make_participants_aware, set_owner, set_users_unread_private_threads_sync, ) class ParticipantsTests(TestCase): def setUp(self): datetime = timezone.now() self.category = Category.objects.all_categories()[:1][0] self.thread = Thread( category=self.category, started_on=datetime, starter_name="Tester", starter_slug="tester", last_post_on=datetime, last_poster_name="Tester", last_poster_slug="tester", ) self.thread.set_title("Test thread") self.thread.save() post = Post.objects.create( category=self.category, thread=self.thread, poster_name="Tester", original="Hello! I am test message!", parsed="<p>Hello! I am test message!</p>", checksum="nope", posted_on=datetime, updated_on=datetime, ) self.thread.first_post = post self.thread.last_post = post self.thread.save() def test_has_participants(self): """has_participants returns true if thread has participants""" users = [ create_test_user("User", "[email protected]"), create_test_user("Other_User", "[email protected]"), ] self.assertFalse(has_participants(self.thread)) ThreadParticipant.objects.add_participants(self.thread, users) self.assertTrue(has_participants(self.thread)) self.thread.threadparticipant_set.all().delete() self.assertFalse(has_participants(self.thread)) def test_make_threads_participants_aware(self): """ make_participants_aware sets participants_list and participant annotations on list of threads """ user = create_test_user("User", "[email protected]") other_user = create_test_user("Other_User", "[email protected]") self.assertFalse(hasattr(self.thread, "participants_list")) self.assertFalse(hasattr(self.thread, "participant")) make_participants_aware(user, [self.thread]) self.assertFalse(hasattr(self.thread, "participants_list")) self.assertTrue(hasattr(self.thread, "participant")) self.assertIsNone(self.thread.participant) ThreadParticipant.objects.set_owner(self.thread, user) ThreadParticipant.objects.add_participants(self.thread, [other_user]) make_participants_aware(user, [self.thread]) self.assertFalse(hasattr(self.thread, "participants_list")) self.assertEqual(self.thread.participant.user, user) def test_make_thread_participants_aware(self): """ make_participants_aware sets participants_list and participant annotations on thread model """ user = create_test_user("User", "[email protected]") other_user = create_test_user("Other_User", "[email protected]") self.assertFalse(hasattr(self.thread, "participants_list")) self.assertFalse(hasattr(self.thread, "participant")) make_participants_aware(user, self.thread) self.assertTrue(hasattr(self.thread, "participants_list")) self.assertTrue(hasattr(self.thread, "participant")) self.assertEqual(self.thread.participants_list, []) self.assertIsNone(self.thread.participant) ThreadParticipant.objects.set_owner(self.thread, user) ThreadParticipant.objects.add_participants(self.thread, [other_user]) make_participants_aware(user, self.thread) self.assertEqual(self.thread.participant.user, user) for participant in self.thread.participants_list: if participant.user == user: break else: self.fail("thread.participants_list didn't contain user") def METHOD_NAME(self): """set_owner sets user as thread owner""" user = create_test_user("User", "[email protected]") set_owner(self.thread, user) owner = 
self.thread.threadparticipant_set.get(is_owner=True) self.assertEqual(user, owner.user) def test_set_users_unread_private_threads_sync(self): """ set_users_unread_private_threads_sync sets sync_unread_private_threads flag on users provided to true """ users = [ create_test_user("User", "[email protected]"), create_test_user("Other_User", "[email protected]"), ] set_users_unread_private_threads_sync(users=users) for user in users: user.refresh_from_db() assert user.sync_unread_private_threads def test_set_participants_unread_private_threads_sync(self): """ set_users_unread_private_threads_sync sets sync_unread_private_threads flag on participants provided to true """ users = [ create_test_user("User", "[email protected]"), create_test_user("Other_User", "[email protected]"), ] participants = [ThreadParticipant(user=u) for u in users] set_users_unread_private_threads_sync(participants=participants) for user in users: user.refresh_from_db() assert user.sync_unread_private_threads def test_set_participants_users_unread_private_threads_sync(self): """ set_users_unread_private_threads_sync sets sync_unread_private_threads flag on users and participants provided to true """ users = [create_test_user("User", "[email protected]")] participants = [ThreadParticipant(user=u) for u in users] users.append(create_test_user("Other_User", "[email protected]")) set_users_unread_private_threads_sync(users=users, participants=participants) for user in users: user.refresh_from_db() assert user.sync_unread_private_threads def test_set_users_unread_private_threads_sync_exclude_user(self): """exclude_user kwarg works""" users = [ create_test_user("User", "[email protected]"), create_test_user("Other_User", "[email protected]"), ] set_users_unread_private_threads_sync(users=users, exclude_user=users[0]) [i.refresh_from_db() for i in users] assert users[0].sync_unread_private_threads is False assert users[1].sync_unread_private_threads def test_set_users_unread_private_threads_sync_noop(self): """excluding only user is noop""" user = create_test_user("User", "[email protected]") with self.assertNumQueries(0): set_users_unread_private_threads_sync(users=[user], exclude_user=user) user.refresh_from_db() assert user.sync_unread_private_threads is False def test_add_participants_triggers_notify_on_new_private_thread( mocker, user, other_user, private_thread ): notify_on_new_private_thread_mock = mocker.patch( "misago.threads.participants.notify_on_new_private_thread" ) add_participants(user, private_thread, [user, other_user]) notify_on_new_private_thread_mock.delay.assert_called_once_with( user.id, private_thread.id, [other_user.id] )
2,035
delete host
""" Support for RFC 2136 dynamic DNS updates. :depends: - dnspython Python module :configuration: If you want to use TSIG authentication for the server, there are a couple of optional configuration parameters made available to support this (the keyname is only needed if the keyring contains more than one key):: keyfile: keyring file (default=None) keyname: key name in file (default=None) keyalgorithm: algorithm used to create the key (default='HMAC-MD5.SIG-ALG.REG.INT'). Other possible values: hmac-sha1, hmac-sha224, hmac-sha256, hmac-sha384, hmac-sha512 The keyring file needs to be in json format and the key name needs to end with an extra period in the file, similar to this: .. code-block:: json {"keyname.": "keycontent"} """ import logging import salt.utils.files import salt.utils.json log = logging.getLogger(__name__) try: import dns.query import dns.tsigkeyring # pylint: disable=no-name-in-module import dns.update # pylint: disable=no-name-in-module dns_support = True except ImportError as e: dns_support = False def __virtual__(): """ Confirm dnspython is available. """ if dns_support: return "ddns" return ( False, "The ddns execution module cannot be loaded: dnspython not installed.", ) def _config(name, key=None, **kwargs): """ Return a value for 'name' from command line args then config file options. Specify 'key' if the config file option is not the same as 'name'. """ if key is None: key = name if name in kwargs: value = kwargs[name] else: value = __salt__["config.option"]("ddns.{}".format(key)) if not value: value = None return value def _get_keyring(keyfile): keyring = None if keyfile: with salt.utils.files.fopen(keyfile) as _f: keyring = dns.tsigkeyring.from_text(salt.utils.json.load(_f)) return keyring def add_host( zone, name, ttl, ip, nameserver="127.0.0.1", replace=True, timeout=5, port=53, **kwargs ): """ Add, replace, or update the A and PTR (reverse) records for a host. CLI Example: .. code-block:: bash salt ns1 ddns.add_host example.com host1 60 10.1.1.1 """ res = update(zone, name, ttl, "A", ip, nameserver, timeout, replace, port, **kwargs) if res is False: return False fqdn = "{}.{}.".format(name, zone) parts = ip.split(".")[::-1] popped = [] # Iterate over possible reverse zones while len(parts) > 1: p = parts.pop(0) popped.append(p) zone = "{}.{}".format(".".join(parts), "in-addr.arpa.") name = ".".join(popped) ptr = update( zone, name, ttl, "PTR", fqdn, nameserver, timeout, replace, port, **kwargs ) if ptr: return True return res def METHOD_NAME(zone, name, nameserver="127.0.0.1", timeout=5, port=53, **kwargs): """ Delete the forward and reverse records for a host. Returns true if any records are deleted. CLI Example: .. code-block:: bash salt ns1 ddns.delete_host example.com host1 """ fqdn = "{}.{}".format(name, zone) request = dns.message.make_query(fqdn, "A") answer = dns.query.udp(request, nameserver, timeout, port) try: ips = [i.address for i in answer.answer[0].items] except IndexError: ips = [] res = delete( zone, name, nameserver=nameserver, timeout=timeout, port=port, **kwargs ) fqdn = fqdn + "." 
for ip in ips: parts = ip.split(".")[::-1] popped = [] # Iterate over possible reverse zones while len(parts) > 1: p = parts.pop(0) popped.append(p) zone = "{}.{}".format(".".join(parts), "in-addr.arpa.") name = ".".join(popped) ptr = delete( zone, name, "PTR", fqdn, nameserver=nameserver, timeout=timeout, port=port, **kwargs ) if ptr: res = True return res def update( zone, name, ttl, rdtype, data, nameserver="127.0.0.1", timeout=5, replace=False, port=53, **kwargs ): """ Add, replace, or update a DNS record. nameserver must be an IP address and the minion running this module must have update privileges on that server. If replace is true, first deletes all records for this name and type. CLI Example: .. code-block:: bash salt ns1 ddns.update example.com host1 60 A 10.0.0.1 """ name = str(name) if name[-1:] == ".": fqdn = name else: fqdn = "{}.{}".format(name, zone) request = dns.message.make_query(fqdn, rdtype) answer = dns.query.udp(request, nameserver, timeout, port) rdtype = dns.rdatatype.from_text(rdtype) rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data) keyring = _get_keyring(_config("keyfile", **kwargs)) keyname = _config("keyname", **kwargs) keyalgorithm = _config("keyalgorithm", **kwargs) or "HMAC-MD5.SIG-ALG.REG.INT" is_exist = False for rrset in answer.answer: if rdata in rrset.items: if ttl == rrset.ttl: if len(answer.answer) >= 1 or len(rrset.items) >= 1: is_exist = True break dns_update = dns.update.Update( zone, keyring=keyring, keyname=keyname, keyalgorithm=keyalgorithm ) if replace: dns_update.replace(name, ttl, rdata) elif not is_exist: dns_update.add(name, ttl, rdata) else: return None answer = dns.query.udp(dns_update, nameserver, timeout, port) if answer.rcode() > 0: return False return True def delete( zone, name, rdtype=None, data=None, nameserver="127.0.0.1", timeout=5, port=53, **kwargs ): """ Delete a DNS record. CLI Example: .. code-block:: bash salt ns1 ddns.delete example.com host1 A """ name = str(name) if name[-1:] == ".": fqdn = name else: fqdn = "{}.{}".format(name, zone) request = dns.message.make_query(fqdn, (rdtype or "ANY")) answer = dns.query.udp(request, nameserver, timeout, port) if not answer.answer: return None keyring = _get_keyring(_config("keyfile", **kwargs)) keyname = _config("keyname", **kwargs) keyalgorithm = _config("keyalgorithm", **kwargs) or "HMAC-MD5.SIG-ALG.REG.INT" dns_update = dns.update.Update( zone, keyring=keyring, keyname=keyname, keyalgorithm=keyalgorithm ) if rdtype: rdtype = dns.rdatatype.from_text(rdtype) if data: rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data) dns_update.delete(name, rdata) else: dns_update.delete(name, rdtype) else: dns_update.delete(name) answer = dns.query.udp(dns_update, nameserver, timeout, port) if answer.rcode() > 0: return False return True
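Underneath, the module drives dnspython's dynamic-update API; a bare-bones sketch of the same call sequence (assumes a nameserver at 127.0.0.1 that accepts unauthenticated updates for example.com; real setups would attach a TSIG keyring as the module does):

import dns.query
import dns.update

update = dns.update.Update("example.com")
update.replace("host1", 60, "A", "10.0.0.1")  # upsert host1.example.com -> 10.0.0.1

answer = dns.query.udp(update, "127.0.0.1", 5, 53)  # (message, nameserver, timeout, port)
print("ok" if answer.rcode() == 0 else "failed with rcode %d" % answer.rcode())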
2,036
repeats cmp
""" Comparison utilities for STIX pattern observation expressions. """ from stix2.equivalence.pattern.compare import generic_cmp, iter_lex_cmp from stix2.equivalence.pattern.compare.comparison import ( comparison_expression_cmp, generic_constant_cmp, ) from stix2.patterns import ( AndObservationExpression, FollowedByObservationExpression, ObservationExpression, OrObservationExpression, QualifiedObservationExpression, RepeatQualifier, StartStopQualifier, WithinQualifier, _CompoundObservationExpression, ) _OBSERVATION_EXPRESSION_TYPE_ORDER = ( ObservationExpression, AndObservationExpression, OrObservationExpression, FollowedByObservationExpression, QualifiedObservationExpression, ) _QUALIFIER_TYPE_ORDER = ( RepeatQualifier, WithinQualifier, StartStopQualifier, ) def METHOD_NAME(qual1, qual2): """ Compare REPEATS qualifiers. This orders by repeat count. """ return generic_constant_cmp(qual1.times_to_repeat, qual2.times_to_repeat) def within_cmp(qual1, qual2): """ Compare WITHIN qualifiers. This orders by number of seconds. """ return generic_constant_cmp( qual1.number_of_seconds, qual2.number_of_seconds, ) def startstop_cmp(qual1, qual2): """ Compare START/STOP qualifiers. This lexicographically orders by start time, then stop time. """ return iter_lex_cmp( (qual1.start_time, qual1.stop_time), (qual2.start_time, qual2.stop_time), generic_constant_cmp, ) _QUALIFIER_COMPARATORS = { RepeatQualifier: METHOD_NAME, WithinQualifier: within_cmp, StartStopQualifier: startstop_cmp, } def observation_expression_cmp(expr1, expr2): """ Compare two observation expression ASTs. This is sensitive to the order of the expressions' sub-components. To achieve an order-insensitive comparison, the sub-component ASTs must be ordered first. Args: expr1: The first observation expression expr2: The second observation expression Returns: <0, 0, or >0 depending on whether the first arg is less, equal or greater than the second """ type1 = type(expr1) type2 = type(expr2) type1_idx = _OBSERVATION_EXPRESSION_TYPE_ORDER.index(type1) type2_idx = _OBSERVATION_EXPRESSION_TYPE_ORDER.index(type2) if type1_idx != type2_idx: result = generic_cmp(type1_idx, type2_idx) # else, both exprs are of same type. # If they're simple, use contained comparison expression order elif type1 is ObservationExpression: result = comparison_expression_cmp( expr1.operand, expr2.operand, ) elif isinstance(expr1, _CompoundObservationExpression): # Both compound, and of same type (and/or/followedby): sort according # to contents. result = iter_lex_cmp( expr1.operands, expr2.operands, observation_expression_cmp, ) else: # QualifiedObservationExpression # Both qualified. Check qualifiers first; if they are the same, # use order of the qualified expressions. qual1_type = type(expr1.qualifier) qual2_type = type(expr2.qualifier) qual1_type_idx = _QUALIFIER_TYPE_ORDER.index(qual1_type) qual2_type_idx = _QUALIFIER_TYPE_ORDER.index(qual2_type) result = generic_cmp(qual1_type_idx, qual2_type_idx) if result == 0: # Same qualifier type; compare qualifier details qual_cmp = _QUALIFIER_COMPARATORS.get(qual1_type) if qual_cmp: result = qual_cmp(expr1.qualifier, expr2.qualifier) else: raise TypeError( "Can't compare qualifier type: " + qual1_type.__name__, ) if result == 0: # Same qualifier type and details; use qualified expression order result = observation_expression_cmp( expr1.observation_expression, expr2.observation_expression, ) return result
2,037
function returning generator
# pylint: disable=too-few-public-methods,import-error, missing-docstring # pylint: disable=useless-super-delegation,wrong-import-position,invalid-name, wrong-import-order, condition-evals-to-constant if len('TEST'): # [use-implicit-booleaness-not-len] pass if not len('TEST'): # [use-implicit-booleaness-not-len] pass z = [] if z and len(['T', 'E', 'S', 'T']): # [use-implicit-booleaness-not-len] pass if True or len('TEST'): # [use-implicit-booleaness-not-len] pass if len('TEST') == 0: # Should be fine pass if len('TEST') < 1: # Should be fine pass if len('TEST') <= 0: # Should be fine pass if 1 > len('TEST'): # Should be fine pass if 0 >= len('TEST'): # Should be fine pass if z and len('TEST') == 0: # Should be fine pass if 0 == len('TEST') < 10: # Should be fine pass # Should be fine if 0 < 1 <= len('TEST') < 10: # [comparison-of-constants] pass if 10 > len('TEST') != 0: # Should be fine pass if 10 > len('TEST') > 1 > 0: # Should be fine pass if 0 <= len('TEST') < 100: # Should be fine pass if z or 10 > len('TEST') != 0: # Should be fine pass if z: pass elif len('TEST'): # [use-implicit-booleaness-not-len] pass if z: pass elif not len('TEST'): # [use-implicit-booleaness-not-len] pass while len('TEST'): # [use-implicit-booleaness-not-len] pass while not len('TEST'): # [use-implicit-booleaness-not-len] pass while z and len('TEST'): # [use-implicit-booleaness-not-len] pass while not len('TEST') and z: # [use-implicit-booleaness-not-len] pass assert len('TEST') > 0 # Should be fine x = 1 if len('TEST') != 0 else 2 # Should be fine f_o_o = len('TEST') or 42 # Should be fine a = x and len(x) # Should be fine def some_func(): return len('TEST') > 0 # Should be fine def github_issue_1325(): l = [1, 2, 3] length = len(l) if l else 0 # Should be fine return length def github_issue_1331(*args): assert False, len(args) # Should be fine def github_issue_1331_v2(*args): assert len(args), args # [use-implicit-booleaness-not-len] def github_issue_1331_v3(*args): assert len(args) or z, args # [use-implicit-booleaness-not-len] def github_issue_1331_v4(*args): assert z and len(args), args # [use-implicit-booleaness-not-len] b = bool(len(z)) # [use-implicit-booleaness-not-len] c = bool(len('TEST') or 42) # [use-implicit-booleaness-not-len] def github_issue_1879(): class ClassWithBool(list): def __bool__(self): return True class ClassWithoutBool(list): pass class ChildClassWithBool(ClassWithBool): pass class ChildClassWithoutBool(ClassWithoutBool): pass assert len(ClassWithBool()) assert len(ChildClassWithBool()) assert len(ClassWithoutBool()) # [use-implicit-booleaness-not-len] assert len(ChildClassWithoutBool()) # [use-implicit-booleaness-not-len] assert len(range(0)) # [use-implicit-booleaness-not-len] assert len([t + 1 for t in []]) # [use-implicit-booleaness-not-len] assert len(u + 1 for u in []) # [use-implicit-booleaness-not-len] assert len({"1":(v + 1) for v in {}}) # [use-implicit-booleaness-not-len] assert len(set((w + 1) for w in set())) # [use-implicit-booleaness-not-len] # pylint: disable=import-outside-toplevel import numpy numpy_array = numpy.array([0]) if len(numpy_array) > 0: print('numpy_array') if len(numpy_array): print('numpy_array') if numpy_array: print('b') import pandas as pd pandas_df = pd.DataFrame() if len(pandas_df): print("this works, but pylint tells me not to use len() without comparison") if len(pandas_df) > 0: print("this works and pylint likes it, but it's not the solution intended by PEP-8") if pandas_df: print("this does not work (truth value of dataframe is ambiguous)") def 
function_returning_list(r): if r==1: return [1] return [2] def function_returning_int(r): if r==1: return 1 return 2 def METHOD_NAME(r): for i in [r, 1, 2, 3]: yield i def function_returning_comprehension(r): return [x+1 for x in [r, 1, 2, 3]] def function_returning_function(r): return METHOD_NAME(r) assert len(function_returning_list(z)) # [use-implicit-booleaness-not-len] assert len(function_returning_int(z)) # This should raise a use-implicit-booleaness-not-len once astroid can infer it # See https://github.com/pylint-dev/pylint/pull/3821#issuecomment-743771514 assert len(METHOD_NAME(z)) assert len(function_returning_comprehension(z)) assert len(function_returning_function(z)) def github_issue_4215(): # Test undefined variables # https://github.com/pylint-dev/pylint/issues/4215 if len(undefined_var): # [undefined-variable] pass if len(undefined_var2[0]): # [undefined-variable] pass # pylint: disable=len-as-condition if len('TEST'): pass
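In short, the checker exercised above flags len() used purely as a truth test; a side-by-side of the flagged and the accepted spellings (illustration only, separate from the functional-test file):

items = []

if len(items):        # flagged: use-implicit-booleaness-not-len
    print("has items")

if items:             # preferred: sequences are falsy when empty
    print("has items")

if len(items) == 0:   # explicit comparisons are left alone
    print("empty")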
2,038
parse datetime
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2022 Satpy developers # # This file is part of satpy. # # satpy is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # satpy is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # satpy. If not, see <http://www.gnu.org/licenses/>. """Reader for files produced by ESA's Ocean Color CCI project. This reader currently supports the lat/lon gridded products and does not yet support the products on a sinusoidal grid. The products on each of the composite periods (1, 5 and 8 day plus monthly) are supported and both the merged product files (OC_PRODUCTS) and single product (RRS, CHLOR_A, IOP, K_490) are supported. """ import logging from datetime import datetime import dask.array as da import numpy as np from pyresample import geometry from satpy.readers.netcdf_utils import NetCDF4FileHandler logger = logging.getLogger(__name__) class OCCCIFileHandler(NetCDF4FileHandler): """File handler for Ocean Color CCI netCDF files.""" @staticmethod def METHOD_NAME(datestr): """Parse datetime.""" return datetime.strptime(datestr, "%Y%m%d%H%MZ") @property def start_time(self): """Get the start time.""" return self.METHOD_NAME(self['/attr/time_coverage_start']) @property def end_time(self): """Get the end time.""" return self.METHOD_NAME(self['/attr/time_coverage_end']) @property def composite_period(self): """Determine composite period from filename information.""" comp1 = self.filename_info['composite_period_1'] comp2 = self.filename_info['composite_period_2'] if comp2 == 'MONTHLY' and comp1 == "1M": return 'monthly' elif comp1 == '1D': return 'daily' elif comp1 == '5D': return '5-day' elif comp1 == '8D': return '8-day' else: raise ValueError(f"Unknown data compositing period: {comp1}_{comp2}") def _update_attrs(self, dataset, dataset_info): """Update dataset attributes.""" dataset.attrs.update(self[dataset_info['nc_key']].attrs) dataset.attrs.update(dataset_info) dataset.attrs['sensor'] = 'merged' dataset.attrs['composite_period'] = self.composite_period # remove attributes from original file which don't apply anymore dataset.attrs.pop("nc_key") def get_dataset(self, dataset_id, ds_info): """Get dataset.""" dataset = da.squeeze(self[ds_info['nc_key']]) if '_FillValue' in dataset.attrs: dataset.data = da.where(dataset.data == dataset.attrs['_FillValue'], np.nan, dataset.data) self._update_attrs(dataset, ds_info) if 'lat' in dataset.dims: dataset = dataset.rename({'lat': 'y'}) if 'lon' in dataset.dims: dataset = dataset.rename({'lon': 'x'}) return dataset def get_area_def(self, dsid): """Get the area definition based on information in file. There is no area definition in the file itself, so we have to compute it from the metadata, which specifies the area extent and pixel resolution. 
""" proj_param = 'EPSG:4326' lon_res = float(self['/attr/geospatial_lon_resolution']) lat_res = float(self['/attr/geospatial_lat_resolution']) min_lon = self['/attr/geospatial_lon_min'] max_lon = self['/attr/geospatial_lon_max'] min_lat = self['/attr/geospatial_lat_min'] max_lat = self['/attr/geospatial_lat_max'] area_extent = (min_lon, min_lat, max_lon, max_lat) lon_size = np.round((max_lon - min_lon) / lon_res).astype(int) lat_size = np.round((max_lat - min_lat) / lat_res).astype(int) area = geometry.AreaDefinition('gridded_occci', 'Full globe gridded area', 'longlat', proj_param, lon_size, lat_size, area_extent) return area
2,039
handle
""" This command: * deletes all prescribing data (both original data and extracts created by the matrixstore build) from: * the filesystem * BigQuery * Cloud Storage * resets the import pipeline so that the import may be re-run with correct data """ import json import os import networkx as nx from django.conf import settings from django.core.management import BaseCommand from frontend.models import ImportLog from gcutils.bigquery import Client as BQClient from gcutils.bigquery import NotFound from gcutils.storage import Client as StorageClient from pipeline.models import TaskLog from pipeline.runner import dump_import_records, load_import_records class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument("year") parser.add_argument("month") def METHOD_NAME(self, year, month, **kwargs): verify_year_month(year, month) delete_import_record(year, month) mark_task_logs_as_failed(year, month) delete_fetch_and_import_task_log(year, month) delete_import_logs(year, month) delete_prescribing_file_on_filesystem(year, month) delete_prescribing_file_in_storage(year, month) delete_temporary_prescribing_bq_table(year, month) remove_records_from_bq_table(year, month) delete_backup_from_storage(year, month) delete_matrixstore_bq_table(year, month) delete_matrixstore_storage_files(year, month) delete_matrixstore_download(year, month) def verify_year_month(year, month): print("verify_year_month") log = ImportLog.objects.latest_in_category("prescribing") assert log.current_at.year == year assert log.current_at.month == int(month) def delete_import_record(year, month): print("delete_import_record") import_records = load_import_records() logs = import_records["prescribing"] new_logs = [ r for r in logs if f"prescribing_v2/{year}_{month}" not in r["imported_file"] ] assert len(logs) == len(new_logs) + 1 import_records["prescribing"] = new_logs dump_import_records(import_records) def mark_task_logs_as_failed(year, month): print("mark_task_logs_as_failed") with open(settings.PIPELINE_METADATA_DIR + "/tasks.json") as f: tasks = json.load(f) graph = nx.DiGraph() for task_name, task_def in tasks.items(): for dependency_name in task_def.get("dependencies", []): graph.add_edge(dependency_name, task_name) convert_task_log = TaskLog.objects.get( task_name="convert_hscic_prescribing", year=year, month=month, status=TaskLog.SUCCESSFUL, ) for task_name in nx.descendants(graph, "convert_hscic_prescribing"): task_log = TaskLog.objects.get( task_name=task_name, year=year, month=month, status=TaskLog.SUCCESSFUL ) assert task_log.started_at > convert_task_log.started_at task_log.status = TaskLog.FAILED task_log.save() convert_task_log.status = TaskLog.FAILED convert_task_log.save() def delete_fetch_and_import_task_log(year, month): print("delete_fetch_and_import_task_log") TaskLog.objects.get(task_name="fetch_and_import", year=year, month=month).delete() def delete_import_logs(year, month): print("delete_import_logs") ImportLog.objects.get( category="prescribing", current_at=f"{year}-{month}-01" ).delete() ImportLog.objects.get( category="dashboard_data", current_at=f"{year}-{month}-01" ).delete() def delete_prescribing_file_on_filesystem(year, month): print("delete_prescribing_file_on_filesystem") path = os.path.join( settings.PIPELINE_DATA_BASEDIR, "prescribing_v2", f"{year}_{month}", f"epd_{year}{month}.csv", ) os.remove(path) def delete_prescribing_file_in_storage(year, month): print("delete_prescribing_file_in_storage") _delete_file_from_storage("hscic/prescribing_v2/2021_10") def 
delete_temporary_prescribing_bq_table(year, month): print("delete_temporary_prescribing_bq_table") try: _delete_table_from_bq("tmp_eu", f"raw_prescribing_data_{year}_{month}") except NotFound: # This is ok, as the table might already have been deleted pass def remove_records_from_bq_table(year, month): print("remove_records_from_bq_table") client = BQClient("hscic") sql = ( f"DELETE FROM ebmdatalab.hscic.prescribing_v2 WHERE month = '{year}-{month}-01'" ) client.query(sql) def delete_backup_from_storage(year, month): print("delete_backup_from_storage") _delete_file_from_storage("backup/prescribing_v2/2021_10") def delete_matrixstore_bq_table(year, month): print("delete_matrixstore_bq_table") _delete_table_from_bq("prescribing_export", f"prescribing_{year}_{month}") def delete_matrixstore_storage_files(year, month): print("delete_matrixstore_storage_files") _delete_file_from_storage(f"prescribing_exports/prescribing_{year}_{month}_*") def delete_matrixstore_download(year, month): print("delete_matrixstore_download") path = os.path.join( settings.PIPELINE_DATA_BASEDIR, "matrixstore_import", f"{year}-{month}-01_prescribing.csv.gz", ) os.remove(path) def _delete_file_from_storage(path): client = StorageClient() bucket = client.get_bucket() for blob in bucket.list_blobs(prefix=path): blob.delete() def _delete_table_from_bq(dataset_name, table_name): client = BQClient(dataset_name) client.delete_table(table_name)
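The mark_task_logs_as_failed helper in the row above builds a dependency graph from a tasks.json-style mapping and then marks everything downstream of convert_hscic_prescribing as failed. A minimal sketch of that pattern with networkx, using a made-up task mapping (the task names here are illustrative, not the project's real pipeline):

import networkx as nx

# Hypothetical task definitions in the same shape as a tasks.json mapping.
tasks = {
    "fetch": {"dependencies": []},
    "convert": {"dependencies": ["fetch"]},
    "import": {"dependencies": ["convert"]},
    "report": {"dependencies": ["import"]},
}

# Edges point from a dependency to the task that needs it,
# so nx.descendants() yields every downstream task.
graph = nx.DiGraph()
for task_name, task_def in tasks.items():
    for dependency_name in task_def.get("dependencies", []):
        graph.add_edge(dependency_name, task_name)

print(sorted(nx.descendants(graph, "convert")))  # ['import', 'report']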
2,040
test run specs
from unittest import mock import pytest import string import dbt.exceptions import dbt.graph.selector as graph_selector import dbt.graph.cli as graph_cli from dbt.node_types import NodeType import networkx as nx from dbt import flags from argparse import Namespace from dbt.contracts.project import UserConfig flags.set_from_args(Namespace(), UserConfig()) def _get_graph(): integer_graph = nx.balanced_tree(2, 2, nx.DiGraph()) package_mapping = { i: "m." + ("X" if i % 2 == 0 else "Y") + "." + letter for (i, letter) in enumerate(string.ascii_lowercase) } # Edges: [(X.a, Y.b), (X.a, X.c), (Y.b, Y.d), (Y.b, X.e), (X.c, Y.f), (X.c, X.g)] return graph_selector.Graph(nx.relabel_nodes(integer_graph, package_mapping)) def _get_manifest(graph): nodes = {} for unique_id in graph: fqn = unique_id.split(".") node = mock.MagicMock( unique_id=unique_id, fqn=fqn, package_name=fqn[0], tags=[], resource_type=NodeType.Model, empty=False, config=mock.MagicMock(enabled=True), is_versioned=False, ) nodes[unique_id] = node nodes["m.X.a"].tags = ["abc"] nodes["m.Y.b"].tags = ["abc", "bcef"] nodes["m.X.c"].tags = ["abc", "bcef"] nodes["m.Y.d"].tags = [] nodes["m.X.e"].tags = ["efg", "bcef"] nodes["m.Y.f"].tags = ["efg", "bcef"] nodes["m.X.g"].tags = ["efg"] return mock.MagicMock(nodes=nodes) @pytest.fixture def graph(): return graph_selector.Graph(_get_graph()) @pytest.fixture def manifest(graph): return _get_manifest(graph) def id_macro(arg): if isinstance(arg, str): return arg try: return "_".join(arg) except TypeError: return arg run_specs = [ # include by fqn (["X.a"], [], {"m.X.a"}), # include by tag (["tag:abc"], [], {"m.X.a", "m.Y.b", "m.X.c"}), # exclude by tag (["*"], ["tag:abc"], {"m.Y.d", "m.X.e", "m.Y.f", "m.X.g"}), # tag + fqn (["tag:abc", "a"], [], {"m.X.a", "m.Y.b", "m.X.c"}), (["tag:abc", "d"], [], {"m.X.a", "m.Y.b", "m.X.c", "m.Y.d"}), # multiple node selection across packages (["X.a", "b"], [], {"m.X.a", "m.Y.b"}), (["X.a+"], ["b"], {"m.X.a", "m.X.c", "m.Y.d", "m.X.e", "m.Y.f", "m.X.g"}), # children (["X.c+"], [], {"m.X.c", "m.Y.f", "m.X.g"}), (["X.a+1"], [], {"m.X.a", "m.Y.b", "m.X.c"}), (["X.a+"], ["tag:efg"], {"m.X.a", "m.Y.b", "m.X.c", "m.Y.d"}), # parents (["+Y.f"], [], {"m.X.c", "m.Y.f", "m.X.a"}), (["1+Y.f"], [], {"m.X.c", "m.Y.f"}), # childrens parents (["@X.c"], [], {"m.X.a", "m.X.c", "m.Y.f", "m.X.g"}), # multiple selection/exclusion (["tag:abc", "tag:bcef"], [], {"m.X.a", "m.Y.b", "m.X.c", "m.X.e", "m.Y.f"}), (["tag:abc", "tag:bcef"], ["tag:efg"], {"m.X.a", "m.Y.b", "m.X.c"}), (["tag:abc", "tag:bcef"], ["tag:efg", "a"], {"m.Y.b", "m.X.c"}), # intersections (["a,a"], [], {"m.X.a"}), (["+c,c+"], [], {"m.X.c"}), (["a,b"], [], set()), (["tag:abc,tag:bcef"], [], {"m.Y.b", "m.X.c"}), (["*,tag:abc,a"], [], {"m.X.a"}), (["a,tag:abc,*"], [], {"m.X.a"}), (["tag:abc,tag:bcef"], ["c"], {"m.Y.b"}), (["tag:bcef,tag:efg"], ["tag:bcef,@b"], {"m.Y.f"}), (["tag:bcef,tag:efg"], ["tag:bcef,@a"], set()), (["*,@a,+b"], ["*,tag:abc,tag:bcef"], {"m.X.a"}), (["tag:bcef,tag:efg", "*,tag:abc"], [], {"m.X.a", "m.Y.b", "m.X.c", "m.X.e", "m.Y.f"}), (["tag:bcef,tag:efg", "*,tag:abc"], ["e"], {"m.X.a", "m.Y.b", "m.X.c", "m.Y.f"}), (["tag:bcef,tag:efg", "*,tag:abc"], ["e"], {"m.X.a", "m.Y.b", "m.X.c", "m.Y.f"}), (["tag:bcef,tag:efg", "*,tag:abc"], ["e", "f"], {"m.X.a", "m.Y.b", "m.X.c"}), (["tag:bcef,tag:efg", "*,tag:abc"], ["tag:abc,tag:bcef"], {"m.X.a", "m.X.e", "m.Y.f"}), (["tag:bcef,tag:efg", "*,tag:abc"], ["tag:abc,tag:bcef", "tag:abc,a"], {"m.X.e", "m.Y.f"}), ] @pytest.mark.parametrize("include,exclude,expected", 
run_specs, ids=id_macro) def METHOD_NAME(include, exclude, expected): graph = _get_graph() manifest = _get_manifest(graph) selector = graph_selector.NodeSelector(graph, manifest) # TODO: The "eager" string below needs to be replaced with programatic access # to the default value for the indirect selection parameter in # dbt.cli.params.indirect_selection # # Doing that is actually a little tricky, so I'm punting it to a new ticket GH #6397 spec = graph_cli.parse_difference(include, exclude, "eager") selected, _ = selector.select_nodes(spec) assert selected == expected param_specs = [ ("a", False, None, False, None, "fqn", "a", False), ("+a", True, None, False, None, "fqn", "a", False), ("256+a", True, 256, False, None, "fqn", "a", False), ("a+", False, None, True, None, "fqn", "a", False), ("a+256", False, None, True, 256, "fqn", "a", False), ("+a+", True, None, True, None, "fqn", "a", False), ("16+a+32", True, 16, True, 32, "fqn", "a", False), ("@a", False, None, False, None, "fqn", "a", True), ("a.b", False, None, False, None, "fqn", "a.b", False), ("+a.b", True, None, False, None, "fqn", "a.b", False), ("256+a.b", True, 256, False, None, "fqn", "a.b", False), ("a.b+", False, None, True, None, "fqn", "a.b", False), ("a.b+256", False, None, True, 256, "fqn", "a.b", False), ("+a.b+", True, None, True, None, "fqn", "a.b", False), ("16+a.b+32", True, 16, True, 32, "fqn", "a.b", False), ("@a.b", False, None, False, None, "fqn", "a.b", True), ("a.b.*", False, None, False, None, "fqn", "a.b.*", False), ("+a.b.*", True, None, False, None, "fqn", "a.b.*", False), ("256+a.b.*", True, 256, False, None, "fqn", "a.b.*", False), ("a.b.*+", False, None, True, None, "fqn", "a.b.*", False), ("a.b.*+256", False, None, True, 256, "fqn", "a.b.*", False), ("+a.b.*+", True, None, True, None, "fqn", "a.b.*", False), ("16+a.b.*+32", True, 16, True, 32, "fqn", "a.b.*", False), ("@a.b.*", False, None, False, None, "fqn", "a.b.*", True), ("tag:a", False, None, False, None, "tag", "a", False), ("+tag:a", True, None, False, None, "tag", "a", False), ("256+tag:a", True, 256, False, None, "tag", "a", False), ("tag:a+", False, None, True, None, "tag", "a", False), ("tag:a+256", False, None, True, 256, "tag", "a", False), ("+tag:a+", True, None, True, None, "tag", "a", False), ("16+tag:a+32", True, 16, True, 32, "tag", "a", False), ("@tag:a", False, None, False, None, "tag", "a", True), ("source:a", False, None, False, None, "source", "a", False), ("source:a+", False, None, True, None, "source", "a", False), ("source:a+1", False, None, True, 1, "source", "a", False), ("source:a+32", False, None, True, 32, "source", "a", False), ("@source:a", False, None, False, None, "source", "a", True), ] @pytest.mark.parametrize( "spec,parents,parents_depth,children,children_depth,filter_type,filter_value,childrens_parents", param_specs, ids=id_macro, ) def test_parse_specs( spec, parents, parents_depth, children, children_depth, filter_type, filter_value, childrens_parents, ): parsed = graph_selector.SelectionCriteria.from_single_spec(spec) assert parsed.parents == parents assert parsed.parents_depth == parents_depth assert parsed.children == children assert parsed.children_depth == children_depth assert parsed.method == filter_type assert parsed.value == filter_value assert parsed.childrens_parents == childrens_parents invalid_specs = [ "@a+", "@a.b+", "@a.b*+", "@tag:a+", "@source:a+", ] @pytest.mark.parametrize("invalid", invalid_specs, ids=lambda k: str(k)) def test_invalid_specs(invalid): with 
pytest.raises(dbt.exceptions.DbtRuntimeError): graph_selector.SelectionCriteria.from_single_spec(invalid)
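The run specs in the row above encode dbt's node-selection syntax: "+" selects ancestors or descendants, "@" selects children's parents, "tag:" filters by tag, and "," intersects selections. A rough standalone sketch of the two basic "+" operators on a toy graph; this is not dbt's implementation, only the graph operations the syntax reduces to:

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([("a", "b"), ("a", "c"), ("b", "d"), ("c", "e")])

def select_descendants(graph, node):
    """'node+' : the node plus everything downstream of it."""
    return {node} | nx.descendants(graph, node)

def select_ancestors(graph, node):
    """'+node' : the node plus everything upstream of it."""
    return {node} | nx.ancestors(graph, node)

print(select_descendants(g, "a"))  # {'a', 'b', 'c', 'd', 'e'}
print(select_ancestors(g, "e"))    # {'e', 'c', 'a'}
# ',' is a set intersection of two selections:
print(select_descendants(g, "b") & select_ancestors(g, "d"))  # {'b', 'd'}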
2,041
compute exact
""" Copyright (c) 2018-2023 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import re from collections import Counter import string import numpy from ..representation import QuestionAnsweringAnnotation, QuestionAnsweringPrediction from ..representation import QuestionAnsweringEmbeddingAnnotation, QuestionAnsweringEmbeddingPrediction from ..representation import QuestionAnsweringBiDAFAnnotation from .metric import PerImageEvaluationMetric, FullDatasetEvaluationMetric from ..config import NumberField def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): regex = re.compile(r"\b(a|an|the)\b", re.UNICODE) return re.sub(regex, " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def get_tokens(s): if not s: return [] return normalize_answer(s).split() class ScoreF1(PerImageEvaluationMetric): __provider__ = 'f1' annotation_types = (QuestionAnsweringAnnotation,) prediction_types = (QuestionAnsweringPrediction,) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.per_question_results = {} def update(self, annotation, prediction): gold_answers = [answer["text"] for answer in annotation.orig_answer_text if normalize_answer(answer["text"])] if not gold_answers: gold_answers = [''] prediction_answer = prediction.tokens[0] if prediction.tokens else '' max_f1_score = max(self.compute_f1(a, prediction_answer) for a in gold_answers) current_max_f1_score = self.per_question_results.get(annotation.question_id, 0) self.per_question_results[annotation.question_id] = max(max_f1_score, current_max_f1_score) return max_f1_score @staticmethod def compute_f1(a_gold, a_pred): gold_toks = get_tokens(a_gold) pred_toks = get_tokens(a_pred) common = Counter(gold_toks) & Counter(pred_toks) num_same = sum(common.values()) if len(gold_toks) == 0 or len(pred_toks) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 precision = 1.0 * num_same / len(pred_toks) recall = 1.0 * num_same / len(gold_toks) f1 = (2 * precision * recall) / (precision + recall) return f1 def evaluate(self, annotations, predictions): return sum(self.per_question_results.values()) / len(self.per_question_results) def reset(self): del self.per_question_results self.per_question_results = {} class ExactMatchScore(PerImageEvaluationMetric): __provider__ = 'exact_match' annotation_types = (QuestionAnsweringAnnotation, QuestionAnsweringBiDAFAnnotation, ) prediction_types = (QuestionAnsweringPrediction, ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.per_question_results = {} def update(self, annotation, prediction): gold_answers = [answer["text"] for answer in annotation.orig_answer_text if normalize_answer(answer["text"])] if not gold_answers: 
gold_answers = [''] pred_answer = prediction.tokens[0] if prediction.tokens else '' max_exact_match = max(self.METHOD_NAME(a_gold, pred_answer) for a_gold in gold_answers) self.per_question_results[annotation.question_id] = max( max_exact_match, self.per_question_results.get(annotation.question_id, 0) ) return max_exact_match @staticmethod def METHOD_NAME(a_gold, a_pred): return int(normalize_answer(a_gold) == normalize_answer(a_pred)) def evaluate(self, annotations, predictions): return sum(self.per_question_results.values()) / len(self.per_question_results) def reset(self): del self.per_question_results self.per_question_results = {} class QuestionAnsweringEmbeddingAccuracy(FullDatasetEvaluationMetric): __provider__ = 'qa_embedding_accuracy' annotation_types = (QuestionAnsweringEmbeddingAnnotation,) prediction_types = (QuestionAnsweringEmbeddingPrediction,) @classmethod def parameters(cls): parameters = super().parameters() parameters.update({ 'top_k': NumberField( value_type=int, min_value=1, max_value=1000, default=5, optional=True, description='Specifies the number of closest context embeddings to check.' ), }) return parameters def configure(self): self.top_k = self.get_value_from_config('top_k') def evaluate(self, annotations, predictions): ap_pairs = list(zip(annotations, predictions)) #check data alignment assert all( a.identifier is p.identifier if not isinstance(p.identifier, tuple) else p.identifier.values for a, p in ap_pairs), "annotations and predictions are not aligned" q_pairs = [(a, p) for a, p in ap_pairs if a.context_pos_indetifier is not None] c_pairs = [(a, p) for a, p in ap_pairs if a.context_pos_indetifier is None] c_data_identifiers = [a.identifier for a, p in c_pairs] c_vecs = numpy.array([p.embedding for a, p in c_pairs]) # calc distances from each question to all contexts and check if top_k has true positives true_pos = 0 for q_a, q_p in q_pairs: #calc distance between question embedding with all context embeddings d = c_vecs - q_p.embedding[None, :] dist = numpy.linalg.norm(d, ord=2, axis=1) index = dist.argsort() #check that right context in the list of top_k c_pos_index = c_data_identifiers.index(q_a.context_pos_indetifier) if c_pos_index in index[:self.top_k]: true_pos += 1 return [true_pos/len(q_pairs)] if q_pairs else 0
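The f1 metric in the row above is the standard SQuAD-style token-overlap score. A self-contained restatement of just that arithmetic (stdlib only; the article/punctuation normalisation step from the row is skipped here), useful for sanity-checking a single answer pair:

from collections import Counter

def token_f1(gold, pred):
    gold_toks, pred_toks = gold.lower().split(), pred.lower().split()
    common = Counter(gold_toks) & Counter(pred_toks)
    num_same = sum(common.values())
    if not gold_toks or not pred_toks:
        # If either side is empty, F1 is 1 only when both are empty.
        return float(gold_toks == pred_toks)
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

print(token_f1("the cat sat", "a cat sat"))  # 0.666...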
2,042
grad
# This code is part of Qiskit. # # (C) Copyright IBM 2021, 2023. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Tests for the Gradient Descent optimizer.""" from test.python.algorithms import QiskitAlgorithmsTestCase import numpy as np from qiskit.algorithms.optimizers import GradientDescent, GradientDescentState from qiskit.algorithms.optimizers.steppable_optimizer import TellData, AskData from qiskit.circuit.library import PauliTwoDesign from qiskit.opflow import I, Z, StateFn class TestGradientDescent(QiskitAlgorithmsTestCase): """Tests for the gradient descent optimizer.""" def setUp(self): super().setUp() np.random.seed(12) self.initial_point = np.array([1, 1, 1, 1, 0]) def objective(self, x): """Objective Function for the tests""" return (np.linalg.norm(x) - 1) ** 2 def METHOD_NAME(self, x): """Gradient of the objective function""" return 2 * (np.linalg.norm(x) - 1) * x / np.linalg.norm(x) def test_pauli_two_design(self): """Test standard gradient descent on the Pauli two-design example.""" circuit = PauliTwoDesign(3, reps=3, seed=2) parameters = list(circuit.parameters) with self.assertWarns(DeprecationWarning): obs = Z ^ Z ^ I expr = ~StateFn(obs) @ StateFn(circuit) initial_point = np.array( [ 0.1822308, -0.27254251, 0.83684425, 0.86153976, -0.7111668, 0.82766631, 0.97867993, 0.46136964, 2.27079901, 0.13382699, 0.29589915, 0.64883193, ] ) def objective_pauli(x): return expr.bind_parameters(dict(zip(parameters, x))).eval().real optimizer = GradientDescent(maxiter=100, learning_rate=0.1, perturbation=0.1) with self.assertWarns(DeprecationWarning): result = optimizer.minimize(objective_pauli, x0=initial_point) self.assertLess(result.fun, -0.95) # final loss self.assertEqual(result.nfev, 1300) # function evaluations def test_callback(self): """Test the callback.""" history = [] def callback(*args): history.append(args) optimizer = GradientDescent(maxiter=1, callback=callback) _ = optimizer.minimize(self.objective, np.array([1, -1])) self.assertEqual(len(history), 1) self.assertIsInstance(history[0][0], int) # nfevs self.assertIsInstance(history[0][1], np.ndarray) # parameters self.assertIsInstance(history[0][2], float) # function value self.assertIsInstance(history[0][3], float) # norm of the gradient def test_minimize(self): """Test setting the learning rate as iterator and minimizing the funciton.""" def learning_rate(): power = 0.6 constant_coeff = 0.1 def powerlaw(): n = 0 while True: yield constant_coeff * (n**power) n += 1 return powerlaw() optimizer = GradientDescent(maxiter=20, learning_rate=learning_rate) result = optimizer.minimize(self.objective, self.initial_point, self.METHOD_NAME) self.assertLess(result.fun, 1e-5) def test_no_start(self): """Tests that making a step without having started the optimizer raises an error.""" optimizer = GradientDescent() with self.assertRaises(AttributeError): optimizer.step() def test_start(self): """Tests if the start method initializes the state properly.""" optimizer = GradientDescent() self.assertIsNone(optimizer.state) self.assertIsNone(optimizer.perturbation) optimizer.start(x0=self.initial_point, fun=self.objective) test_state = GradientDescentState( 
x=self.initial_point, fun=self.objective, jac=None, nfev=0, njev=0, nit=0, learning_rate=1, stepsize=None, ) self.assertEqual(test_state, optimizer.state) def test_ask(self): """Test the ask method.""" optimizer = GradientDescent() optimizer.start(fun=self.objective, x0=self.initial_point) ask_data = optimizer.ask() np.testing.assert_equal(ask_data.x_jac, self.initial_point) self.assertIsNone(ask_data.x_fun) def test_evaluate(self): """Test the evaluate method.""" optimizer = GradientDescent(perturbation=1e-10) optimizer.start(fun=self.objective, x0=self.initial_point) ask_data = AskData(x_jac=self.initial_point) tell_data = optimizer.evaluate(ask_data=ask_data) np.testing.assert_almost_equal(tell_data.eval_jac, self.METHOD_NAME(self.initial_point), decimal=2) def test_tell(self): """Test the tell method.""" optimizer = GradientDescent(learning_rate=1.0) optimizer.start(fun=self.objective, x0=self.initial_point) ask_data = AskData(x_jac=self.initial_point) tell_data = TellData(eval_jac=self.initial_point) optimizer.tell(ask_data=ask_data, tell_data=tell_data) np.testing.assert_equal(optimizer.state.x, np.zeros(optimizer.state.x.shape)) def test_continue_condition(self): """Test if the continue condition is working properly.""" optimizer = GradientDescent(tol=1) optimizer.start(fun=self.objective, x0=self.initial_point) self.assertTrue(optimizer.continue_condition()) optimizer.state.stepsize = 0.1 self.assertFalse(optimizer.continue_condition()) optimizer.state.stepsize = 10 optimizer.state.nit = 1000 self.assertFalse(optimizer.continue_condition()) def test_step(self): """Tests if performing one step yields the desired result.""" optimizer = GradientDescent(learning_rate=1.0) optimizer.start(fun=self.objective, jac=self.METHOD_NAME, x0=self.initial_point) optimizer.step() np.testing.assert_almost_equal( optimizer.state.x, self.initial_point - self.METHOD_NAME(self.initial_point), 6 ) def test_wrong_dimension_gradient(self): """Tests if an error is raised when a gradient of the wrong dimension is passed.""" optimizer = GradientDescent(learning_rate=1.0) optimizer.start(fun=self.objective, x0=self.initial_point) ask_data = AskData(x_jac=self.initial_point) tell_data = TellData(eval_jac=np.array([1.0, 5])) with self.assertRaises(ValueError): optimizer.tell(ask_data=ask_data, tell_data=tell_data) tell_data = TellData(eval_jac=np.array(1)) with self.assertRaises(ValueError): optimizer.tell(ask_data=ask_data, tell_data=tell_data)
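The tests above use the objective f(x) = (||x|| - 1)^2 together with its analytic gradient. A numpy-only sketch of vanilla gradient descent on that same pair (the learning rate and iteration count are arbitrary choices for illustration, not the optimizer's defaults):

import numpy as np

def objective(x):
    return (np.linalg.norm(x) - 1) ** 2

def grad(x):
    return 2 * (np.linalg.norm(x) - 1) * x / np.linalg.norm(x)

x = np.array([1.0, 1.0, 1.0, 1.0, 0.5])
learning_rate = 0.1
for _ in range(200):
    x = x - learning_rate * grad(x)

# The minimum is any point on the unit sphere, so the norm converges to 1.
print(np.linalg.norm(x), objective(x))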
2,043
separate true followers
import json import re from html.parser import HTMLParser from io import StringIO from hedera.supported_languages import SUPPORTED_LANGUAGES from lemmatization.lemmatizer import Lemmatizer class EditedTextHtmlParser(HTMLParser): def __init__(self, token_lemma_dict=None, lang=None): self.current_tag = None self.current_attrs = {} self.current_data = "" self.lemmatized_text_data = [] self.token_lemma_dict = token_lemma_dict self.lemmatizer = Lemmatizer(lang) self.service = SUPPORTED_LANGUAGES[lang].service self.initial = "" self.unique_text = False return super().__init__() def handle_starttag(self, tag, attrs): if tag == "span": self.current_tag = "span" """ Note: the fed in data could be two different types from a tuple of (key, dict) or (key, bool) handle_endtag() will require a key:value pair containing either of the structure below: [('data-token', '{"glossed": "glossed-automatic", "initial": "", "lemma_id": 1372, "resolved": "resolved-automatic", "gloss_ids": [84128, 68154], "word_normalized": "Arma"}')] [('follower', 'true')] """ key, value = attrs[0] if key in "follower": self.current_attrs = {key: value} else: self.current_attrs = json.loads(value) def handle_endtag(self, tag): if "follower" in self.current_attrs: self.METHOD_NAME(self.current_data) #Note: sometimes the current_tag/self.current_attrs will be empty/None when there is a newline/break # len() checks if empty string so we dont append blank words elif self.current_data is not None and self.current_tag is not None and len(self.current_data): self.lemmatized_text_data.append( { **self.current_attrs, "word": self.current_data, "following": "", } ) self.current_tag = None self.current_attrs = {} self.current_data = "" def handle_data(self, data): # used to modify data by the service(e.g latin underscores) formatted_text_data = self.service.apply_text_rule(self.unique_text, data) if type(formatted_text_data) is dict: data = formatted_text_data["data"] self.unique_text = formatted_text_data["unique_text"] if ("follower" in self.current_attrs): self.current_data = data else: try: if ( (self.current_tag is None) or (self.current_tag == "span" and self.current_attrs == {}) or (self.current_attrs["lemma_id"] not in self.token_lemma_dict[data]) ): self.lemmatize_chunk(data) else: self.current_data = data except KeyError: if self.service.check_text(data): self.unique_text = data if not self.unique_text: self.lemmatize_chunk(data) def METHOD_NAME(self, follower): """ Takes the contents of a span where 'follower' is true. Splits any 'follower' characters from alpha numeric characters. Sets the 'following' attr on the previous data point with true followers and sends new alpha numeric string to be lemmatized. Returns None """ followers = [] text = [] for idx, ch in enumerate(follower): if ch.isalnum(): text = follower[idx:] break followers.append(ch) if len(self.lemmatized_text_data) > 0: self.lemmatized_text_data[-1]["following"] += "".join(followers) else: # this will only occur if the text begins with a "follower" self.lemmatized_text_data.append( { "word": "", "lemma_id": None, "resolved": True, "word_normalized": "", "following": "".join(followers) } ) if (len(text) > 0): self.lemmatize_chunk("".join(text)) def lemmatize_chunk(self, chunk): """ Takes an unrecognized chunk of text. Sends 'chunk' to be lemmatized, then extends the data with the returned content. 
Checks if chunk does not contain return and newline "\r\n" - only add tokens if it the chunk is not a return/newline In case there is an newline at the beginning of the text("initial"), the newline char will be added to the previous text "following" key:value pair **Fixes problem with empty tokens** **Fixes problem with latin underscores** Returns None """ self.current_data = None new_data = self.lemmatizer.lemmatize(chunk) # regex checks if '\r\n' is the only char used in the chunk contains_only_newline = bool(re.match(r"^[\r\n]+$", chunk)) if not contains_only_newline: self.process_initial_data(new_data) self.lemmatized_text_data.extend(new_data) if contains_only_newline and len(self.lemmatized_text_data): token_lemma_dict_keys = list(self.token_lemma_dict.keys()) prev_lemma_id = self.lemmatized_text_data[-1]["lemma_id"] following = self.lemmatized_text_data[-1]["following"] #Note: Added check if we have reached the end of the data array because theres a bug where new lines are added after each edit if len(token_lemma_dict_keys) and prev_lemma_id not in self.token_lemma_dict[token_lemma_dict_keys[-1]]: self.lemmatized_text_data[-1]["following"] = f"{following}{chunk}" else: self.process_initial_data(new_data) self.lemmatized_text_data.extend(new_data) #TODO EDGE CASE: Newlines/breaks that may happen at the very beginning of the text def process_initial_data(self, new_data): # if statement will add newlines to "following" to previous text in lemmatized_text_data if len(new_data) and new_data[0]["initial"] and len(self.lemmatized_text_data): following = self.lemmatized_text_data[-1]["following"] self.lemmatized_text_data[-1]["following"] = f"{following}{new_data[0]['initial']}" class TagStripper(HTMLParser): def __init__(self): super().__init__() self.reset() self.strict = False self.convert_charrefs = True self.text = StringIO() def handle_data(self, d): self.text.write(d) def get_data(self): return self.text.getvalue()
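The masked method in the row above peels leading punctuation ("followers") off a span and passes the alphanumeric remainder on to the lemmatizer. A tiny stdlib sketch of that split on its own, outside the parser class:

def split_followers(chunk):
    """Return (followers, remainder): everything before the first
    alphanumeric character, and the rest of the string."""
    for idx, ch in enumerate(chunk):
        if ch.isalnum():
            return chunk[:idx], chunk[idx:]
    return chunk, ""

print(split_followers(", sed arma"))  # (', ', 'sed arma')
print(split_followers("!?"))          # ('!?', '')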
2,044
awk rule1
# Leo colorizer control file for awk mode. # This file is in the public domain. # Properties for awk mode. properties = { "indentCloseBrackets": "}", "indentOpenBrackets": "{", "lineComment": "#", "lineUpClosingBracket": "true", "wordBreakChars": ",+-=<>/?^&*", } # Attributes dict for awk_main ruleset. awk_main_attributes_dict = { "default": "null", "digit_re": "", "escape": "\\", "highlight_digits": "true", "ignore_case": "false", "no_word_sep": "", } # Dictionary of attributes dictionaries for awk mode. attributesDictDict = { "awk_main": awk_main_attributes_dict, } # Keywords dict for awk_main ruleset. awk_main_keywords_dict = { "$0": "keyword3", "ARGC": "keyword3", "ARGIND": "keyword3", "ARGV": "keyword3", "BEGIN": "keyword3", "CONVFMT": "keyword3", "END": "keyword3", "ENVIRON": "keyword3", "ERRNO": "keyword3", "FIELDSWIDTH": "keyword3", "FILENAME": "keyword3", "FNR": "keyword3", "FS": "keyword3", "IGNORECASE": "keyword3", "NF": "keyword3", "NR": "keyword3", "OFMT": "keyword3", "OFS": "keyword3", "ORS": "keyword3", "RLENGTH": "keyword3", "RS": "keyword3", "RSTART": "keyword3", "RT": "keyword3", "SUBSEP": "keyword3", "atan2": "keyword2", "break": "keyword1", "close": "keyword1", "continue": "keyword1", "cos": "keyword2", "delete": "keyword1", "do": "keyword1", "else": "keyword1", "exit": "keyword1", "exp": "keyword2", "fflush": "keyword1", "for": "keyword1", "function": "keyword1", "gensub": "keyword2", "getline": "keyword2", "gsub": "keyword2", "huge": "keyword1", "if": "keyword1", "in": "keyword1", "index": "keyword2", "int": "keyword2", "length": "keyword2", "log": "keyword2", "match": "keyword2", "next": "keyword1", "nextfile": "keyword1", "print": "keyword1", "printf": "keyword1", "rand": "keyword2", "return": "keyword1", "sin": "keyword2", "split": "keyword2", "sprintf": "keyword2", "sqrt": "keyword2", "srand": "keyword2", "sub": "keyword2", "substr": "keyword2", "system": "keyword2", "tolower": "keyword2", "toupper": "keyword2", "while": "keyword1", } # Dictionary of keywords dictionaries for awk mode. keywordsDictDict = { "awk_main": awk_main_keywords_dict, } # Rules for awk_main ruleset. 
def awk_rule0(colorer, s, i): return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"", no_line_break=True) def METHOD_NAME(colorer, s, i): return colorer.match_span(s, i, kind="literal1", begin="'", end="'", no_line_break=True) def awk_rule2(colorer, s, i): return colorer.match_eol_span(s, i, kind="comment1", seq="#") def awk_rule3(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="=") def awk_rule4(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="!") def awk_rule5(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq=">=") def awk_rule6(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="<=") def awk_rule7(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="+") def awk_rule8(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="-") def awk_rule9(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="/") def awk_rule10(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="*") def awk_rule11(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq=">") def awk_rule12(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="<") def awk_rule13(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="%") def awk_rule14(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="&") def awk_rule15(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="|") def awk_rule16(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="^") def awk_rule17(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="~") def awk_rule18(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="}") def awk_rule19(colorer, s, i): return colorer.match_plain_seq(s, i, kind="operator", seq="{") def awk_rule20(colorer, s, i): return colorer.match_mark_previous(s, i, kind="label", pattern=":", at_whitespace_end=True, exclude_match=True) def awk_rule21(colorer, s, i): return colorer.match_keywords(s, i) # Rules dict for awk_main ruleset. 
rulesDict1 = { "!": [awk_rule4,], "\"": [awk_rule0,], "#": [awk_rule2,], "$": [awk_rule21,], "%": [awk_rule13,], "&": [awk_rule14,], "'": [METHOD_NAME,], "*": [awk_rule10,], "+": [awk_rule7,], "-": [awk_rule8,], "/": [awk_rule9,], "0": [awk_rule21,], "1": [awk_rule21,], "2": [awk_rule21,], "3": [awk_rule21,], "4": [awk_rule21,], "5": [awk_rule21,], "6": [awk_rule21,], "7": [awk_rule21,], "8": [awk_rule21,], "9": [awk_rule21,], ":": [awk_rule20,], "<": [awk_rule6, awk_rule12,], "=": [awk_rule3,], ">": [awk_rule5, awk_rule11,], "@": [awk_rule21,], "A": [awk_rule21,], "B": [awk_rule21,], "C": [awk_rule21,], "D": [awk_rule21,], "E": [awk_rule21,], "F": [awk_rule21,], "G": [awk_rule21,], "H": [awk_rule21,], "I": [awk_rule21,], "J": [awk_rule21,], "K": [awk_rule21,], "L": [awk_rule21,], "M": [awk_rule21,], "N": [awk_rule21,], "O": [awk_rule21,], "P": [awk_rule21,], "Q": [awk_rule21,], "R": [awk_rule21,], "S": [awk_rule21,], "T": [awk_rule21,], "U": [awk_rule21,], "V": [awk_rule21,], "W": [awk_rule21,], "X": [awk_rule21,], "Y": [awk_rule21,], "Z": [awk_rule21,], "^": [awk_rule16,], "a": [awk_rule21,], "b": [awk_rule21,], "c": [awk_rule21,], "d": [awk_rule21,], "e": [awk_rule21,], "f": [awk_rule21,], "g": [awk_rule21,], "h": [awk_rule21,], "i": [awk_rule21,], "j": [awk_rule21,], "k": [awk_rule21,], "l": [awk_rule21,], "m": [awk_rule21,], "n": [awk_rule21,], "o": [awk_rule21,], "p": [awk_rule21,], "q": [awk_rule21,], "r": [awk_rule21,], "s": [awk_rule21,], "t": [awk_rule21,], "u": [awk_rule21,], "v": [awk_rule21,], "w": [awk_rule21,], "x": [awk_rule21,], "y": [awk_rule21,], "z": [awk_rule21,], "{": [awk_rule19,], "|": [awk_rule15,], "}": [awk_rule18,], "~": [awk_rule17,], } # x.rulesDictDict for awk mode. rulesDictDict = { "awk_main": rulesDict1, } # Import dict for awk mode. importDict = {}
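rulesDict1 above is a first-character dispatch table: the colorizer looks up the character at the current position and tries each rule registered for it. A toy sketch of that dispatch idea (the Leo colorer API itself is not reproduced; the rules here are simplified stand-ins that return the number of characters matched):

# Hypothetical, simplified rules for illustration only.
def match_comment(s, i):
    return len(s) - i if s[i] == "#" else 0

def match_operator(s, i):
    return 1 if s[i] in "=+-" else 0

rules_by_first_char = {"#": [match_comment], "=": [match_operator],
                       "+": [match_operator], "-": [match_operator]}

def scan(s):
    i, spans = 0, []
    while i < len(s):
        for rule in rules_by_first_char.get(s[i], []):
            n = rule(s, i)
            if n:
                spans.append((i, i + n, rule.__name__))
                i += n
                break
        else:
            i += 1
    return spans

print(scan("x = y + 1  # comment"))
# [(2, 3, 'match_operator'), (6, 7, 'match_operator'), (11, 20, 'match_comment')]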
2,045
test consider not charging chargepoint in loadmanagement
from typing import List, Tuple from unittest.mock import Mock import pytest from control import data from control.algorithm import common from control.chargepoint.chargepoint import Chargepoint from control.ev import Ev from control.counter import Counter from control.counter_all import CounterAll @pytest.fixture(autouse=True) def cp() -> None: data.data_init(Mock()) data.data.cp_data = {"cp0": Chargepoint(0, None)} @pytest.mark.parametrize("set_current, expected_current", [pytest.param(6, 0), pytest.param(0, 0)]) def test_reset_current(set_current: int, expected_current: int): # setup data.data.cp_data["cp0"].data.set.current = set_current # execution common.reset_current() # evaluation assert data.data.cp_data["cp0"].data.set.current == expected_current @pytest.mark.parametrize( "diff, required_currents, expected_set_current, expected_diffs", [ pytest.param(2, [10, 0, 0], 8, [2, 0, 0], id="set diff one phase"), pytest.param(2, [12]*3, 8, [2]*3, id="set diff three phases"), pytest.param(8, [8]*3, 8, [8]*3, id="set min current three phases"), pytest.param(0, [8]*3, 8, [0]*3, id="min current is already set, three phases"), ]) def test_set_current_counterdiff(diff: float, required_currents: List[float], expected_set_current: float, expected_diffs: List[float], monkeypatch): # setup cp = Chargepoint(4, None) ev = Ev(0) ev.data.control_parameter.required_currents = required_currents cp.data.set.charging_ev_data = ev cp.data.set.current = 6 get_counters_to_check_mock = Mock(return_value=["cp0", "cp6"]) monkeypatch.setattr(CounterAll, "get_counters_to_check", get_counters_to_check_mock) data.data.counter_data = {"cp0": Mock(spec=Counter), "cp6": Mock(spec=Counter)} # evaluation common.set_current_counterdiff(diff, 8, cp) # assertion assert cp.data.set.current == expected_set_current if diff != 0: assert data.data._counter_data['cp0'].update_values_left.call_args_list[0][0][0] == expected_diffs assert data.data._counter_data['cp6'].update_values_left.call_args_list[0][0][0] == expected_diffs @pytest.mark.parametrize( "required_currents, expected_mins_counts", [ ([10, 0, 0], ([6, 0, 0], [1, 0, 0])), ([12]*3, ([6]*3, [1]*3)) ]) def test_get_min_current(required_currents: List[float], expected_mins_counts: Tuple[List[float], List[int]]): # setup cp = Chargepoint(4, None) ev = Ev(0) ev.data.control_parameter.required_currents = required_currents cp.data.set.charging_ev_data = ev # evaluation mins_counts = common.get_min_current(cp) # assertion assert mins_counts == expected_mins_counts @pytest.mark.parametrize( "set_current, diff, expected_current", [ pytest.param(0, 2, 8, id="min current is set, no current has been set on this iteration"), pytest.param(6, 2, 6, id="min current is set, current has been set on this iteration"), pytest.param(7, 2, 7, id="new current is higher, current has been set on this iteration"), pytest.param(9, 2, 8, id="new current is lower, current has been set on this iteration"), ]) def test_get_current_to_set(set_current: float, diff: float, expected_current: float): # setup & evaluation current = common.get_current_to_set(set_current, diff, 6) # assertion assert current == expected_current @pytest.mark.parametrize( "counts, available_currents, missing_currents, expected_current", [ pytest.param([2]*3, [12, 15, 16], [5]*3, 6), pytest.param([2]*3, [1]*3, [2]*3, 0.5), pytest.param([2]*3, [0]*3, [2]*3, 0), ]) def test_available_currents_for_cp(counts: List[int], available_currents: List[float], missing_currents: List[float], expected_current: float): # setup cp = Chargepoint(4, 
None) ev = Ev(0) ev.data.control_parameter.required_currents = [16]*3 ev.data.control_parameter.required_current = 16 cp.data.set.charging_ev_data = ev cp.data.set.target_current = 10 # evaluation current = common.available_current_for_cp(cp, counts, available_currents, missing_currents) # assertion assert current == expected_current @pytest.mark.parametrize( "required_currents_1, required_currents_2, expected_currents", [ pytest.param([6, 10, 15], [20]*3, ([14, 18, 23], [2]*3)), pytest.param([6, 10, 15], [6, 0, 0], ([0, 4, 9], [2, 1, 1])), ]) def test_get_missing_currents_left(required_currents_1: List[float], required_currents_2: List[float], expected_currents: List[float]): # setup def setup_cp(num: int, required_currents) -> Chargepoint: ev = Ev(0) cp = Chargepoint(num, None) ev.data.control_parameter.required_currents = required_currents cp.data.set.charging_ev_data = ev return cp # evaluation currents = common.get_missing_currents_left( [setup_cp(1, required_currents_1), setup_cp(2, required_currents_2)]) # assertion assert currents == expected_currents @pytest.mark.parametrize( "reserve_for_not_charging, get_currents, expected_considered", [ pytest.param(True, [0]*3, False, id="reserve_for_not_charging active"), pytest.param(True, [6]*3, False, id="reserve_for_not_charging active"), pytest.param(False, [0]*3, True, id="not charging"), pytest.param(False, [6]*3, False, id="charging"), ]) def METHOD_NAME(reserve_for_not_charging: bool, get_currents: List[float], expected_considered: bool): # setup cp = Chargepoint(4, None) cp.data.get.currents = get_currents data.data.counter_all_data.data.config.reserve_for_not_charging = reserve_for_not_charging # evaluation considered = common.consider_not_charging_chargepoint_in_loadmanagement(cp) # assertion assert considered == expected_considered
2,046
cindex
""" Classes and methods to interface with files storing rate data. """ import os import re from scipy.constants import physical_constants from pynucastro.nucdata.binding_table import BindingTable from pynucastro.nucdata.elements import PeriodicTable from pynucastro.nucdata.mass_table import MassTable from pynucastro.nucdata.partition_function import PartitionFunctionCollection from pynucastro.nucdata.spin_table import SpinTable _pynucastro_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) _pynucastro_rates_dir = os.path.join(_pynucastro_dir, 'library') _pynucastro_tabular_dir = os.path.join(_pynucastro_rates_dir, 'tabular') #set the atomic mass unit constant in MeV m_u, _, _ = physical_constants['atomic mass constant energy equivalent in MeV'] #read the mass excess table once and store it at the module-level _mass_table = MassTable() #read the spin table once and store it at the module-level _spin_table = SpinTable(reliable=True) # read the binding energy table once and store it at the module-level _binding_table = BindingTable() # read the partition function table once and store it at the module-level _pcollection = PartitionFunctionCollection(use_high_temperatures=True, use_set='frdm') class UnsupportedNucleus(Exception): pass class Nucleus: """ a nucleus that participates in a reaction -- we store it in a class to hold its properties, define a sorting, and give it a pretty printing string. :var Z: atomic number :var N: neutron number :var A: atomic mass :var nucbind: nuclear binding energy (MeV / nucleon) :var short_spec_name: nucleus abbreviation (e.g. "he4") :var caps_name: capitalized short species name (e.g. "He4") :var el: element name (e.g. "he") :var pretty: LaTeX formatted version of the nucleus name :var A_nuc: Nuclear Mass in amu """ _cache = {} def __init__(self, name, dummy=False): name = name.lower() self.raw = name # a dummy nucleus is one that we can use where a nucleus is needed # but it is not considered to be part of the network self.dummy = dummy # element symbol and atomic weight if name == "p": self.el = "h" self.A = 1 self.short_spec_name = "h1" self.caps_name = "p" elif name == "d": self.el = "h" self.A = 2 self.short_spec_name = "h2" self.caps_name = "H2" elif name == "t": self.el = "h" self.A = 3 self.short_spec_name = "h3" self.caps_name = "H3" elif name == "a": #this is a convenience, enabling the use of a commonly-used alias: # He4 --> \alpha --> "a" , e.g. 
c12(a,g)o16 self.el = "he" self.A = 4 self.short_spec_name = "he4" self.raw = "he4" self.caps_name = "He4" elif name == "n": self.el = "n" self.A = 1 self.Z = 0 self.N = 1 self.short_spec_name = "n" self.spec_name = "neutron" self.pretty = fr"\mathrm{{{self.el}}}" self.caps_name = "n" elif name.strip() in ("al-6", "al*6"): raise UnsupportedNucleus() else: e = re.match(r"([a-zA-Z]*)(\d*)", name) self.el = e.group(1).title() # chemical symbol assert self.el self.A = int(e.group(2)) assert self.A >= 0 self.short_spec_name = name self.caps_name = name.capitalize() # use lowercase element abbreviation regardless the case of the input self.el = self.el.lower() # atomic number comes from periodic table if name != "n": i = PeriodicTable.lookup_abbreviation(self.el) self.Z = i.Z assert isinstance(self.Z, int) assert self.Z >= 0 self.N = self.A - self.Z assert isinstance(self.N, int) assert self.N >= 0 # long name self.spec_name = f'{i.name}-{self.A}' # latex formatted style self.pretty = fr"{{}}^{{{self.A}}}\mathrm{{{self.el.capitalize()}}}" # set the number of spin states try: self.spin_states = _spin_table.get_spin_states(a=self.A, z=self.Z) except NotImplementedError: self.spin_states = None # set a partition function object to every nucleus try: self.partition_function = _pcollection.get_partition_function(self.short_spec_name) except ValueError: self.partition_function = None try: self.nucbind = _binding_table.get_binding_energy(n=self.N, z=self.Z) except NotImplementedError: # the binding energy table doesn't know about this nucleus self.nucbind = None # Now we will define the Nuclear Mass, try: self.A_nuc = float(self.A) + _mass_table.get_mass_diff(a=self.A, z=self.Z) / m_u except NotImplementedError: self.A_nuc = None @classmethod def from_cache(cls, name, dummy=False): key = (name.lower(), dummy) if key not in cls._cache: cls._cache[key] = Nucleus(name, dummy) return cls._cache[key] def __repr__(self): return self.raw def __hash__(self): return hash((self.Z, self.A)) def c(self): """return the capitalized-style name""" return self.caps_name def METHOD_NAME(self): """return the name for C++ indexing""" return self.short_spec_name.capitalize() def __eq__(self, other): if isinstance(other, Nucleus): return self.el == other.el and \ self.Z == other.Z and self.A == other.A if isinstance(other, tuple): return (self.Z, self.A) == other return NotImplemented def __lt__(self, other): if not self.Z == other.Z: return self.Z < other.Z return self.A < other.A def get_nuclei_in_range(zmin, zmax, amin, amax): """given a range of Z = [zmin, zmax], and A = [amin, amax], return a list of Nucleus objects for all nuclei in this range""" nuc_list = [] assert zmax >= zmin, "zmax must be >= zmin" assert amax >= amin, "amax must be >= amin" for z in range(zmin, zmax+1): element = PeriodicTable.lookup_Z(z) for a in range(amin, amax+1): name = f"{element.abbreviation}{a}" nuc_list.append(Nucleus(name)) return nuc_list
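The Nucleus constructor above splits a name like "he4" into an element symbol and mass number with a single regular expression, then looks up the atomic number in a periodic table. A stdlib sketch of just that parsing step (the periodic-table lookup is stubbed out as a small dict with assumed entries):

import re

# Tiny stand-in for the periodic-table lookup (assumed values, illustration only).
Z_BY_SYMBOL = {"h": 1, "he": 2, "c": 6, "o": 8}

def parse_nucleus(name):
    m = re.match(r"([a-zA-Z]*)(\d*)", name.lower())
    el, a = m.group(1), int(m.group(2))
    z = Z_BY_SYMBOL[el]
    return {"el": el, "A": a, "Z": z, "N": a - z}

print(parse_nucleus("He4"))  # {'el': 'he', 'A': 4, 'Z': 2, 'N': 2}
print(parse_nucleus("c12"))  # {'el': 'c', 'A': 12, 'Z': 6, 'N': 6}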
2,047
get test params
"""Optional passthrough transformer.""" # copyright: sktime developers, BSD-3-Clause License (see LICENSE file) __author__ = ["aiwalter", "fkiraly"] __all__ = ["OptionalPassthrough"] from sktime.transformations._delegate import _DelegatedTransformer from sktime.transformations.compose._common import CORE_MTYPES from sktime.transformations.compose._id import Id class OptionalPassthrough(_DelegatedTransformer): """Wrap an existing transformer to tune whether to include it in a pipeline. Allows tuning the implicit hyperparameter whether or not to use a particular transformer inside a pipeline (e.g. TransformedTargetForecaster) or not. This is achieved by the hyperparameter `passthrough` which can be added to a tuning grid then (see example). Parameters ---------- transformer : Estimator scikit-learn-like or sktime-like transformer to fit and apply to series. this is a "blueprint" transformer, state does not change when `fit` is called passthrough : bool, default=False Whether to apply the given transformer or to just passthrough the data (identity transformation). If, True the transformer is not applied and the OptionalPassthrough uses the identity transformation. Attributes ---------- transformer_: transformer, this clone is fitted when `fit` is called and provides `transform` and inverse if passthrough = False, a clone of `transformer`passed if passthrough = True, the identity transformer `Id` Examples -------- >>> from sktime.datasets import load_airline >>> from sktime.forecasting.naive import NaiveForecaster >>> from sktime.transformations.compose import OptionalPassthrough >>> from sktime.transformations.series.detrend import Deseasonalizer >>> from sktime.transformations.series.adapt import TabularToSeriesAdaptor >>> from sktime.forecasting.compose import TransformedTargetForecaster >>> from sktime.forecasting.model_selection import ( ... ForecastingGridSearchCV, ... SlidingWindowSplitter) >>> from sklearn.preprocessing import StandardScaler >>> # create pipeline >>> pipe = TransformedTargetForecaster(steps=[ ... ("deseasonalizer", OptionalPassthrough(Deseasonalizer())), ... ("scaler", OptionalPassthrough(TabularToSeriesAdaptor(StandardScaler()))), ... ("forecaster", NaiveForecaster())]) # doctest: +SKIP >>> # putting it all together in a grid search >>> cv = SlidingWindowSplitter( ... initial_window=60, ... window_length=24, ... start_with_window=True, ... step_length=48) # doctest: +SKIP >>> param_grid = { ... "deseasonalizer__passthrough" : [True, False], ... "scaler__transformer__transformer__with_mean": [True, False], ... "scaler__passthrough" : [True, False], ... "forecaster__strategy": ["drift", "mean", "last"]} # doctest: +SKIP >>> gscv = ForecastingGridSearchCV( ... forecaster=pipe, ... param_grid=param_grid, ... cv=cv, ... n_jobs=-1) # doctest: +SKIP >>> gscv_fitted = gscv.fit(load_airline()) # doctest: +SKIP """ _tags = { "scitype:transform-input": "Series", # what is the scitype of X: Series, or Panel "scitype:transform-output": "Series", # what scitype is returned: Primitives, Series, Panel "scitype:instancewise": True, # is this an instance-wise transform? "X_inner_mtype": CORE_MTYPES, # which mtypes do _fit/_predict support for X? "y_inner_mtype": "None", # which mtypes do _fit/_predict support for y? 
"univariate-only": False, "fit_is_empty": False, "capability:inverse_transform": True, } def __init__(self, transformer, passthrough=False): self.transformer = transformer self.passthrough = passthrough super().__init__() # should be all tags, but not fit_is_empty # (_fit should not be skipped) tags_to_clone = [ "scitype:transform-input", "scitype:transform-output", "scitype:instancewise", "y_inner_mtype", "capability:inverse_transform", "handles-missing-data", "X-y-must-have-same-index", "transform-returns-same-time-index", "skip-inverse-transform", ] self.clone_tags(transformer, tag_names=tags_to_clone) if passthrough: self.transformer_ = Id() else: self.transformer_ = transformer.clone() # attribute for _DelegatedTransformer, which then delegates # all non-overridden methods are same as of getattr(self, _delegate_name) # see further details in _DelegatedTransformer docstring _delegate_name = "transformer_" @classmethod def METHOD_NAME(cls, parameter_set="default"): """Return testing parameter settings for the estimator. Parameters ---------- parameter_set : str, default="default" Name of the set of test parameters to return, for use in tests. If no special parameters are defined for a value, will return `"default"` set. Returns ------- params : dict or list of dict, default = {} Parameters to create testing instances of the class Each dict are parameters to construct an "interesting" test instance, i.e., `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance. `create_test_instance` uses the first (or only) dictionary in `params` """ from sktime.transformations.series.boxcox import BoxCoxTransformer return {"transformer": BoxCoxTransformer(), "passthrough": False}
2,048
name
# coding=utf-8 # *** WARNING: this file was generated by pulumi. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'GetSqlPoolVulnerabilityAssessmentRuleBaselineResult', 'AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult', 'get_sql_pool_vulnerability_assessment_rule_baseline', 'get_sql_pool_vulnerability_assessment_rule_baseline_output', ] @pulumi.output_type class GetSqlPoolVulnerabilityAssessmentRuleBaselineResult: """ A Sql pool vulnerability assessment rule baseline. """ def __init__(__self__, baseline_results=None, id=None, METHOD_NAME=None, type=None): if baseline_results and not isinstance(baseline_results, list): raise TypeError("Expected argument 'baseline_results' to be a list") pulumi.set(__self__, "baseline_results", baseline_results) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if METHOD_NAME and not isinstance(METHOD_NAME, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", METHOD_NAME) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter(METHOD_NAME="baselineResults") def baseline_results(self) -> Sequence['outputs.SqlPoolVulnerabilityAssessmentRuleBaselineItemResponse']: """ The rule baseline result """ return pulumi.get(self, "baseline_results") @property @pulumi.getter def id(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id") @property @pulumi.getter def METHOD_NAME(self) -> str: """ The name of the resource """ return pulumi.get(self, "name") @property @pulumi.getter def type(self) -> str: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type") class AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult(GetSqlPoolVulnerabilityAssessmentRuleBaselineResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetSqlPoolVulnerabilityAssessmentRuleBaselineResult( baseline_results=self.baseline_results, id=self.id, METHOD_NAME=self.METHOD_NAME, type=self.type) def get_sql_pool_vulnerability_assessment_rule_baseline(baseline_name: Optional[str] = None, resource_group_name: Optional[str] = None, rule_id: Optional[str] = None, sql_pool_name: Optional[str] = None, vulnerability_assessment_name: Optional[str] = None, workspace_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult: """ Gets a SqlPool's vulnerability assessment rule baseline. Azure REST API version: 2021-06-01. :param str baseline_name: The name of the vulnerability assessment rule baseline (default implies a baseline on a Sql pool level rule and master for server level rule). :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str rule_id: The vulnerability assessment rule ID. :param str sql_pool_name: SQL pool name :param str vulnerability_assessment_name: The name of the vulnerability assessment. 
:param str workspace_name: The name of the workspace. """ __args__ = dict() __args__['baselineName'] = baseline_name __args__['resourceGroupName'] = resource_group_name __args__['ruleId'] = rule_id __args__['sqlPoolName'] = sql_pool_name __args__['vulnerabilityAssessmentName'] = vulnerability_assessment_name __args__['workspaceName'] = workspace_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:synapse:getSqlPoolVulnerabilityAssessmentRuleBaseline', __args__, opts=opts, typ=GetSqlPoolVulnerabilityAssessmentRuleBaselineResult).value return AwaitableGetSqlPoolVulnerabilityAssessmentRuleBaselineResult( baseline_results=pulumi.get(__ret__, 'baseline_results'), id=pulumi.get(__ret__, 'id'), METHOD_NAME=pulumi.get(__ret__, 'name'), type=pulumi.get(__ret__, 'type')) @_utilities.lift_output_func(get_sql_pool_vulnerability_assessment_rule_baseline) def get_sql_pool_vulnerability_assessment_rule_baseline_output(baseline_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, rule_id: Optional[pulumi.Input[str]] = None, sql_pool_name: Optional[pulumi.Input[str]] = None, vulnerability_assessment_name: Optional[pulumi.Input[str]] = None, workspace_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSqlPoolVulnerabilityAssessmentRuleBaselineResult]: """ Gets a SqlPool's vulnerability assessment rule baseline. Azure REST API version: 2021-06-01. :param str baseline_name: The name of the vulnerability assessment rule baseline (default implies a baseline on a Sql pool level rule and master for server level rule). :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str rule_id: The vulnerability assessment rule ID. :param str sql_pool_name: SQL pool name :param str vulnerability_assessment_name: The name of the vulnerability assessment. :param str workspace_name: The name of the workspace. """ ...
2,049
norm
# Test the Unicode versions of normal file functions # open, os.open, os.stat. os.listdir, os.rename, os.remove, os.mkdir, os.chdir, os.rmdir import sys, os, unittest from unicodedata import normalize from test import test_support filenames = [ '1_abc', u'2_ascii', u'3_Gr\xfc\xdf-Gott', u'4_\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2', u'5_\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435', u'6_\u306b\u307d\u3093', u'7_\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1', u'8_\u66e8\u66e9\u66eb', u'9_\u66e8\u05e9\u3093\u0434\u0393\xdf', # Specific code points: fn, NFC(fn) and NFKC(fn) all differents u'10_\u1fee\u1ffd', ] # Mac OS X decomposes Unicode names, using Normal Form D. # http://developer.apple.com/mac/library/qa/qa2001/qa1173.html # "However, most volume formats do not follow the exact specification for # these normal forms. For example, HFS Plus uses a variant of Normal Form D # in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through # U+2FAFF are not decomposed." if sys.platform != 'darwin': filenames.extend([ # Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all differents u'11_\u0385\u03d3\u03d4', u'12_\u00a8\u0301\u03d2\u0301\u03d2\u0308', # == NFD(u'\u0385\u03d3\u03d4') u'13_\u0020\u0308\u0301\u038e\u03ab', # == NFKC(u'\u0385\u03d3\u03d4') u'14_\u1e9b\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed', # Specific code points: fn, NFC(fn) and NFKC(fn) all differents u'15_\u1fee\u1ffd\ufad1', u'16_\u2000\u2000\u2000A', u'17_\u2001\u2001\u2001A', u'18_\u2003\u2003\u2003A', # == NFC(u'\u2001\u2001\u2001A') u'19_\u0020\u0020\u0020A', # u'\u0020' == u' ' == NFKC(u'\u2000') == # NFKC(u'\u2001') == NFKC(u'\u2003') ]) # Is it Unicode-friendly? if not os.path.supports_unicode_filenames: fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding() try: for name in filenames: name.encode(fsencoding) except UnicodeEncodeError: raise unittest.SkipTest("only NT+ and systems with " "Unicode-friendly filesystem encoding") # Destroy directory dirname and all files under it, to one level. def deltree(dirname): # Don't hide legitimate errors: if one of these suckers exists, it's # an error if we can't remove it. if os.path.exists(dirname): # must pass unicode to os.listdir() so we get back unicode results. for fname in os.listdir(unicode(dirname)): os.unlink(os.path.join(dirname, fname)) os.rmdir(dirname) class UnicodeFileTests(unittest.TestCase): files = set(filenames) normal_form = None def setUp(self): try: os.mkdir(test_support.TESTFN) except OSError: pass files = set() for name in self.files: name = os.path.join(test_support.TESTFN, self.METHOD_NAME(name)) with open(name, 'w') as f: f.write((name+'\n').encode("utf-8")) os.stat(name) files.add(name) self.files = files def tearDown(self): deltree(test_support.TESTFN) def METHOD_NAME(self, s): if self.normal_form and isinstance(s, unicode): return normalize(self.normal_form, s) return s def _apply_failure(self, fn, filename, expected_exception, check_fn_in_exception = True): with self.assertRaises(expected_exception) as c: fn(filename) exc_filename = c.exception.filename # the "filename" exception attribute may be encoded if isinstance(exc_filename, str): filename = filename.encode(sys.getfilesystemencoding()) if check_fn_in_exception: self.assertEqual(exc_filename, filename, "Function '%s(%r) failed " "with bad filename in the exception: %r" % (fn.__name__, filename, exc_filename)) def test_failures(self): # Pass non-existing Unicode filenames all over the place. 
for name in self.files: name = "not_" + name self._apply_failure(open, name, IOError) self._apply_failure(os.stat, name, OSError) self._apply_failure(os.chdir, name, OSError) self._apply_failure(os.rmdir, name, OSError) self._apply_failure(os.remove, name, OSError) # listdir may append a wildcard to the filename, so dont check self._apply_failure(os.listdir, name, OSError, False) def test_open(self): for name in self.files: f = open(name, 'w') f.write((name+'\n').encode("utf-8")) f.close() os.stat(name) # Skip the test on darwin, because darwin does normalize the filename to # NFD (a variant of Unicode NFD form). Normalize the filename to NFC, NFKC, # NFKD in Python is useless, because darwin will normalize it later and so # open(), os.stat(), etc. don't raise any exception. @unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X') def test_normalize(self): files = set(f for f in self.files if isinstance(f, unicode)) others = set() for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']): others |= set(normalize(nf, file) for file in files) others -= files for name in others: self._apply_failure(open, name, IOError) self._apply_failure(os.stat, name, OSError) self._apply_failure(os.chdir, name, OSError) self._apply_failure(os.rmdir, name, OSError) self._apply_failure(os.remove, name, OSError) # listdir may append a wildcard to the filename, so dont check self._apply_failure(os.listdir, name, OSError, False) # Skip the test on darwin, because darwin uses a normalization different # than Python NFD normalization: filenames are different even if we use # Python NFD normalization. @unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X') def test_listdir(self): sf0 = set(self.files) f1 = os.listdir(test_support.TESTFN) f2 = os.listdir(unicode(test_support.TESTFN, sys.getfilesystemencoding())) sf2 = set(os.path.join(unicode(test_support.TESTFN), f) for f in f2) self.assertEqual(sf0, sf2) self.assertEqual(len(f1), len(f2)) def test_rename(self): for name in self.files: os.rename(name, "tmp") os.rename("tmp", name) def test_directory(self): dirname = os.path.join(test_support.TESTFN, u'Gr\xfc\xdf-\u66e8\u66e9\u66eb') filename = u'\xdf-\u66e8\u66e9\u66eb' oldwd = os.getcwd() os.mkdir(dirname) os.chdir(dirname) try: with open(filename, 'w') as f: f.write((filename + '\n').encode("utf-8")) os.access(filename,os.R_OK) os.remove(filename) finally: os.chdir(oldwd) os.rmdir(dirname) class UnicodeNFCFileTests(UnicodeFileTests): normal_form = 'NFC' class UnicodeNFDFileTests(UnicodeFileTests): normal_form = 'NFD' class UnicodeNFKCFileTests(UnicodeFileTests): normal_form = 'NFKC' class UnicodeNFKDFileTests(UnicodeFileTests): normal_form = 'NFKD' def test_main(): try: test_support.run_unittest( UnicodeFileTests, UnicodeNFCFileTests, UnicodeNFDFileTests, UnicodeNFKCFileTests, UnicodeNFKDFileTests, ) finally: deltree(test_support.TESTFN) if __name__ == "__main__": test_main()
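A small illustration of the unicodedata.normalize behaviour that the masked helper applies to each filename before it is created (shown in Python 3 syntax for brevity; the test above is written for Python 2).

from unicodedata import normalize

name = "3_Gr\u00fc\u00df-Gott"                   # one of the filenames above
print(normalize("NFC", name) == name)            # True: already in composed form
print(normalize("NFD", name) == name)            # False: the umlaut decomposes into 'u' + combining diaeresis
print(len(name), len(normalize("NFD", name)))    # the NFD form is one code point longer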
2,050
patch internal
# engine params from typing import Any, Callable, Dict, Optional, Sequence, Union, cast from .argument_config import get_argument_config_value from .config_file_config import get_config_dict_from_config_file from .default_config import get_default_config_value from .environment_config import get_environment_config_value from .keys import ALL_KEYS, KEYS, ConfigDict from .system_config import get_system_config_value def chain_getters( getters: Sequence[Callable[[str], Optional[str]]], key: str, default_return: Optional[str] = None, ) -> Optional[str]: for getter in getters: result = getter(key) if result is not None: return result return default_return def lazy_get_config_value( key: str, default_return: Optional[str] = None ) -> Optional[Union[str, Dict[str, Dict[str, str]]]]: """ Get the config value for a key in the following precedence Otherwise return default_return """ if key not in ALL_KEYS: # For sections which can't be overridden via envvars/arguments, # we only use default values return chain_getters([get_default_config_value], key, default_return) return chain_getters( [ get_argument_config_value, get_environment_config_value, get_system_config_value, get_default_config_value, ], key, default_return, ) def update_config_dict_from_arguments(config_dict: ConfigDict) -> ConfigDict: """ Given an existing config_dict, update after reading sys.argv and overwriting any keys. Return updated copy of config_dict. """ argument_config_dict = { k: get_argument_config_value(k, None) for k in KEYS if get_argument_config_value(k) is not None } new_config_dict = patch_config(config_dict, cast(ConfigDict, argument_config_dict)) return new_config_dict def update_config_dict_from_env_vars(config_dict: ConfigDict) -> ConfigDict: """ Given an existing config_dict, update after reading os.environ and overwriting any keys. Return updated copy of config_dict. """ argument_config_dict = { k: get_environment_config_value(k, None) for k in KEYS if get_environment_config_value(k) is not None } new_config_dict = patch_config(config_dict, cast(ConfigDict, argument_config_dict)) return new_config_dict def update_config_dict_from_file(config_dict: ConfigDict, sg_config_file: str) -> ConfigDict: """ Given an existing config_dict, update after reading sg_config_file and overwriting any keys according to the rules in config_file_config Return updated copy of config_dict. 
""" config_file_dict = get_config_dict_from_config_file(sg_config_file) new_config_dict = patch_config(config_dict, config_file_dict) return new_config_dict def create_config_dict() -> ConfigDict: """ Create and return a dict of all known config values """ initial_dict = {k: lazy_get_config_value(k) for k in ALL_KEYS} config_dict = cast(ConfigDict, {k: v for k, v in initial_dict.items() if v is not None}) try: sg_config_file = get_singleton(config_dict, "SG_CONFIG_FILE") config_dict = update_config_dict_from_file(config_dict, sg_config_file) except KeyError: pass config_dict = update_config_dict_from_env_vars(config_dict) config_dict = update_config_dict_from_arguments(config_dict) return config_dict def patch_config(config: ConfigDict, patch: ConfigDict) -> ConfigDict: """ Recursively updates a nested configuration dictionary: patch_config( {"key_1": "value_1", "dict_1": {"key_1": "value_1"}}, {"key_1": "value_2", "dict_1": {"key_2": "value_2"}}) == \ {"key_1": "value_2", "dict_1": {"key_1": "value_1", "key_2": "value_2"}} :param config: Config dictionary :param patch: Dictionary with the path :return: New patched dictionary """ def METHOD_NAME(left: Dict[str, Any], right: Dict[str, Any]) -> Dict[str, Any]: result = left.copy() for key, value in right.items(): if key in left and isinstance(left[key], dict) and isinstance(value, dict): result[key] = METHOD_NAME(left[key], value) else: result[key] = value return result return METHOD_NAME(config, patch) def get_singleton(config: ConfigDict, item: str) -> str: """Return a singleton (not a section) variable from the config.""" return str(config[item]) def get_all_in_section(config: ConfigDict, section: str) -> Dict[str, Union[str, Dict[str, str]]]: """ Get all subsections from a config (e.g. config["data_sources"]) """ result: Dict[str, Union[str, Dict[str, str]]] = cast( Dict[str, Union[str, Dict[str, str]]], config.get(section, {}) ) assert isinstance(result, dict) return result def get_all_in_subsection(config: ConfigDict, section: str, subsection: str) -> Dict[str, str]: section_dict = get_all_in_section(config, section) subsection_dict: Dict[str, str] = cast(Dict[str, str], section_dict.get(subsection, {})) assert isinstance(subsection_dict, dict) return subsection_dict def get_from_subsection(config: ConfigDict, section: str, subsection: str, item: str) -> str: """Return a singleton variable from a subsection of the config, e.g. config["remotes"]["data.splitgraph.com"]["SG_ENGINE_HOST"]""" subsection_dict = get_all_in_subsection(config, section, subsection) return subsection_dict[item] def get_from_section(config: ConfigDict, section: str, item: str) -> str: section_dict = get_all_in_section(config, section) assert isinstance(section_dict, dict) return cast(str, section_dict[item]) def set_in_subsection( config: ConfigDict, section: str, subsection: str, item: str, value: str ) -> None: """Set a singleton variable in a subsection of the config, e.g. config["remotes"]["data.splitgraph.com"]["SG_ENGINE_HOST"]""" subsection_dict = get_all_in_subsection(config, section, subsection) subsection_dict[item] = value
2,051
test resampling to numpy img 1
import numpy as np import unittest from grass.gunittest.case import TestCase from grass.gunittest.main import test from grass.pygrass.raster import raster2numpy_img from grass.pygrass.gis.region import Region from grass.script.core import tempfile has_PyQt4 = False try: from PyQt4.QtCore import * from PyQt4.QtGui import * has_PyQt4 = True except: pass class RasterRowImgTestCase(TestCase): name = "RasterRowImgTestCase_map" @classmethod def setUpClass(cls): """Create test raster map and region""" cls.use_temp_region() cls.runModule("g.region", n=60, s=0, e=40, w=0, res=0.1) cls.runModule( "r.mapcalc", expression="%s = if(row() >= 10 && row() <= 60, null(), row() + (10.0 * col()))" % (cls.name), overwrite=True, ) cls.runModule("r.colors", map=cls.name, color="elevation") @classmethod def tearDownClass(cls): """Remove the generated vector map, if exist""" cls.runModule("g.remove", flags="f", type="raster", name=cls.name) cls.del_temp_region() @unittest.skipIf(has_PyQt4 is False, "Require PyQt4") def test_resampling_to_QImg_1(self): region = Region() region.from_rast(self.name) region.cols = 320 region.rows = 240 region.adjust() tmpfile = tempfile(False) tmpfile = tmpfile + ".png" a = raster2numpy_img(self.name, region) image = QImage(a.data, region.cols, region.rows, QImage.Format_ARGB32) # image.save("data/a.png") image.save(tmpfile) self.assertFilesEqualMd5(tmpfile, "data/a.png") @unittest.skipIf(has_PyQt4 is False, "Require PyQt4") def test_resampling_to_QImg_2(self): region = Region() region.from_rast(self.name) region.cols = 640 region.rows = 480 region.adjust() tmpfile = tempfile(False) tmpfile = tmpfile + ".png" # With array as argument array = np.ndarray((region.rows * region.cols * 4), np.uint8) raster2numpy_img(rastname=self.name, region=region, color="ARGB", array=array) image = QImage(array.data, region.cols, region.rows, QImage.Format_ARGB32) # image.save("data/b.png") image.save(tmpfile) self.assertFilesEqualMd5(tmpfile, "data/b.png") @unittest.skipIf(has_PyQt4 is False, "Require PyQt4") def test_resampling_to_QImg_large(self): region = Region() region.from_rast(self.name) region.cols = 4000 region.rows = 3000 region.adjust() tmpfile = tempfile(False) tmpfile = tmpfile + ".png" # With array as argument array = np.ndarray((region.rows * region.cols * 4), np.uint8) raster2numpy_img(rastname=self.name, region=region, color="ARGB", array=array) image = QImage(array.data, region.cols, region.rows, QImage.Format_ARGB32) # image.save("data/c.png") image.save(tmpfile) self.assertFilesEqualMd5(tmpfile, "data/c.png") @unittest.skipIf(has_PyQt4 is False, "Require PyQt4") def test_resampling_to_QImg_3(self): region = Region() region.from_rast(self.name) region.cols = 400 region.rows = 300 region.adjust() tmpfile = tempfile(False) tmpfile = tmpfile + ".png" # With array as argument array = np.ndarray((region.rows * region.cols * 4), np.uint8) raster2numpy_img(rastname=self.name, region=region, color="RGB", array=array) image = QImage(array.data, region.cols, region.rows, QImage.Format_RGB32) # image.save("data/d.png") image.save(tmpfile) self.assertFilesEqualMd5(tmpfile, "data/d.png") @unittest.skipIf(has_PyQt4 is False, "Require PyQt4") def test_resampling_to_QImg_4(self): region = Region() region.from_rast(self.name) region.cols = 400 region.rows = 300 region.adjust() tmpfile = tempfile(False) tmpfile = tmpfile + ".png" array = raster2numpy_img(rastname=self.name, region=region, color="RGB") image = QImage(array.data, region.cols, region.rows, QImage.Format_RGB32) # image.save("data/e.png") 
image.save(tmpfile) self.assertFilesEqualMd5(tmpfile, "data/e.png") def METHOD_NAME(self): region = Region() region.ewres = 10 region.nsres = 10 region.adjust(rows=True, cols=True) a = raster2numpy_img(self.name, region) self.assertEqual(len(a), region.rows * region.cols * 4) def test_resampling_to_numpy_img_2(self): region = Region() region.ewres = 1 region.nsres = 1 region.adjust(rows=True, cols=True) a = raster2numpy_img(self.name, region) self.assertEqual(len(a), region.rows * region.cols * 4) def test_resampling_to_numpy_img_3(self): region = Region() region.ewres = 0.4 region.nsres = 0.4 region.adjust(rows=True, cols=True) a = raster2numpy_img(self.name, region, color="GRAY1") self.assertEqual(len(a), region.rows * region.cols * 1) def test_resampling_to_numpy_img_4(self): region = Region() region.ewres = 0.1 region.nsres = 0.1 region.adjust(rows=True, cols=True) a = raster2numpy_img(self.name, region, color="GRAY2") self.assertEqual(len(a), region.rows * region.cols * 1) if __name__ == "__main__": test()
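A hedged sketch of the size invariant that the four numpy-only tests at the end assert: raster2numpy_img yields rows x cols x 4 bytes for ARGB/RGB output and rows x cols x 1 byte for GRAY1/GRAY2 output. The region dimensions below are illustrative stand-ins, not values from the test.

import numpy as np

rows, cols = 300, 400                       # hypothetical region size after Region.adjust()
argb = np.zeros(rows * cols * 4, np.uint8)  # stand-in for raster2numpy_img(..., color="ARGB")
gray = np.zeros(rows * cols * 1, np.uint8)  # stand-in for raster2numpy_img(..., color="GRAY1")
assert len(argb) == rows * cols * 4
assert len(gray) == rows * cols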
2,052
on server init complete
# -*- coding: utf-8 -*- """ *==LICENSE==* CyanWorlds.com Engine - MMOG client, server and tools Copyright (C) 2011 Cyan Worlds, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Additional permissions under GNU GPL version 3 section 7 If you modify this Program, or any covered work, by linking or combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK, NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK (or a modified version of those libraries), containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA, PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the licensors of this Program grant you additional permission to convey the resulting work. Corresponding Source for a non-source form of such a combination shall include the source code for the parts of OpenSSL and IJG JPEG Library used as well as that of the covered work. You can contact Cyan Worlds, Inc. by email [email protected] or by snail mail at: Cyan Worlds, Inc. 14617 N Newport Hwy Mead, WA 99021 *==LICENSE==* """ """ Module: Neighborhood02.py Age: Neighborhood02 Date: January 2004 event manager hooks for Neighborhood02, aka Kirel """ from Plasma import * from PlasmaTypes import * from PlasmaKITypes import * import time class Neighborhood02(ptResponder): def __init__(self): ptResponder.__init__(self) self.id = 5700 self.version = 1 def OnFirstUpdate(self): pass def OnNotify(self,state,id,events): pass def METHOD_NAME(self): self.UpdateRecentVisitors() def UpdateRecentVisitors(self): try: AmCCR = ptCCRMgr().getLevel() except: AmCCR = 0 if not AmCCR: # add player to recent players list deviceNode = None deviceInbox = None playerlist = None # find the device avault = ptAgeVault() adevicesfolder = avault.getAgeDevicesFolder() adevices = adevicesfolder.getChildNodeRefList() for device in adevices: device = device.getChild() devicetn = device.upcastToTextNoteNode() if devicetn and devicetn.getTitle() == "D'ni Imager Right": deviceNode = devicetn break # if we have the device then find the inbox if deviceNode: inboxes = deviceNode.getChildNodeRefList() for inbox in inboxes: inbox = inbox.getChild() inboxfolder = inbox.upcastToFolderNode() if inboxfolder: deviceInbox = inboxfolder break # if we have the inbox then look for the heek score note if deviceInbox: items = deviceInbox.getChildNodeRefList() for item in items: item = item.getChild() itemtn = item.upcastToTextNoteNode() if itemtn: if itemtn.getTitle() == "Visitors, Visiteurs, Besucher": playerlist = itemtn break elif itemtn.getTitle() == "Most Recent Visitors": itemtn.setTitle("Visitors, Visiteurs, Besucher") playerlist = itemtn break # if we have the text note then update it, otherwise create it if playerlist: currenttime = time.gmtime(PtGetDniTime()) currenttimestr = time.strftime("%m/%d/%Y %I:%M %p", currenttime) 
playername = PtGetLocalPlayer().getPlayerName() thetext = playerlist.getText() if (thetext.count("\n") + 1) > 15: thetext = thetext[:thetext.rfind("\n")] thetext = currenttimestr + (" " * (30 - len(currenttimestr))) + playername + "\n" + thetext playerlist.setText(thetext) playerlist.save() else: currenttime = time.gmtime(PtGetDniTime()) currenttimestr = time.strftime("%m/%d/%Y %I:%M %p", currenttime) playername = PtGetLocalPlayer().getPlayerName() thetext = currenttimestr + (" " * (30 - len(currenttimestr))) + playername playerlist = ptVaultTextNoteNode(0) playerlist.setTitle("Visitors, Visiteurs, Besucher") playerlist.setText(thetext) deviceInbox.addNode(playerlist)
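A standalone sketch of the visitor-list update logic above: the timestamp is padded to a fixed 30-character column, the newest visitor is prepended, and the oldest entry is dropped once the note exceeds the 15-line threshold. Constants are taken from the script; the Plasma vault calls are omitted, so this is only an illustration.

import time

def prepend_visitor(text, player_name, width=30, max_lines=15):
    # Pad the timestamp to a fixed column so player names line up in the imager note.
    stamp = time.strftime("%m/%d/%Y %I:%M %p", time.gmtime())
    if text.count("\n") + 1 > max_lines:
        text = text[:text.rfind("\n")]        # drop the oldest entry first
    return stamp + " " * (width - len(stamp)) + player_name + "\n" + text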
2,053
add rule
import re from collections.abc import Mapping ELEMENT_SELECTOR = re.compile(r"^([\w-]+)$") ELEMENT_WITH_ATTR_SELECTOR = re.compile(r"^([\w-]+)\[([\w-]+)\]$") ELEMENT_WITH_ATTR_EXACT_SINGLE_QUOTE_SELECTOR = re.compile( r"^([\w-]+)\[([\w-]+)='(.*)'\]$" ) ELEMENT_WITH_ATTR_EXACT_DOUBLE_QUOTE_SELECTOR = re.compile( r'^([\w-]+)\[([\w-]+)="(.*)"\]$' ) ELEMENT_WITH_ATTR_EXACT_UNQUOTED_SELECTOR = re.compile( r"^([\w-]+)\[([\w-]+)=([\w-]+)\]$" ) class HTMLRuleset: """ Maintains a set of rules for matching HTML elements. Each rule defines a mapping from a CSS-like selector to an arbitrary result object. The following forms of rule are currently supported: 'a' = matches any <a> element 'a[href]' = matches any <a> element with an 'href' attribute 'a[linktype="page"]' = matches any <a> element with a 'linktype' attribute equal to 'page' """ def __init__(self, rules=None): # mapping of element name to a sorted list of (precedence, attr_check, result) tuples # where attr_check is a callable that takes an attr dict and returns True if they match self.element_rules = {} if rules: self.add_rules(rules) def add_rules(self, rules): # accepts either a dict of {selector: result}, or a list of (selector, result) tuples if isinstance(rules, Mapping): rules = rules.items() for selector, result in rules: self.METHOD_NAME(selector, result) def _add_element_rule(self, name, result): # add a rule that matches on any element with name `name` rules = self.element_rules.setdefault(name, []) # element-only rules have priority 2 (lower) rules.append((2, (lambda attrs: True), result)) # sort list on priority rules.sort(key=lambda t: t[0]) def _add_element_with_attr_rule(self, name, attr, result): # add a rule that matches any element with name `name` which has the attribute `attr` rules = self.element_rules.setdefault(name, []) # element-and-attr rules have priority 1 (higher) rules.append((1, (lambda attrs: attr in attrs), result)) # sort list on priority rules.sort(key=lambda t: t[0]) def _add_element_with_attr_exact_rule(self, name, attr, value, result): # add a rule that matches any element with name `name` which has an # attribute `attr` equal to `value` rules = self.element_rules.setdefault(name, []) # element-and-attr rules have priority 1 (higher) rules.append( (1, (lambda attrs: attr in attrs and attrs[attr] == value), result) ) # sort list on priority rules.sort(key=lambda t: t[0]) def METHOD_NAME(self, selector, result): match = ELEMENT_SELECTOR.match(selector) if match: name = match.group(1) self._add_element_rule(name, result) return match = ELEMENT_WITH_ATTR_SELECTOR.match(selector) if match: name, attr = match.groups() self._add_element_with_attr_rule(name, attr, result) return for regex in ( ELEMENT_WITH_ATTR_EXACT_SINGLE_QUOTE_SELECTOR, ELEMENT_WITH_ATTR_EXACT_DOUBLE_QUOTE_SELECTOR, ELEMENT_WITH_ATTR_EXACT_UNQUOTED_SELECTOR, ): match = regex.match(selector) if match: name, attr, value = match.groups() self._add_element_with_attr_exact_rule(name, attr, value, result) return def match(self, name, attrs): """ Look for a rule matching an HTML element with the given name and attribute dict, and return the corresponding result object. If no rule matches, return None. If multiple rules match, the one chosen is undetermined. """ try: rules_to_test = self.element_rules[name] except KeyError: return None for precedence, attr_check, result in rules_to_test: if attr_check(attrs): return result
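A hedged usage sketch of the ruleset above; the selectors and result labels are arbitrary examples. The attribute-qualified rules win over the bare element rule because they are added with higher precedence.

ruleset = HTMLRuleset({
    "a": "plain-link",
    "a[href]": "link-with-href",
    'a[linktype="page"]': "page-link",
})

assert ruleset.match("a", {}) == "plain-link"
assert ruleset.match("a", {"href": "http://example.com"}) == "link-with-href"
assert ruleset.match("a", {"linktype": "page"}) == "page-link"
assert ruleset.match("b", {}) is None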
2,054
test no owner
# redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members from twisted.trial import unittest from buildbot.test.util.config import ConfigErrorsMixin from buildbot.www.authz import roles class RolesFromGroups(unittest.TestCase): def setUp(self): self.roles = roles.RolesFromGroups("buildbot-") def test_noGroups(self): ret = self.roles.getRolesFromUser({"username": 'homer'}) self.assertEqual(ret, []) def test_noBuildbotGroups(self): ret = self.roles.getRolesFromUser({ "username": "homer", "groups": ["employee"] }) self.assertEqual(ret, []) def test_someBuildbotGroups(self): ret = self.roles.getRolesFromUser({ "username": "homer", "groups": ["employee", "buildbot-maintainer", "buildbot-admin"] }) self.assertEqual(ret, ["maintainer", "admin"]) class RolesFromEmails(unittest.TestCase): def setUp(self): self.roles = roles.RolesFromEmails( employee=["[email protected]", "[email protected]"], boss=["[email protected]"]) def test_noUser(self): ret = self.roles.getRolesFromUser({ "username": 'lisa', "email": '[email protected]' }) self.assertEqual(ret, []) def test_User1(self): ret = self.roles.getRolesFromUser({ "username": 'homer', "email": '[email protected]' }) self.assertEqual(ret, ["employee"]) def test_User2(self): ret = self.roles.getRolesFromUser({ "username": 'burns', "email": '[email protected]' }) self.assertEqual(sorted(ret), ["boss", "employee"]) class RolesFromOwner(unittest.TestCase): def setUp(self): self.roles = roles.RolesFromOwner("ownerofbuild") def METHOD_NAME(self): ret = self.roles.getRolesFromUser({ "username": 'lisa', "email": '[email protected]' }, None) self.assertEqual(ret, []) def test_notOwner(self): ret = self.roles.getRolesFromUser({ "username": 'lisa', "email": '[email protected]' }, "[email protected]") self.assertEqual(ret, []) def test_owner(self): ret = self.roles.getRolesFromUser({ "username": 'homer', "email": '[email protected]' }, "[email protected]") self.assertEqual(ret, ["ownerofbuild"]) class RolesFromUsername(unittest.TestCase, ConfigErrorsMixin): def setUp(self): self.roles = roles.RolesFromUsername(roles=["admins"], usernames=["Admin"]) self.roles2 = roles.RolesFromUsername( roles=["developers", "integrators"], usernames=["Alice", "Bob"]) def test_anonymous(self): ret = self.roles.getRolesFromUser({"anonymous": True}) self.assertEqual(ret, []) def test_normalUser(self): ret = self.roles.getRolesFromUser({"username": 'Alice'}) self.assertEqual(ret, []) def test_admin(self): ret = self.roles.getRolesFromUser({"username": 'Admin'}) self.assertEqual(ret, ["admins"]) def test_multipleGroups(self): ret = self.roles2.getRolesFromUser({"username": 'Bob'}) self.assertEqual(ret, ["developers", "integrators"]) def test_badUsernames(self): with self.assertRaisesConfigError('Usernames cannot be None'): roles.RolesFromUsername(roles=[], usernames=[None])
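A hedged sketch of exercising the group-based role mapper the same way the first test class does, assuming the buildbot API shown in the imports above.

from buildbot.www.authz import roles

mapper = roles.RolesFromGroups("buildbot-")
user = {"username": "homer", "groups": ["employee", "buildbot-maintainer", "buildbot-admin"]}
# Only groups carrying the configured prefix become roles, with the prefix stripped.
print(mapper.getRolesFromUser(user))   # expected: ['maintainer', 'admin']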
2,055
add datum for arg
""" EndpointsHelper --------------- This is support for session endpoints, which are a flagged feature for mobile that also form the basis of smart links in web apps. Endpoints define specific locations in the application using a stack, so they rely on similar logic to end of form navigation. The complexity of generating endpoints is all delegated to ``WorkflowHelper``. """ from corehq.apps.app_manager.suite_xml.contributors import PostProcessor from corehq.apps.app_manager.suite_xml.post_process.workflow import ( CommandId, WorkflowDatumMeta, WorkflowHelper, prepend_parent_frame_children, ) from corehq.apps.app_manager.suite_xml.xml_models import ( Argument, PushFrame, SessionEndpoint, Stack, StackDatum, StackInstanceDatum, ) from corehq.util.timer import time_method class EndpointsHelper(PostProcessor): """ Generates "Session Endpoints" - user-defined labels for forms or modules. They end up as entries in the suite file that declare stack operations necessary to navigate to the form or module, as well as what arguments (eg: case IDs) must be provided to get there. """ @time_method() def update_suite(self): for module in self.modules: if module.session_endpoint_id: self.suite.endpoints.append(self._make_session_endpoint(module.session_endpoint_id, module)) if module.case_list_session_endpoint_id: self.suite.endpoints.append(self._make_session_endpoint( module.case_list_session_endpoint_id, module, None, False)) if module.module_type != "shadow": for form in module.get_suite_forms(): if form.session_endpoint_id: self.suite.endpoints.append(self._make_session_endpoint( form.session_endpoint_id, module, form)) elif module.session_endpoint_id: for form in module.get_suite_forms(): endpoint = next( (m for m in module.form_session_endpoints if m.form_id == form.unique_id), None) if endpoint: self.suite.endpoints.append(self._make_session_endpoint( endpoint.session_endpoint_id, module, form)) def _make_session_endpoint(self, endpoint_id, module, form=None, should_add_last_selection_datum=True): stack = Stack() children = self.get_frame_children(module, form) argument_ids = self.get_argument_ids(children, form, should_add_last_selection_datum) # Add a claim request for each endpoint argument. # This assumes that all arguments are case ids. 
non_computed_arguments = [ child for child in children if isinstance(child, WorkflowDatumMeta) and child.requires_selection and (should_add_last_selection_datum or child != children[-1]) ] for arg in non_computed_arguments: self._add_claim_frame(stack, arg, endpoint_id) # Add a frame to navigate to the endpoint frame = PushFrame() stack.add_frame(frame) for child in children: if isinstance(child, CommandId): frame.add_command(child.to_command()) elif child.id in argument_ids: self.METHOD_NAME(frame, child) def get_child(child_id): for child in children: if child.id == child_id: return child arguments = [] for arg_id in argument_ids: child = get_child(arg_id) if child.is_instance: arguments.append(Argument( id=arg_id, instance_id=arg_id, instance_src="jr://instance/selected-entities", )) else: arguments.append(Argument(id=arg_id)) return SessionEndpoint( id=endpoint_id, arguments=arguments, stack=stack, ) def get_argument_ids(self, frame_children, form=None, should_add_last_selection_datum=True): def should_include(child, add_selection_datum): if not isinstance(child, WorkflowDatumMeta): return False if child.requires_selection and add_selection_datum: return True if form: return child.id in (form.function_datum_endpoints or []) return False return [ child.id for child in frame_children if should_include(child, should_add_last_selection_datum or child != frame_children[-1]) ] def _add_claim_frame(self, stack, arg, endpoint_id): frame = PushFrame() stack.add_frame(frame) self.METHOD_NAME(frame, arg) frame.add_command(f"'claim_command.{endpoint_id}.{arg.id}'") def METHOD_NAME(self, frame, child): datum = StackInstanceDatum(id=child.id, value=f"${child.id}") if child.is_instance \ else StackDatum(id=child.id, value=f"${child.id}") frame.add_datum(datum) def get_frame_children(self, module, form): helper = WorkflowHelper(self.suite, self.app, self.app.get_modules()) frame_children = helper.get_frame_children(module, form) if module.root_module_id: frame_children = prepend_parent_frame_children(helper, frame_children, module.root_module) return frame_children
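A restated sketch of the masked frame-datum helper near the end of the class: instance-backed multi-select arguments become StackInstanceDatum entries, everything else a plain StackDatum, and both reference the session variable named after the argument. The classes are the ones imported above; the function name here is a hypothetical stand-in for the masked one.

def add_argument_datum(frame, child):
    # child.is_instance distinguishes instance-backed multi-select arguments.
    datum_cls = StackInstanceDatum if child.is_instance else StackDatum
    frame.add_datum(datum_cls(id=child.id, value=f"${child.id}"))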
2,056
calc time
# -*- coding: utf-8 -*- """ Travel time calculations. """ from .helper_classes import TauModelError from .seismic_phase import SeismicPhase from .utils import parse_phase_list from . import _DEFAULT_VALUES class TauPTime(object): """ Calculate travel times for different branches using linear interpolation between known slowness samples. """ def __init__(self, model, phase_list, depth, degrees, receiver_depth=0.0, ray_param_tol=_DEFAULT_VALUES["default_time_ray_param_tol"] ): self.source_depth = depth self.receiver_depth = receiver_depth self.degrees = degrees self.arrivals = [] self.phases = [] # Names of phases to be used, e.g. PKIKP self.phase_names = parse_phase_list(phase_list) # A standard and a depth corrected model. Both are needed. self.model = model self.depth_corrected_model = self.model self.ray_param_tol = ray_param_tol def run(self): """ Do all the calculations and print the output if told to. The resulting arrival times will be in self.arrivals. """ self.depth_correct(self.source_depth, self.receiver_depth) self.calculate(self.degrees) def depth_correct(self, depth, receiver_depth=None): """ Corrects the TauModel for the given source depth (if not already corrected). """ if receiver_depth is None: receiver_depth = self.receiver_depth if self.depth_corrected_model is None or \ self.depth_corrected_model.source_depth != depth: self.depth_corrected_model = self.model.depth_correct(depth) self.arrivals = [] if receiver_depth != depth: # If already split on receiver depth this does nothing. self.depth_corrected_model = \ self.depth_corrected_model.split_branch(receiver_depth) self.arrivals = [] self.source_depth = depth self.receiver_depth = receiver_depth def recalc_phases(self): """ Recalculates the given phases using a possibly new or changed tau model. """ new_phases = [] for temp_phase_name in self.phase_names: for phase_num, seismic_phase in enumerate(self.phases): pass # if seismic_phase.name == temp_phase_name: # self.phases.pop(phase_num) # if (seismic_phase.source_depth == self.source_depth and # seismic_phase.tau_model == # self.depth_corrected_model): # # OK so copy to new_phases: # new_phases.append(seismic_phase) # break # Executed, if break is NOT called. else: # Didn't find it precomputed, so recalculate: try: seismic_phase = SeismicPhase(temp_phase_name, self.depth_corrected_model, self.receiver_depth) new_phases.append(seismic_phase) except TauModelError: print("Error with this phase, skipping it: " + str(temp_phase_name)) self.phases = new_phases def calculate(self, degrees): """ Calculate the arrival times. """ self.depth_correct(self.source_depth, self.receiver_depth) # Called before, but depth_correct might have changed the phases. self.recalc_phases() self.METHOD_NAME(degrees) def METHOD_NAME(self, degrees): """ Calls the calc_time method of SeismicPhase to calculate arrival times for every phase, each sorted by time. """ self.degrees = degrees self.arrivals = [] for phase in self.phases: self.arrivals += phase.METHOD_NAME(degrees, self.ray_param_tol) # Sort them. self.arrivals = sorted(self.arrivals, key=lambda arrivals: arrivals.time)
2,057
global var
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Visualize Relay IR in AST text-form.""" from collections import deque from typing import ( Dict, Union, Tuple, List, ) import tvm from tvm import relay from .interface import ( DefaultVizParser, Plotter, VizEdge, VizGraph, VizNode, VizParser, ) class TermVizParser(VizParser): """`TermVizParser` parse nodes and edges for `TermPlotter`.""" def __init__(self): self._default_parser = DefaultVizParser() def get_node_edges( self, node: relay.Expr, relay_param: Dict[str, tvm.runtime.NDArray], node_to_id: Dict[relay.Expr, str], ) -> Tuple[Union[VizNode, None], List[VizEdge]]: """Parse a node and edges from a relay.Expr.""" if isinstance(node, relay.Call): return self._call(node, node_to_id) if isinstance(node, relay.Let): return self._let(node, node_to_id) if isinstance(node, relay.GlobalVar): return self.METHOD_NAME(node, node_to_id) if isinstance(node, relay.If): return self._if(node, node_to_id) if isinstance(node, tvm.ir.Op): return self._op(node, node_to_id) if isinstance(node, relay.Function): return self._function(node, node_to_id) # Leverage logics from default parser. return self._default_parser.get_node_edges(node, relay_param, node_to_id) def _call(self, node, node_to_id): node_id = node_to_id[node] viz_node = VizNode(node_id, "Call", "") viz_edges = [VizEdge(node_to_id[node.op], node_id)] for arg in node.args: arg_id = node_to_id[arg] viz_edges.append(VizEdge(arg_id, node_id)) return viz_node, viz_edges def _let(self, node, node_to_id): node_id = node_to_id[node] viz_node = VizNode(node_id, "Let", "(var, val, body)") viz_edges = [ VizEdge(node_to_id[node.var], node_id), VizEdge(node_to_id[node.value], node_id), VizEdge(node_to_id[node.body], node_id), ] return viz_node, viz_edges def METHOD_NAME(self, node, node_to_id): node_id = node_to_id[node] viz_node = VizNode(node_id, "GlobalVar", node.name_hint) viz_edges = [] return viz_node, viz_edges def _if(self, node, node_to_id): node_id = node_to_id[node] viz_node = VizNode(node_id, "If", "(cond, true, false)") viz_edges = [ VizEdge(node_to_id[node.cond], node_id), VizEdge(node_to_id[node.true_branch], node_id), VizEdge(node_to_id[node.false_branch], node_id), ] return viz_node, viz_edges def _op(self, node, node_to_id): node_id = node_to_id[node] op_name = node.name viz_node = VizNode(node_id, op_name, "") viz_edges = [] return viz_node, viz_edges def _function(self, node, node_to_id): node_id = node_to_id[node] viz_node = VizNode(node_id, "Func", str(node.params)) viz_edges = [VizEdge(node_to_id[node.body], node_id)] return viz_node, viz_edges class TermNode: """TermNode is aimed to generate text more suitable for terminal visualization.""" def __init__(self, viz_node: VizNode): self.type = viz_node.type_name # We don't want too many lines in a terminal. 
self.other_info = viz_node.detail.replace("\n", ", ") class TermGraph(VizGraph): """Terminal graph for a relay IR Module Parameters ---------- name: str name of this graph. """ def __init__(self, name: str): self._name = name # A graph in adjacency list form. # The key is source node, and the value is a list of destination nodes. self._graph = {} # a hash table for quick searching. self._id_to_term_node = {} # node_id in reversed post order # That mean, root is the first node. self._node_id_rpo = deque() def node(self, viz_node: VizNode) -> None: """Add a node to the underlying graph. Nodes in a Relay IR Module are expected to be added in the post-order. Parameters ---------- viz_node : VizNode A `VizNode` instance. """ self._node_id_rpo.appendleft(viz_node.identity) if viz_node.identity not in self._graph: # Add the node into the graph. self._graph[viz_node.identity] = [] # Create TermNode from VizNode node = TermNode(viz_node) self._id_to_term_node[viz_node.identity] = node def edge(self, viz_edge: VizEdge) -> None: """Add an edge to the terminal graph. Parameters ---------- viz_edge : VizEdge A `VizEdge` instance. """ # Take CallNode as an example, instead of "arguments point to CallNode", # we want "CallNode points to arguments" in ast-dump form. # # The direction of edge is typically controlled by the implemented VizParser. # Reverse start/end here simply because we leverage default parser implementation. if viz_edge.end in self._graph: self._graph[viz_edge.end].append(viz_edge.start) else: self._graph[viz_edge.end] = [viz_edge.start] def render(self) -> str: """Draw a terminal graph Returns ------- rv1: str text representing a graph. """ lines = [] seen_node = set() def gen_line(indent, n_id): if (indent, n_id) in seen_node: return seen_node.add((indent, n_id)) conn_symbol = ["|--", "`--"] last = len(self._graph[n_id]) - 1 for i, next_n_id in enumerate(self._graph[n_id]): node = self._id_to_term_node[next_n_id] lines.append( f"{indent}{conn_symbol[1 if i==last else 0]}{node.type} {node.other_info}" ) next_indent = indent # increase indent for the next level. next_indent += " " if (i == last) else "| " gen_line(next_indent, next_n_id) first_node_id = self._node_id_rpo[0] first_node = self._id_to_term_node[first_node_id] lines.append(f"@{self._name}({first_node.other_info})") gen_line("", first_node_id) return "\n".join(lines) class TermPlotter(Plotter): """Terminal plotter""" def __init__(self): self._name_to_graph = {} def create_graph(self, name): self._name_to_graph[name] = TermGraph(name) return self._name_to_graph[name] def render(self, filename): """If filename is None, print to stdio. Otherwise, write to the filename.""" lines = [] for name in self._name_to_graph: text_graph = self._name_to_graph[name].render() lines.append(text_graph) if filename is None: print("\n".join(lines)) else: with open(filename, "w") as out_file: out_file.write("\n".join(lines))
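A hedged sketch of driving the terminal plotter defined above through the relay_viz entry point; the RelayVisualizer call signature is assumed from tvm.contrib.relay_viz, and the tiny module is illustrative.

import tvm
from tvm import relay
from tvm.contrib import relay_viz
from tvm.contrib.relay_viz.terminal import TermPlotter, TermVizParser

x = relay.var("x", shape=(1, 3))
mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))
viz = relay_viz.RelayVisualizer(mod, plotter=TermPlotter(), parser=TermVizParser())
viz.render()   # with no filename, the AST-style text graph is printed to stdout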
2,058
test delta reached pending
import itertools from datetime import timedelta import pytest from django.utils import timezone from freezegun import freeze_time from sentry.models import Group, GroupSnooze from sentry.testutils.cases import PerformanceIssueTestCase, SnubaTestCase, TestCase from sentry.testutils.helpers.datetime import before_now, iso_format from sentry.testutils.performance_issues.store_transaction import PerfIssueTransactionTestMixin from sentry.testutils.silo import region_silo_test from sentry.utils.samples import load_data from tests.sentry.issues.test_utils import SearchIssueTestMixin @region_silo_test(stable=True) class GroupSnoozeTest( TestCase, SnubaTestCase, PerfIssueTransactionTestMixin, SearchIssueTestMixin, PerformanceIssueTestCase, ): sequence = itertools.count() # generates unique values, class scope doesn't matter def setUp(self): super().setUp() self.project = self.create_project() self.group.times_seen_pending = 0 def test_until_not_reached(self): snooze = GroupSnooze.objects.create( group=self.group, until=timezone.now() + timedelta(days=1) ) assert snooze.is_valid() def test_until_reached(self): snooze = GroupSnooze.objects.create( group=self.group, until=timezone.now() - timedelta(days=1) ) assert not snooze.is_valid() def test_mismatched_group(self): snooze = GroupSnooze.objects.create(group=self.group) with pytest.raises(ValueError): snooze.is_valid(self.create_group()) def test_delta_not_reached(self): snooze = GroupSnooze.objects.create(group=self.group, count=100, state={"times_seen": 0}) assert snooze.is_valid() def test_delta_reached(self): snooze = GroupSnooze.objects.create(group=self.group, count=100, state={"times_seen": 0}) self.group.update(times_seen=100) assert not snooze.is_valid() def METHOD_NAME(self): snooze = GroupSnooze.objects.create(group=self.group, count=100, state={"times_seen": 0}) self.group.update(times_seen=90) assert snooze.is_valid(use_pending_data=True) self.group.times_seen_pending = 10 assert not snooze.is_valid(use_pending_data=True) def test_user_delta_not_reached(self): snooze = GroupSnooze.objects.create( group=self.group, user_count=100, state={"users_seen": 0} ) assert snooze.is_valid(test_rates=True) @freeze_time() def test_user_delta_reached(self): for i in range(0, 100): self.store_event( data={ "user": {"id": i}, "timestamp": iso_format(before_now(seconds=1)), "fingerprint": ["group1"], }, project_id=self.project.id, ) group = list(Group.objects.all())[-1] snooze = GroupSnooze.objects.create(group=group, user_count=100, state={"users_seen": 0}) assert not snooze.is_valid(test_rates=True) @freeze_time() def test_user_rate_reached(self): """Test that ignoring an error issue until it's hit by 10 users in an hour works.""" for i in range(5): group = self.store_event( data={ "fingerprint": ["group1"], "timestamp": iso_format(before_now(minutes=5 + i)), "tags": {"sentry:user": i}, }, project_id=self.project.id, ).group snooze = GroupSnooze.objects.create(group=group, user_count=5, user_window=60) assert not snooze.is_valid(test_rates=True) @freeze_time() def test_user_rate_reached_perf_issues(self): """Test that ignoring a performance issue until it's hit by 10 users in an hour works.""" for i in range(0, 10): event_data = load_data( "transaction-n-plus-one", timestamp=before_now(minutes=10), ) event_data["user"]["id"] = str(i) event = self.create_performance_issue(event_data=event_data) perf_group = event.group snooze = GroupSnooze.objects.create(group=perf_group, user_count=10, user_window=60) assert not snooze.is_valid(test_rates=True) 
@freeze_time() def test_user_rate_not_reached(self): snooze = GroupSnooze.objects.create(group=self.group, user_count=100, user_window=60) assert snooze.is_valid(test_rates=True) @freeze_time() def test_user_rate_without_test(self): snooze = GroupSnooze.objects.create(group=self.group, count=100, window=60) assert snooze.is_valid(test_rates=False) @freeze_time() def test_rate_not_reached(self): snooze = GroupSnooze.objects.create(group=self.group, count=100, window=60) assert snooze.is_valid(test_rates=True) @freeze_time() def test_rate_reached(self): """Test when an error issue is ignored until it happens 5 times in a day""" for i in range(5): group = self.store_event( data={ "fingerprint": ["group1"], "timestamp": iso_format(before_now(minutes=5 + i)), }, project_id=self.project.id, ).group snooze = GroupSnooze.objects.create(group=group, count=5, window=24 * 60) assert not snooze.is_valid(test_rates=True) @freeze_time() def test_rate_reached_perf_issue(self): """Test when a performance issue is ignored until it happens 10 times in a day""" for i in range(0, 10): event = self.create_performance_issue() snooze = GroupSnooze.objects.create(group=event.group, count=10, window=24 * 60) assert not snooze.is_valid(test_rates=True) @freeze_time() def test_rate_without_test(self): snooze = GroupSnooze.objects.create(group=self.group, count=100, window=60) assert snooze.is_valid(test_rates=False) @freeze_time() def test_user_rate_reached_generic_issues(self): """Test that ignoring a generic issue until it's hit by 10 users in an hour works.""" for i in range(0, 10): event, occurrence, group_info = self.store_search_issue( project_id=self.project.id, user_id=i, fingerprints=["test_user_rate_reached_generic_issues-group"], environment=None, ) assert group_info is not None generic_group = group_info.group assert generic_group is not None snooze = GroupSnooze.objects.create(group=generic_group, user_count=10, user_window=60) assert not snooze.is_valid(test_rates=True) @freeze_time() def test_rate_reached_generic_issue(self): """Test when a generic issue is ignored until it happens 10 times in a day""" for i in range(0, 10): event, occurrence, group_info = self.store_search_issue( project_id=self.project.id, user_id=3, # pin the user_id here to verify the number of events impacts the snooze fingerprints=["test_rate_reached_generic_issue-group"], environment=None, ) assert group_info is not None generic_group = group_info.group assert generic_group is not None snooze = GroupSnooze.objects.create(group=generic_group, count=10, window=24 * 60) assert not snooze.is_valid(test_rates=True)
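The core pattern that the delta tests above exercise, shown in isolation; `group` stands for a stored Group inside a test case like the ones above, and the counts are hypothetical.

from sentry.models import GroupSnooze

# Ignore the group until it is seen 100 more times.
snooze = GroupSnooze.objects.create(group=group, count=100, state={"times_seen": 0})

group.update(times_seen=50)
assert snooze.is_valid()          # threshold not yet reached

group.update(times_seen=100)
assert not snooze.is_valid()      # delta reached, the snooze no longer applies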
2,059
validate tar archive
import errno import json import os import shutil import subprocess import requests DEFAULT_MODEL_PATH = "model_archiver/tests/integ_tests/resources/regular_model" DEFAULT_HANDLER = "service:handle" DEFAULT_RUNTIME = "python" DEFAULT_MODEL_NAME = "model" DEFAULT_EXPORT_PATH = "/tmp/model" MANIFEST_FILE = "MAR-INF/MANIFEST.json" def update_tests(test): test["modelName"] = test.get("modelName", DEFAULT_MODEL_NAME) test["modelPath"] = test.get("modelPath", DEFAULT_MODEL_PATH) test["handler"] = test.get("handler", DEFAULT_HANDLER) test["runtime"] = test.get("runtime", DEFAULT_RUNTIME) test["exportPath"] = test.get("exportPath", DEFAULT_EXPORT_PATH) test["archiveFormat"] = test.get("archiveFormat", "default") return test def create_file_path(path): try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def delete_file_path(path): try: if os.path.isfile(path): os.remove(path) if os.path.isdir(path): shutil.rmtree(path) except OSError: pass def run_test(test, cmd): it = test.get("iterations") if test.get("iterations") is not None else 1 for i in range(it): try: subprocess.check_call(cmd, shell=True) except subprocess.CalledProcessError as exc: if test.get("expectError") is not True: assert 0, "{}".format(exc.output) else: return 0 return 1 def validate_archive_exists(test): fmt = test.get("archiveFormat") if fmt == "tgz": assert os.path.isfile(os.path.join(test.get("exportPath"), test.get("modelName")+".tar.gz")) elif fmt == "no-archive": assert os.path.isdir(os.path.join(test.get("exportPath"), test.get("modelName"))) else: assert os.path.isfile(os.path.join(test.get("exportPath"), test.get("modelName")+".mar")) def validate_manifest_file(manifest, test): """ Validate the MANIFEST file :param manifest: :param test: :return: """ assert manifest.get("runtime") == test.get("runtime") assert manifest.get("model").get("modelName") == test.get("modelName") assert manifest.get("model").get("handler") == test.get("handler") def validate_files(file_list, prefix, regular): assert os.path.join(prefix, MANIFEST_FILE) in file_list assert os.path.join(prefix, "service.py") in file_list if regular: assert os.path.join(prefix, "dummy-artifacts.txt") in file_list assert os.path.join(prefix, "dir/1.py") in file_list else: assert os.path.join(prefix, "model.onnx") in file_list def METHOD_NAME(test_cfg): import tarfile file_name = os.path.join(test_cfg.get("exportPath"), test_cfg.get("modelName") + ".tar.gz") f = tarfile.open(file_name, "r:gz") manifest = json.loads(f.extractfile(os.path.join(test_cfg.get("modelName"), MANIFEST_FILE)).read()) validate_manifest_file(manifest, test_cfg) validate_files(f.getnames(), test_cfg.get("modelName"), "regular_model" in test_cfg.get("modelPath")) def validate_noarchive_archive(test): file_name = os.path.join(test.get("exportPath"), test.get("modelName"), MANIFEST_FILE) manifest = json.loads(open(file_name).read()) validate_manifest_file(manifest, test) def validate_mar_archive(test): import zipfile file_name = os.path.join(test.get("exportPath"), test.get("modelName") + ".mar") zf = zipfile.ZipFile(file_name, "r") manifest = json.loads(zf.open(MANIFEST_FILE).read()) validate_manifest_file(manifest, test) def validate_archive_content(test): fmt = test.get("archiveFormat") if fmt == "tgz": METHOD_NAME(test) if fmt == "no-archive": validate_noarchive_archive(test) if fmt == "default": validate_mar_archive(test) def validate(test): validate_archive_exists(test) validate_archive_content(test) def test_model_archiver(): f 
= open("model_archiver/tests/integ_tests/configuration.json", "r") tests = json.loads(f.read()) for t in tests: try: delete_file_path(t.get("exportPath")) create_file_path(t.get("exportPath")) t = update_tests(t) cmd = "model-archiver " \ "--model-name {} " \ "--model-path {} " \ "--handler {} " \ "--runtime {} " \ "--export-path {} " \ "--archive-format {}".format(t.get("modelName"), t.get("modelPath"), t.get("handler"), t.get("runtime"), t.get("exportPath"), t.get("archiveFormat")) if t.get("force"): cmd += " -f" # TODO: Add tests to check for "convert" functionality if run_test(t, cmd): validate(t) finally: delete_file_path(t.get("exportPath")) if __name__ == "__main__": test_model_archiver()
2,060
get enabled
""" Service support for Debian systems (uses update-rc.d and /sbin/service) .. important:: If you feel that Salt should be using this module to manage services on a minion, and it is using a different module (or gives an error similar to *'service.start' is not available*), see :ref:`here <module-provider-override>`. """ import fnmatch import glob import logging import os import re import shlex import salt.utils.systemd __func_alias__ = {"reload_": "reload"} # Define the module's virtual name __virtualname__ = "service" log = logging.getLogger(__name__) def __virtual__(): """ Only work on Debian and when systemd isn't running """ if __grains__["os"] in ( "Debian", "Raspbian", "Devuan", "NILinuxRT", ) and not salt.utils.systemd.booted(__context__): return __virtualname__ else: return ( False, "The debian_service module could not be loaded: " "unsupported OS family and/or systemd running.", ) def _service_cmd(*args): return "service {} {}".format(args[0], " ".join(args[1:])) def _get_runlevel(): """ returns the current runlevel """ out = __salt__["cmd.run"]("runlevel") # unknown can be returned while inside a container environment, since # this is due to a lack of init, it should be safe to assume runlevel # 2, which is Debian's default. If not, all service related states # will throw an out of range exception here which will cause # other functions to fail. if "unknown" in out: return "2" else: return out.split()[1] def METHOD_NAME(): """ Return a list of service that are enabled on boot CLI Example: .. code-block:: bash salt '*' service.get_enabled """ prefix = "/etc/rc[S{}].d/S".format(_get_runlevel()) ret = set() for line in [x.rsplit(os.sep, 1)[-1] for x in glob.glob("{}*".format(prefix))]: ret.add(re.split(r"\d+", line)[-1]) return sorted(ret) def get_disabled(): """ Return a set of services that are installed but disabled CLI Example: .. code-block:: bash salt '*' service.get_disabled """ return sorted(set(get_all()) - set(METHOD_NAME())) def available(name): """ Returns ``True`` if the specified service is available, otherwise returns ``False``. CLI Example: .. code-block:: bash salt '*' service.available sshd """ return name in get_all() def missing(name): """ The inverse of service.available. Returns ``True`` if the specified service is not available, otherwise returns ``False``. CLI Example: .. code-block:: bash salt '*' service.missing sshd """ return name not in get_all() def get_all(): """ Return all available boot services CLI Example: .. code-block:: bash salt '*' service.get_all """ ret = set() lines = glob.glob("/etc/init.d/*") for line in lines: service = line.split("/etc/init.d/")[1] # Remove README. If it's an enabled service, it will be added back in. if service != "README": ret.add(service) return sorted(ret | set(METHOD_NAME())) def start(name): """ Start the specified service CLI Example: .. code-block:: bash salt '*' service.start <service name> """ cmd = _service_cmd(name, "start") return not __salt__["cmd.retcode"](cmd) def stop(name): """ Stop the specified service CLI Example: .. code-block:: bash salt '*' service.stop <service name> """ cmd = _service_cmd(name, "stop") return not __salt__["cmd.retcode"](cmd) def restart(name): """ Restart the named service CLI Example: .. code-block:: bash salt '*' service.restart <service name> """ cmd = _service_cmd(name, "restart") return not __salt__["cmd.retcode"](cmd) def reload_(name): """ Reload the named service CLI Example: .. 
code-block:: bash salt '*' service.reload <service name> """ cmd = _service_cmd(name, "reload") return not __salt__["cmd.retcode"](cmd) def force_reload(name): """ Force-reload the named service CLI Example: .. code-block:: bash salt '*' service.force_reload <service name> """ cmd = _service_cmd(name, "force-reload") return not __salt__["cmd.retcode"](cmd) def status(name, sig=None): """ Return the status for a service. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. ``salt*``) Args: name (str): The name of the service to check sig (str): Signature to use to find the service via ps Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name> [service signature] """ if sig: return bool(__salt__["status.pid"](sig)) contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name)) if contains_globbing: services = fnmatch.filter(get_all(), name) else: services = [name] results = {} for service in services: cmd = _service_cmd(service, "status") results[service] = not __salt__["cmd.retcode"](cmd, ignore_retcode=True) if contains_globbing: return results return results[name] def enable(name, **kwargs): """ Enable the named service to start at boot CLI Example: .. code-block:: bash salt '*' service.enable <service name> """ cmd = "insserv {0} && update-rc.d {0} enable".format(shlex.quote(name)) return not __salt__["cmd.retcode"](cmd, python_shell=True) def disable(name, **kwargs): """ Disable the named service to start at boot CLI Example: .. code-block:: bash salt '*' service.disable <service name> """ cmd = "update-rc.d {} disable".format(name) return not __salt__["cmd.retcode"](cmd) def enabled(name, **kwargs): """ Return True if the named service is enabled, false otherwise CLI Example: .. code-block:: bash salt '*' service.enabled <service name> """ return name in METHOD_NAME() def disabled(name): """ Return True if the named service is disabled, false otherwise CLI Example: .. code-block:: bash salt '*' service.disabled <service name> """ return name in get_disabled()
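A hedged sketch of invoking the same execution-module functions from Salt's Python client instead of the salt CLI; the minion id and service name are placeholders.

import salt.client

local = salt.client.LocalClient()
# Equivalent to: salt 'web01' service.status nginx
print(local.cmd("web01", "service.status", ["nginx"]))
# Equivalent to: salt 'web01' service.enable nginx
print(local.cmd("web01", "service.enable", ["nginx"]))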
2,061
test resort solo groups
import unittest from drake.tools.lint.formatter import FormatterBase, IncludeFormatter class TestFormatterBase(unittest.TestCase): def test_essentials(self): original_lines = [ '// Line 1\n', '/* Line 2 */\n', '\n', ] dut = FormatterBase('filename.cc', readlines=original_lines) # Everything starts out unchanged. self.assertTrue(dut.is_same_as_original()) self.assertTrue(dut.is_permutation_of_original()) self.assertEqual(dut.get_all_lines(), original_lines) self.assertTrue(dut.get_first_differing_original_index() is None) # Basic getters. self.assertEqual(dut.get_num_lines(), 3) self.assertTrue(dut.is_blank_line(2)) self.assertEqual(dut.get_line(0), '// Line 1\n') # Reverse it and end up with a permutation. dut.set_all_lines(reversed(dut.get_all_lines())) self.assertFalse(dut.is_same_as_original()) self.assertTrue(dut.is_permutation_of_original()) self.assertEqual(dut.get_first_differing_original_index(), 0) # Rebuild it using insertion and removal. dut.set_all_lines(['\n'] * 3) dut.set_line(0, '/* Line 2 */\n') dut.insert_lines(0, ['AAA\n', '// Line 1\n']) dut.remove_all([0, 3]) self.assertEqual(dut.get_all_lines(), original_lines) def test_format_ranges(self): original_lines = [ '#include "line0"\n', '// clang-format off\n', '#include "line2"\n', '// clang-format on\n', '#include "line4"\n', '#include "line5"\n', '/* clang-format off */\n', '#include "line7"\n', '#include "line8"\n', '/* clang-format on */\n', '#include "line10"\n', ] dut = FormatterBase("filename.cc", readlines=original_lines) self.assertEqual( dut.get_format_ranges(), [[0], [4, 5], [10]]) self.assertEqual( dut.get_non_format_ranges(), [[1, 2, 3], [6, 7, 8, 9]]) def test_dos(self): original_lines = [ '#include "line0"\r\n', ] with self.assertRaisesRegex(Exception, "DOS newline"): FormatterBase("filename.cc", readlines=original_lines) def test_missing_eof(self): original_lines = [ '#include "line0"', ] with self.assertRaisesRegex(Exception, "newline.*end of file"): FormatterBase("filename.cc", readlines=original_lines) class TestIncludeFormatter(unittest.TestCase): def _split(self, triple_quoted_file_contents): lines = triple_quoted_file_contents.split("\n") assert len(lines) >= 2 assert lines[0] == "" # Detritus from first triple quote. assert lines[-1] == "" # Detritus from last triple quote. del lines[0] del lines[-1] return [line + "\n" for line in lines] def _check(self, basename, original, expected, first_differing): original_lines = self._split(original) expected_lines = self._split(expected) dut = IncludeFormatter( "drake/dummy/" + basename, readlines=original_lines) dut.format_includes() self.assertEqual(dut.get_all_lines(), expected_lines) self.assertEqual(dut.get_first_differing_original_index(), first_differing) def test_basic(self): # A pile of headers gets sorted per cppguide: # - The related header # - C system files # - C++ system files # - Other libraries' .h files # - Your project's .h files original = """ #include "drake/common/drake_assert.h" #include "drake/dummy/bar.h" #include "drake/dummy/dut.h" #include <gtest/gtest.h> #include <Eigen/Dense> #include <algorithm> #include <poll.h> #include <sys/wait.h> #include <vector> """ expected = """ #include "drake/dummy/dut.h" #include <poll.h> #include <sys/wait.h> #include <algorithm> #include <vector> #include <Eigen/Dense> #include <gtest/gtest.h> #include "drake/common/drake_assert.h" #include "drake/dummy/bar.h" """ self._check("dut.cc", original, expected, 0) def test_nothing(self): # A file with _no_ include statements. 
original = """ namespace { } """ self._check("dut.cc", original, original, None) def test_regroup(self): # Wrongly grouped whitespace. original = """ #include "drake/dummy/dut.h" #include <Eigen/Dense> #include <algorithm> #include <vector> #include "drake/common/drake_assert.h" #include "drake/dummy/bar.h" #include <gtest/gtest.h> """ expected = """ #include "drake/dummy/dut.h" #include <algorithm> #include <vector> #include <Eigen/Dense> #include <gtest/gtest.h> #include "drake/common/drake_assert.h" #include "drake/dummy/bar.h" """ self._check("dut.cc", original, expected, 2) def test_format_off(self): # "clang-format off". original = """ #include "drake/dummy/dut.h" // clang-format off #ifdef FOO #include <algorithm> #include <vector> #else #include <vector> #include <algorithm> #endif // clang-format on #include "drake/common/drake_assert.h" """ self._check("dut.cc", original, original, None) def test_target_is_header(self): # A header file. original = """ #include "drake/common/drake_assert.h" #include <algorithm> namespace { } """ expected = """ #include <algorithm> #include "drake/common/drake_assert.h" namespace { } """ self._check("dut.h", original, expected, 0) def test_associated_comment(self): # A comment prior to a line. original = """ #include "drake/dummy/dut.h" // Some comment describing the next line. #include <vector> namespace { } """ self._check("dut.cc", original, original, None) def test_file_opening_comment(self): # A comment atop the file with no blank line. original = """ /// @file dut.cc /// Mumble mumble /// #include <string> #include <vector> """ self._check("dut.cc", original, original, None) def test_internal_related_header(self): # Two related headers, guarded by "clang-format off". original = """ /* clang-format off (with explanatory comment) */ #include "drake/dummy/dut.h" #include "drake/dummy/dut_internal.h" /* clang-format on (with explanatory comment) */ #include <vector> #include <string> #include "drake/dummy/drake_assert.h" #include "drake/dummy/drake_deprecated.h" """ expected = """ /* clang-format off (with explanatory comment) */ #include "drake/dummy/dut.h" #include "drake/dummy/dut_internal.h" /* clang-format on (with explanatory comment) */ #include <string> #include <vector> #include "drake/dummy/drake_assert.h" #include "drake/dummy/drake_deprecated.h" """ self._check("dut.cc", original, expected, 5) def METHOD_NAME(self): # Groups of one, but sorted incorrectly. original = """ #include "drake/dummy/dut.h" #include "drake/common/drake_assert.h" #include <vector> """ expected = """ #include "drake/dummy/dut.h" #include <vector> #include "drake/common/drake_assert.h" """ self._check("dut.cc", original, expected, 2) def test_nontrivial_reformatting(self): # If clang-format changes any lines, we want to fail-fast. # (Note the two spaces between #include and the double quote.) original_lines = ['#include "nontrivial.h"\n'] dut = IncludeFormatter("nontrivial.cc", readlines=original_lines) dut.format_includes() with self.assertRaisesRegex(Exception, 'not just a shuffle'): dut.rewrite_file()
2,062
get logger
"""Ray Module.""" import logging import os from functools import wraps from typing import TYPE_CHECKING, Any, Callable, List, Optional, TypeVar, Union from awswrangler._config import apply_configs from awswrangler._distributed import EngineEnum, engine if engine.get() == EngineEnum.RAY or TYPE_CHECKING: import ray _logger: logging.Logger = logging.getLogger(__name__) FunctionType = TypeVar("FunctionType", bound=Callable[..., Any]) class RayLogger: """Create discrete Logger instance for Ray Tasks.""" def __init__( self, logging_level: int = logging.INFO, format: str = "%(asctime)s::%(levelname)-2s::%(name)s::%(message)s", # pylint: disable=redefined-builtin datefmt: str = "%Y-%m-%d %H:%M:%S", ): logging.basicConfig(level=logging_level, format=format, datefmt=datefmt) def METHOD_NAME(self, name: Union[str, Any] = None) -> Optional[logging.Logger]: """Return logger object.""" return logging.getLogger(name) @apply_configs def ray_logger( function: FunctionType, configure_logging: bool = True, logging_level: int = logging.INFO, ) -> FunctionType: """ Decorate callable to add RayLogger. Parameters ---------- function : Callable[..., Any] Callable as input to decorator. Returns ------- Callable[..., Any] """ @wraps(function) def wrapper(*args: Any, **kwargs: Any) -> Any: if configure_logging: RayLogger(logging_level=logging_level).METHOD_NAME(name=function.__name__) return function(*args, **kwargs) return wrapper def ray_remote(**options: Any) -> Callable[[FunctionType], FunctionType]: """ Decorate with @ray.remote providing .options(). Parameters ---------- options : Any Ray remote options Returns ------- Callable[..., Any] """ def remote_decorator(function: FunctionType) -> FunctionType: """ Decorate callable to wrap within ray.remote. Parameters ---------- function : Callable[..., Any] Callable as input to ray.remote. Returns ------- Callable[..., Any] """ # Access the source function if it exists function = getattr(function, "_source_func", function) @wraps(function) def wrapper(*args: Any, **kwargs: Any) -> Any: remote_fn = ray.remote(ray_logger(function)) if options: remote_fn = remote_fn.options(**options) return remote_fn.remote(*args, **kwargs) return wrapper return remote_decorator def ray_get(futures: Union["ray.ObjectRef[Any]", List["ray.ObjectRef[Any]"]]) -> Any: """ Run ray.get on futures if distributed. Parameters ---------- futures : List[Any] List of Ray futures Returns ------- List[Any] """ if engine.get() == EngineEnum.RAY: return ray.get(futures) # type: ignore[attr-defined] return futures @apply_configs def initialize_ray( address: Optional[str] = None, redis_password: Optional[str] = None, ignore_reinit_error: bool = True, include_dashboard: Optional[bool] = False, configure_logging: bool = True, log_to_driver: bool = False, logging_level: int = logging.INFO, object_store_memory: Optional[int] = None, cpu_count: Optional[int] = None, gpu_count: Optional[int] = None, ) -> None: """ Connect to an existing Ray cluster or start one and connect to it. 
Parameters ---------- address : Optional[str] Address of the Ray cluster to connect to, by default None redis_password : Optional[str] Password to the Redis cluster, by default None ignore_reinit_error : bool If true, Ray suppress errors from calling ray.init() twice, by default True include_dashboard : Optional[bool] Boolean flag indicating whether or not to start the Ray dashboard, by default False configure_logging : Optional[bool] Boolean flag indicating whether or not to enable logging, by default True log_to_driver : bool Boolean flag to enable routing of all worker logs to the driver, by default False logging_level : int Logging level, defaults to logging.INFO. Ignored unless "configure_logging" is True object_store_memory : Optional[int] The amount of memory (in bytes) to start the object store with, by default None cpu_count : Optional[int] Number of CPUs to assign to each raylet, by default None gpu_count : Optional[int] Number of GPUs to assign to each raylet, by default None """ if not ray.is_initialized(): # Detect an existing cluster ray_address = os.environ.get("RAY_ADDRESS") if not address and ray_address: _logger.info("Using address %s set in the environment variable RAY_ADDRESS", ray_address) address = ray_address if address: _logger.info("Connecting to a Ray instance at: %s", address) ray.init( address=address, include_dashboard=include_dashboard, ignore_reinit_error=ignore_reinit_error, configure_logging=configure_logging, log_to_driver=log_to_driver, logging_level=logging_level, ) else: ray_runtime_env_vars = [ "__MODIN_AUTOIMPORT_PANDAS__", ] ray_init_kwargs = { "num_cpus": cpu_count, "num_gpus": gpu_count, "include_dashboard": include_dashboard, "ignore_reinit_error": ignore_reinit_error, "configure_logging": configure_logging, "log_to_driver": log_to_driver, "logging_level": logging_level, "object_store_memory": object_store_memory, "_redis_password": redis_password, "_memory": object_store_memory, "runtime_env": { "env_vars": {var: os.environ.get(var) for var in ray_runtime_env_vars if os.environ.get(var)} }, } _logger.info("Initializing a Ray instance") ray.init(**ray_init_kwargs)
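The decorators above are meant to be stacked on ordinary functions; a hedged usage sketch, assuming the module is importable (the `awswrangler.distributed.ray` path below is a guess) and that the Ray engine is active:

```python
# Illustrative only: the import path is assumed, and this requires ray to be
# installed with the EngineEnum.RAY engine selected.
from awswrangler.distributed.ray import initialize_ray, ray_get, ray_remote

initialize_ray()  # connects to RAY_ADDRESS if set, otherwise starts a local cluster

@ray_remote(num_cpus=1)       # options are forwarded to ray.remote(...).options()
def square(x: int) -> int:
    return x * x

futures = [square(i) for i in range(4)]  # each call returns a Ray ObjectRef
print(ray_get(futures))                  # resolves to [0, 1, 4, 9] on the Ray engine
```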
2,063
test hidetip
"""Test tooltip, coverage 100%. Coverage is 100% after excluding 6 lines with "# pragma: no cover". They involve TclErrors that either should or should not happen in a particular situation, and which are 'pass'ed if they do. """ from idlelib.tooltip import TooltipBase, Hovertip from test.support import requires requires('gui') from functools import wraps import time from tkinter import Button, Tk, Toplevel import unittest def setUpModule(): global root root = Tk() def tearDownModule(): global root root.update_idletasks() root.destroy() del root def add_call_counting(func): @wraps(func) def wrapped_func(*args, **kwargs): wrapped_func.call_args_list.append((args, kwargs)) return func(*args, **kwargs) wrapped_func.call_args_list = [] return wrapped_func def _make_top_and_button(testobj): global root top = Toplevel(root) testobj.addCleanup(top.destroy) top.title("Test tooltip") button = Button(top, text='ToolTip test button') button.pack() testobj.addCleanup(button.destroy) top.lift() return top, button class ToolTipBaseTest(unittest.TestCase): def setUp(self): self.top, self.button = _make_top_and_button(self) def test_base_class_is_unusable(self): global root top = Toplevel(root) self.addCleanup(top.destroy) button = Button(top, text='ToolTip test button') button.pack() self.addCleanup(button.destroy) with self.assertRaises(NotImplementedError): tooltip = TooltipBase(button) tooltip.showtip() class HovertipTest(unittest.TestCase): def setUp(self): self.top, self.button = _make_top_and_button(self) def is_tipwindow_shown(self, tooltip): return tooltip.tipwindow and tooltip.tipwindow.winfo_viewable() def test_showtip(self): tooltip = Hovertip(self.button, 'ToolTip text') self.addCleanup(tooltip.hidetip) self.assertFalse(self.is_tipwindow_shown(tooltip)) tooltip.showtip() self.assertTrue(self.is_tipwindow_shown(tooltip)) def test_showtip_twice(self): tooltip = Hovertip(self.button, 'ToolTip text') self.addCleanup(tooltip.hidetip) self.assertFalse(self.is_tipwindow_shown(tooltip)) tooltip.showtip() self.assertTrue(self.is_tipwindow_shown(tooltip)) orig_tipwindow = tooltip.tipwindow tooltip.showtip() self.assertTrue(self.is_tipwindow_shown(tooltip)) self.assertIs(tooltip.tipwindow, orig_tipwindow) def METHOD_NAME(self): tooltip = Hovertip(self.button, 'ToolTip text') self.addCleanup(tooltip.hidetip) tooltip.showtip() tooltip.hidetip() self.assertFalse(self.is_tipwindow_shown(tooltip)) def test_showtip_on_mouse_enter_no_delay(self): tooltip = Hovertip(self.button, 'ToolTip text', hover_delay=None) self.addCleanup(tooltip.hidetip) tooltip.showtip = add_call_counting(tooltip.showtip) root.update() self.assertFalse(self.is_tipwindow_shown(tooltip)) self.button.event_generate('<Enter>', x=0, y=0) root.update() self.assertTrue(self.is_tipwindow_shown(tooltip)) self.assertGreater(len(tooltip.showtip.call_args_list), 0) def test_hover_with_delay(self): # Run multiple tests requiring an actual delay simultaneously. # Test #1: A hover tip with a non-zero delay appears after the delay. tooltip1 = Hovertip(self.button, 'ToolTip text', hover_delay=100) self.addCleanup(tooltip1.hidetip) tooltip1.showtip = add_call_counting(tooltip1.showtip) root.update() self.assertFalse(self.is_tipwindow_shown(tooltip1)) self.button.event_generate('<Enter>', x=0, y=0) root.update() self.assertFalse(self.is_tipwindow_shown(tooltip1)) # Test #2: A hover tip with a non-zero delay doesn't appear when # the mouse stops hovering over the base widget before the delay # expires. 
tooltip2 = Hovertip(self.button, 'ToolTip text', hover_delay=100) self.addCleanup(tooltip2.hidetip) tooltip2.showtip = add_call_counting(tooltip2.showtip) root.update() self.button.event_generate('<Enter>', x=0, y=0) root.update() self.button.event_generate('<Leave>', x=0, y=0) root.update() time.sleep(0.15) root.update() # Test #1 assertions. self.assertTrue(self.is_tipwindow_shown(tooltip1)) self.assertGreater(len(tooltip1.showtip.call_args_list), 0) # Test #2 assertions. self.assertFalse(self.is_tipwindow_shown(tooltip2)) self.assertEqual(tooltip2.showtip.call_args_list, []) def test_hidetip_on_mouse_leave(self): tooltip = Hovertip(self.button, 'ToolTip text', hover_delay=None) self.addCleanup(tooltip.hidetip) tooltip.showtip = add_call_counting(tooltip.showtip) root.update() self.button.event_generate('<Enter>', x=0, y=0) root.update() self.button.event_generate('<Leave>', x=0, y=0) root.update() self.assertFalse(self.is_tipwindow_shown(tooltip)) self.assertGreater(len(tooltip.showtip.call_args_list), 0) if __name__ == '__main__': unittest.main(verbosity=2)
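Outside the test harness, the same Hovertip API can be exercised directly; a small sketch (requires a display):

```python
# Minimal standalone use of idlelib's Hovertip, mirroring what the tests above drive.
from tkinter import Button, Tk
from idlelib.tooltip import Hovertip

root = Tk()
button = Button(root, text='Hover over me')
button.pack()
Hovertip(button, 'ToolTip text', hover_delay=500)  # tip shows 500 ms after <Enter>
root.mainloop()
```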
2,064
test biosample characterization upgrade status encode2
import pytest def test_antibody_characterization_upgrade(upgrader, antibody_characterization_1): value = upgrader.upgrade('antibody_characterization', antibody_characterization_1, target_version='3') assert value['schema_version'] == '3' assert value['status'] == 'PENDING DCC REVIEW' assert value['characterization_method'] == 'immunoprecipitation followed by mass spectrometry' def test_biosample_characterization_upgrade(upgrader, biosample_characterization_1): value = upgrader.upgrade('biosample_characterization', biosample_characterization_1, target_version='3') assert value['schema_version'] == '3' assert value['status'] == 'NOT REVIEWED' assert value['characterization_method'] == 'FACs analysis' def test_antibody_characterization_upgrade_status(upgrader, antibody_characterization_2): value = upgrader.upgrade('antibody_characterization', antibody_characterization_2, target_version='4') assert value['schema_version'] == '4' assert value['status'] == 'compliant' def METHOD_NAME(upgrader, biosample_characterization_2): value = upgrader.upgrade('biosample_characterization', biosample_characterization_2, target_version='4') assert value['schema_version'] == '4' assert value['status'] == 'released' def test_antibody_characterization_upgrade_primary(upgrader, antibody_characterization_3): value = upgrader.upgrade('antibody_characterization', antibody_characterization_3, target_version='5') assert value['schema_version'] == '5' assert value['primary_characterization_method'] == 'immunoblot' assert 'characterization_method' not in value def test_antibody_characterization_upgrade_secondary(upgrader, antibody_characterization_3): antibody_characterization_3['characterization_method'] = 'immunoprecipitation followed by mass spectrometry' value = upgrader.upgrade('antibody_characterization', antibody_characterization_3, target_version='5') assert value['schema_version'] == '5' assert value['secondary_characterization_method'] == 'immunoprecipitation followed by mass spectrometry' assert 'characterization_method' not in value def test_antibody_characterization_upgrade_compliant_status(upgrader, antibody_characterization_3): antibody_characterization_3['characterization_method'] = 'immunoprecipitation followed by mass spectrometry' antibody_characterization_3['status'] = 'compliant' value = upgrader.upgrade('antibody_characterization', antibody_characterization_3, target_version='5') assert value['schema_version'] == '5' assert value['secondary_characterization_method'] == 'immunoprecipitation followed by mass spectrometry' assert 'characterization_method' not in value assert value['reviewed_by'] == '81a6cc12-2847-4e2e-8f2c-f566699eb29e' assert value['documents'] == ['88dc12f7-c72d-4b43-a6cd-c6f3a9d08821'] def test_antibody_characterization_upgrade_not_compliant_status(upgrader, antibody_characterization_3): antibody_characterization_3['characterization_method'] = 'immunoprecipitation followed by mass spectrometry' antibody_characterization_3['status'] = 'not reviewed' value = upgrader.upgrade('antibody_characterization', antibody_characterization_3, target_version='5') assert value['schema_version'] == '5' assert value['secondary_characterization_method'] == 'immunoprecipitation followed by mass spectrometry' assert 'characterization_method' not in value assert value['reviewed_by'] == 'ff7b77e7-bb55-4307-b665-814c9f1e65fb' def test_biosample_characterization_upgrade_references(root, upgrader, biosample_characterization, biosample_characterization_4, publication, threadlocals, dummy_request): context = 
root.get_by_uuid(biosample_characterization['uuid']) dummy_request.context = context value = upgrader.upgrade('biosample_characterization', biosample_characterization_4, target_version='5', context=context) assert value['schema_version'] == '5' assert value['references'] == [publication['uuid']] def test_antibody_characterization_upgrade_inline(testapp, registry, antibody_characterization_1): from snovault import TYPES schema = registry[TYPES]['antibody_characterization'].schema res = testapp.post_json('/antibody-characterizations?validate=false&render=uuid', antibody_characterization_1) location = res.location # The properties are stored un-upgraded. res = testapp.get(location + '?frame=raw&upgrade=false').maybe_follow() assert res.json['schema_version'] == '1' # When the item is fetched, it is upgraded automatically. res = testapp.get(location).maybe_follow() assert res.json['schema_version'] == schema['properties']['schema_version']['default'] res = testapp.patch_json(location, {}) # The stored properties are now upgraded. res = testapp.get(location + '?frame=raw&upgrade=false').maybe_follow() assert res.json['schema_version'] == schema['properties']['schema_version']['default'] def test_antibody_characterization_comment_to_submitter_comment_upgrade(upgrader, antibody_characterization_10, antibody_characterization): value = upgrader.upgrade('antibody_characterization', antibody_characterization_10, current_version='10', target_version='11') assert value['schema_version'] == '11' assert 'comment' not in value assert value['submitter_comment'] == 'We tried really hard to characterize this antibody.' def test_upgrade_antibody_characterization_11_to_12(upgrader, antibody_characterization_11, biosample): value = upgrader.upgrade('antibody_characterization', antibody_characterization_11, current_version='11', target_version='12') for characterization_review in value['characterization_reviews']: assert characterization_review['biosample_type'] == 'cell line' def test_upgrade_antibody_characterization_13_to_14(upgrader, antibody_characterization_13, biosample): value = upgrader.upgrade('antibody_characterization', antibody_characterization_13, current_version='13', target_version='14') for characterization_review in value['characterization_reviews']: assert characterization_review['biosample_type'] == 'cell line' def test_upgrade_antibody_characterization_14_to_15(root, upgrader, antibody_characterization_14, a549): value = upgrader.upgrade('antibody_characterization', antibody_characterization_14, current_version='14', target_version='15', context=root.get_by_uuid(a549['uuid'])) for characterization_review in value['characterization_reviews']: assert characterization_review['biosample_ontology'] == a549['uuid'] def test_upgrade_antibody_characterization_15_to_16(upgrader, antibody_characterization_14): value = upgrader.upgrade( 'antibody_characterization', antibody_characterization_14, current_version='15', target_version='16' ) for char_review in value['characterization_reviews']: assert 'biosample_type' not in char_review assert 'biosample_term_id' not in char_review assert 'biosample_term_name' not in char_review
2,065
sub
# # SPDX-License-Identifier: GPL-2.0-only # import errno import re import os class OEList(list): """OpenEmbedded 'list' type Acts as an ordinary list, but is constructed from a string value and a separator (optional), and re-joins itself when converted to a string with str(). Set the variable type flag to 'list' to use this type, and the 'separator' flag may be specified (defaulting to whitespace).""" name = "list" def __init__(self, value, separator = None): if value is not None: list.__init__(self, value.split(separator)) else: list.__init__(self) if separator is None: self.separator = " " else: self.separator = separator def __str__(self): return self.separator.join(self) def choice(value, choices): """OpenEmbedded 'choice' type Acts as a multiple choice for the user. To use this, set the variable type flag to 'choice', and set the 'choices' flag to a space separated list of valid values.""" if not isinstance(value, str): raise TypeError("choice accepts a string, not '%s'" % type(value)) value = value.lower() choices = choices.lower() if value not in choices.split(): raise ValueError("Invalid choice '%s'. Valid choices: %s" % (value, choices)) return value class NoMatch(object): """Stub python regex pattern object which never matches anything""" def findall(self, string, flags=0): return None def finditer(self, string, flags=0): return None def match(self, flags=0): return None def search(self, string, flags=0): return None def split(self, string, maxsplit=0): return None def METHOD_NAME(pattern, repl, string, count=0): return None def subn(pattern, repl, string, count=0): return None NoMatch = NoMatch() def regex(value, regexflags=None): """OpenEmbedded 'regex' type Acts as a regular expression, returning the pre-compiled regular expression pattern object. To use this type, set the variable type flag to 'regex', and optionally, set the 'regexflags' type to a space separated list of the flags to control the regular expression matching (e.g. FOO[regexflags] += 'ignorecase'). See the python documentation on the 're' module for a list of valid flags.""" flagval = 0 if regexflags: for flag in regexflags.split(): flag = flag.upper() try: flagval |= getattr(re, flag) except AttributeError: raise ValueError("Invalid regex flag '%s'" % flag) if not value: # Let's ensure that the default behavior for an undefined or empty # variable is to match nothing. If the user explicitly wants to match # anything, they can match '.*' instead. 
return NoMatch try: return re.compile(value, flagval) except re.error as exc: raise ValueError("Invalid regex value '%s': %s" % (value, exc.args[0])) def boolean(value): """OpenEmbedded 'boolean' type Valid values for true: 'yes', 'y', 'true', 't', '1' Valid values for false: 'no', 'n', 'false', 'f', '0', None """ if value is None: return False if isinstance(value, bool): return value if not isinstance(value, str): raise TypeError("boolean accepts a string, not '%s'" % type(value)) value = value.lower() if value in ('yes', 'y', 'true', 't', '1'): return True elif value in ('no', 'n', 'false', 'f', '0'): return False raise ValueError("Invalid boolean value '%s'" % value) def integer(value, numberbase=10): """OpenEmbedded 'integer' type Defaults to base 10, but this can be specified using the optional 'numberbase' flag.""" return int(value, int(numberbase)) _float = float def float(value, fromhex='false'): """OpenEmbedded floating point type To use this type, set the type flag to 'float', and optionally set the 'fromhex' flag to a true value (obeying the same rules as for the 'boolean' type) if the value is in base 16 rather than base 10.""" if boolean(fromhex): return _float.fromhex(value) else: return _float(value) def path(value, relativeto='', normalize='true', mustexist='false'): value = os.path.join(relativeto, value) if boolean(normalize): value = os.path.normpath(value) if boolean(mustexist): try: with open(value, 'r'): pass except IOError as exc: if exc.errno == errno.ENOENT: raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT))) return value def is_x86(arch): """ Check whether arch is x86 or x86_64 """ if arch.startswith('x86_') or re.match('i.*86', arch): return True else: return False def qemu_use_kvm(kvm, target_arch): """ Enable kvm if target_arch == build_arch or both of them are x86 archs. """ use_kvm = False if kvm and boolean(kvm): build_arch = os.uname()[4] if is_x86(build_arch) and is_x86(target_arch): use_kvm = True elif build_arch == target_arch: use_kvm = True return use_kvm
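A quick sketch of how these converters are typically called, assuming the file is importable as `oe.types` inside a BitBake/OpenEmbedded environment:

```python
# Illustrative calls against the converters defined above; the oe.types import
# path is an assumption about how this file is packaged.
from oe import types

print(types.boolean("Yes"))                # True
print(types.integer("ff", 16))             # 255
print(types.choice("md5", "md5 sha256"))   # 'md5'
print(types.qemu_use_kvm("1", "x86_64"))   # True when the build host is x86 too
```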
2,066
test place no place info
""" Test PlaceInfo """ import pytest PHOTOS_DB = "./tests/Test-Places-Catalina-10_15_1.photoslibrary/database/photos.db" UUID_DICT = { "place_dc": "128FB4C6-0B16-4E7D-9108-FB2E90DA1546", "place_maui": "FF7AFE2C-49B0-4C9B-B0D7-7E1F8B8F2F0C", "no_place": "A9B73E13-A6F2-4915-8D67-7213B39BAE9F", } MAUI_DICT = { "name": "Maui, Wailea, Hawai'i, United States", "names": { "field0": [], "country": ["United States"], "state_province": ["Hawai'i"], "sub_administrative_area": ["Maui"], "city": ["Wailea", "Kihei", "Kihei"], "field5": [], "additional_city_info": [], "ocean": [], "area_of_interest": [], "inland_water": [], "field10": [], "region": ["Maui"], "sub_throughfare": [], "field13": [], "postal_code": [], "field15": [], "field16": [], "street_address": ["3700 Wailea Alanui Dr"], "body_of_water": [], }, "country_code": "US", "ishome": False, "address_str": "3700 Wailea Alanui Dr, Kihei, HI 96753, United States", "address": { "street": "3700 Wailea Alanui Dr", "sub_locality": None, "city": "Kihei", "sub_administrative_area": "Maui", "state_province": "HI", "postal_code": "96753", "country": "United States", "iso_country_code": "US", }, } def test_place_place_info_1(): # test valid place info import osxphotos photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB) photo = photosdb.photos(uuid=[UUID_DICT["place_dc"]])[0] assert photo.place is not None assert isinstance(photo.place, osxphotos.placeinfo.PlaceInfo) assert not photo.place.ishome assert photo.place.name == "Washington, District of Columbia, United States" assert photo.place.names.country[0] == "United States" assert photo.place.names.state_province[0] == "District of Columbia" assert photo.place.names.city[0] == "Washington" assert photo.place.names.additional_city_info[0] == "Adams Morgan" assert photo.place.names.street_address[0] == "2038 18th St NW" assert photo.place.names.ocean == [] assert photo.place.names.area_of_interest == [] assert photo.place.names.inland_water == [] assert photo.place.names.postal_code == [] assert photo.place.names.sub_throughfare == [] assert photo.place.names.body_of_water == [] assert photo.place.country_code == "US" assert ( photo.place.address_str == "2038 18th St NW, Washington, DC 20009, United States" ) assert photo.place.address.city == "Washington" assert photo.place.address.country == "United States" assert photo.place.address.postal_code == "20009" assert photo.place.address.state_province == "DC" assert photo.place.address.street == "2038 18th St NW" assert photo.place.address.sub_administrative_area is None assert photo.place.address.sub_locality == "Adams Morgan" assert photo.place.address.iso_country_code == "US" def test_place_place_info_2(): # test valid place info import osxphotos photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB) photo = photosdb.photos(uuid=[UUID_DICT["place_maui"]])[0] assert isinstance(photo.place, osxphotos.placeinfo.PlaceInfo) assert photo.place is not None assert not photo.place.ishome assert photo.place.name == "Maui, Wailea, Hawai'i, United States" assert photo.place.names.street_address == ["3700 Wailea Alanui Dr"] assert photo.place.names.city == ["Wailea", "Kihei", "Kihei"] assert photo.place.names.region == ["Maui"] assert photo.place.names.sub_administrative_area == ["Maui"] assert photo.place.names.state_province == ["Hawai'i"] assert photo.place.names.country == ["United States"] assert photo.place.country_code == "US" assert ( photo.place.address_str == "3700 Wailea Alanui Dr, Kihei, HI 96753, United States" ) assert type(photo.place.address) == 
osxphotos.placeinfo.PostalAddress assert photo.place.address.city == "Kihei" assert photo.place.address.country == "United States" assert photo.place.address.postal_code == "96753" assert photo.place.address.state_province == "HI" assert photo.place.address.street == "3700 Wailea Alanui Dr" assert photo.place.address.sub_administrative_area == "Maui" assert photo.place.address.sub_locality is None assert photo.place.address.iso_country_code == "US" def METHOD_NAME(): # test valid place info import osxphotos photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB) photo = photosdb.photos(uuid=[UUID_DICT["no_place"]])[0] assert photo.place is None def test_place_place_info_asdict(): # test PlaceInfo.asdict() import osxphotos photosdb = osxphotos.PhotosDB(dbfile=PHOTOS_DB) photo = photosdb.photos(uuid=[UUID_DICT["place_maui"]])[0] assert isinstance(photo.place, osxphotos.placeinfo.PlaceInfo) assert photo.place.asdict() == MAUI_DICT
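The same PlaceInfo lookups can be run outside pytest; a hedged sketch using the test library shipped with the repo (any Photos library path would do):

```python
# Iterate a Photos library and print reverse-geolocation info for every
# photo that has a place record.
import osxphotos

photosdb = osxphotos.PhotosDB(dbfile="./tests/Test-Places-Catalina-10_15_1.photoslibrary/database/photos.db")
for photo in photosdb.photos():
    if photo.place is not None:
        print(photo.uuid, photo.place.name, photo.place.address_str)
```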
2,067
test create view no view aborts
import os import pytest from tempfile import TemporaryDirectory from mindsdb.api.http.initialize import initialize_app from mindsdb.migrations import migrate from mindsdb.interfaces.storage import db from mindsdb.utilities.config import Config @pytest.fixture(scope="session", autouse=True) def app(): old_minds_db_con = '' if 'MINDSDB_DB_CON' in os.environ: old_minds_db_con = os.environ['MINDSDB_DB_CON'] with TemporaryDirectory(prefix='views_test_') as temp_dir: db_path = 'sqlite:///' + os.path.join(temp_dir, 'mindsdb.sqlite3.db') # Need to change env variable for migrate module, since it calls db.init(). os.environ['MINDSDB_DB_CON'] = db_path db.init() migrate.migrate_to_head() app = initialize_app(Config(), True, False) yield app os.environ['MINDSDB_DB_CON'] = old_minds_db_con @pytest.fixture() def client(app): return app.test_client() def test_get_view_project_not_found_abort(client): response = client.get('/api/projects/zoopy/views', follow_redirects=True) assert '404' in response.status def test_get_view_not_found(client): response = client.get('/api/projects/mindsdb/views/vroom', follow_redirects=True) assert '404' in response.status def test_create_view(client): view_data = { 'view': { 'name': 'test_create_view', 'query': 'SELECT * FROM example_db.house_sales' } } response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True) # Make sure we use the CREATED HTTP status code. assert '201' in response.status new_view = response.get_json() expected_view = { 'name': 'test_create_view', 'query': 'SELECT * FROM example_db.house_sales', 'id': new_view['id'] } assert new_view == expected_view def test_create_view_project_not_found_abort(client): view_data = { 'view': { 'name': 'test_create_view', 'query': 'SELECT * FROM example_db.house_sales' } } response = client.post('/api/projects/muhproject/views', json=view_data, follow_redirects=True) assert '404' in response.status def test_create_view_already_exists_abort(client): view_data = { 'view': { 'name': 'test_create_view_duplicate', 'query': 'SELECT * FROM example_db.house_sales' } } response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True) assert '201' in response.status create_duplicate_response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True) # Make sure we use CONFLICT status code. 
assert '409' in create_duplicate_response.status def METHOD_NAME(client): view_data = { 'name': 'test_create_view', 'query': 'SELECT * FROM example_db.house_sales' } response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True) assert '400' in response.status def test_create_view_no_name_aborts(client): view_data = { 'view': { 'query': 'SELECT * FROM example_db.house_sales' } } response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True) assert '400' in response.status def test_create_view_no_query_aborts(client): view_data = { 'view': { 'name': 'test_create_view' } } response = client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True) assert '400' in response.status def test_update_view(client): view_data = { 'view': { 'name': 'test_update_view', 'query': 'SELECT * FROM example_db.house_sales' } } updated_view = { 'view': { 'query': 'SELECT * FROM example_db.updated_house_sales' } } client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True) response = client.put('/api/projects/mindsdb/views/test_update_view', json=updated_view, follow_redirects=True) assert '200' in response.status updated_view = response.get_json() expected_view = { 'name': 'test_update_view', 'query': 'SELECT * FROM example_db.updated_house_sales', 'id': updated_view['id'] } assert updated_view == expected_view def test_update_view_creates(client): view_data = { 'view': { 'query': 'SELECT * FROM example_db.house_sales' } } response = client.put('/api/projects/mindsdb/views/test_update_view_creates', json=view_data, follow_redirects=True) assert '201' in response.status created_view = response.get_json() expected_view = { 'name': 'test_update_view_creates', 'query': 'SELECT * FROM example_db.house_sales', 'id': created_view['id'] } assert created_view == expected_view def test_update_view_no_view_aborts(client): view_data = { 'name': 'test_update_view', 'query': 'SELECT * FROM example_db.house_sales' } response = client.put('/api/projects/mindsdb/views/test_update_view', json=view_data, follow_redirects=True) assert '400' in response.status def test_delete_view(client): view_data = { 'view': { 'name': 'test_delete_view', 'query': 'SELECT * FROM example_db.house_sales' } } # Delete newly created DB. client.post('/api/projects/mindsdb/views', json=view_data, follow_redirects=True) response = client.get('/api/projects/mindsdb/views/test_delete_view', follow_redirects=True) assert '200' in response.status response = client.delete('/api/projects/mindsdb/views/test_delete_view', follow_redirects=True) # Make sure we return NO_CONTENT status since we don't return the deleted DB. assert '204' in response.status response = client.get('/api/projects/mindsdb/views/test_delete_view', follow_redirects=True) assert '404' in response.status def test_delete_view_does_not_exist(client): response = client.delete('/api/projects/mindsdb/views/florp', follow_redirects=True) assert '404' in response.status def test_delete_view_project_not_found(client): response = client.delete('/api/projects/dindsmb/views/test_delete_view', follow_redirects=True) assert '404' in response.status
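The routes exercised above can also be hit against a running MindsDB HTTP server; a hedged sketch (the host and port below are assumptions, adjust to your deployment):

```python
# Illustrative client-side call; 127.0.0.1:47334 is assumed to be the HTTP API address.
import requests

view = {'view': {'name': 'my_view', 'query': 'SELECT * FROM example_db.house_sales'}}
resp = requests.post('http://127.0.0.1:47334/api/projects/mindsdb/views', json=view)
print(resp.status_code)  # 201 on creation, 409 if a view with that name already exists
print(resp.json())
```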
2,068
register attributes
from collections import UserList from collections.abc import Iterable from datetime import datetime from functools import reduce from mage_ai.api.operations.constants import READ from mage_ai.api.resources.BaseResource import BaseResource from mage_ai.orchestration.db.models.base import BaseModel from mage_ai.shared.hash import merge_dict import importlib import inspect class BasePresenter(): all_attributes_attr = {} all_formats_attr = {} default_attributes = [] def __init__(self, resource, current_user, **kwargs): self.current_user = current_user self.options = kwargs self.resource = resource @classmethod def all_attributes(self): if not self.all_attributes_attr.get(self.__name__): self.all_attributes_attr[self.__name__] = {} return self.all_attributes_attr[self.__name__] @classmethod def all_formats(self): if not self.all_formats_attr.get(self.__name__): self.all_formats_attr[self.__name__] = { 'default': self.default_attributes, } return self.all_formats_attr[self.__name__] @classmethod def formats(self, format_arg): if format_arg and self.all_formats().get(format_arg, None) is not None: return self.all_formats()[format_arg] else: return self.all_formats()['default'] @classmethod def METHOD_NAME(self, keys, klass_symbol_or_lambda): for key in keys: self.all_attributes()[key] = klass_symbol_or_lambda @classmethod def register_format(self, format_arg, keys): self.all_formats()[format_arg] = keys @classmethod def register_formats(self, formats, keys): arr = formats if isinstance(formats, list) else [formats] for format_arg in arr: self.register_format(format_arg, keys) @classmethod async def present_resource(self, resource, user, **kwargs): async def present_lambda(r): if r and inspect.isawaitable(r): r = await r results = r.__class__.presenter_class()( r, user, **kwargs, ).present( **kwargs, ) if results and inspect.isawaitable(results): results = await results return results if isinstance(resource, Iterable): return [await present_lambda(r) for r in resource] else: return await present_lambda(resource) @classmethod def present_model(self, model, resource_class, user, **kwargs): if model: return self.present_resource( resource_class(model, user, **kwargs), user, **kwargs, ) @classmethod def present_models(self, models, resource_class, user, **kwargs): return self.present_resource( resource_class.build_result_set(models, user, **kwargs), user, **kwargs, ) async def present(self, **kwargs): def _build(obj, key): value = getattr(self, key) if callable(value): value = value(**kwargs) self.__validate_attribute_type(key, value) if issubclass( value.__class__, list) or issubclass( value.__class__, UserList): obj[key] = [ self.__transform_value( key, v, **kwargs) for v in value] else: obj[key] = self.__transform_value(key, value, **kwargs) return obj format_to_present = kwargs.get('format', None) if format_to_present and self.options.get('from_resource'): from_resource_name = self.options['from_resource'].resource_name_singular( ) format_to_present = f'{from_resource_name}/{format_to_present}' return reduce(_build, self.__class__.formats(format_to_present), {}) def __transform_value(self, key, value, **kwargs): klass_symbol_or_lambda = self.__class__.all_attributes().get(key, None) if issubclass(value.__class__, BaseModel): resource_class_name = f'{value.__class__.__name__}Resource' resource_class = getattr(importlib.import_module( f'mage_ai.api.resources.{resource_class_name}'), resource_class_name, ) value = resource_class(value, self.current_user, **kwargs) if isinstance(value, datetime): 
return str(value) elif klass_symbol_or_lambda is float: return float(value) elif klass_symbol_or_lambda is int: return int(value) elif issubclass(value.__class__, BaseResource): opts = self.options.copy() opts['from_resource'] = self.resource data = value.presenter_class().present_resource( value, self.current_user, **merge_dict(kwargs, opts), ) if not kwargs.get('ignore_permissions'): policy = value.policy_class()(value, self.current_user, **opts) policy.authorize_attributes( READ, data.keys(), **opts, ) return data else: return value def __validate_attribute_class(self, klass_symbol, value): pass def __validate_attribute_type(self, key, value): pass def __getattr__(self, name): def _missing(*args, **kwargs): val = getattr(self.resource, name) if callable(val): return val(*args, **kwargs) else: return val return _missing()
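A hedged sketch of how a concrete presenter might build on the registration hooks above; the subclass and attribute names are invented for illustration and assume BasePresenter is importable:

```python
# Hypothetical subclass of BasePresenter (names are made up); shows how
# register_format() and formats() interact with default_attributes.
class PipelinePresenter(BasePresenter):
    default_attributes = ['id', 'name', 'created_at']

PipelinePresenter.register_format('with_schedules',
                                  PipelinePresenter.default_attributes + ['schedules'])

print(PipelinePresenter.formats('with_schedules'))  # ['id', 'name', 'created_at', 'schedules']
print(PipelinePresenter.formats(None))              # falls back to default_attributes
```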
2,069
rebuild cases
from django.core.management.base import BaseCommand from casexml.apps.case.cleanup import rebuild_case_from_forms from casexml.apps.case.xform import get_case_updates from corehq.apps.users.models import CouchUser from corehq.form_processor.backends.sql.dbaccessors import LedgerAccessorSQL from corehq.form_processor.models import RebuildWithReason, XFormInstance from corehq.util.log import with_progress_bar from corehq.form_processor.interfaces.processor import FormProcessorInterface from corehq.form_processor.parsers.ledgers.form import get_case_ids_from_stock_transactions class Command(BaseCommand): help = """ Bulk archive forms for user on domain. First archive all forms and then rebuild corresponding cases """ def __init__(self, *args, **kwargs): super(Command, self).__init__(*args, **kwargs) self.forms = [] self.case_ids_to_rebuild = [] self.user_id = None self.domain = None def add_arguments(self, parser): parser.add_argument('user_id') parser.add_argument('domain') def _get_forms_to_archive(self): # ordered with latest form's id on top get_forms = XFormInstance.objects.get_forms form_ids = XFormInstance.objects.get_form_ids_for_user(self.domain, self.user_id) return [f for f in get_forms(form_ids, self.domain) if f.is_normal] def _fetch_case_ids_to_rebuild(self): case_ids_to_rebuild = set() for form in with_progress_bar(self.forms): form_case_ids = set(cu.id for cu in get_case_updates(form)) if form_case_ids: case_ids_to_rebuild.update(form_case_ids) return list(case_ids_to_rebuild) def _archive_forms(self): with open("forms_archived.txt", "w") as forms_log: for form in with_progress_bar(self.forms): forms_log.write("%s\n" % form.form_id) form.archive(trigger_signals=False) def _remove_ledger_transactions(self): with open("ledger_transactions_removed_case_ids.txt", "w") as case_ids_log: forms_iterated = 0 for xform in with_progress_bar(self.forms): forms_iterated += 1 if forms_iterated % 100 == 0: print("traversed %s forms" % forms_iterated) ledger_case_ids = get_case_ids_from_stock_transactions(xform) if ledger_case_ids: ledger_case_ids = list(ledger_case_ids) for ledger_case_id in ledger_case_ids: case_ids_log.write("%s\n" % ledger_case_id) LedgerAccessorSQL.delete_ledger_transactions_for_form(ledger_case_ids, xform.form_id) def METHOD_NAME(self): user = CouchUser.get_by_user_id(self.user_id) reason = "User %s forms archived for domain %s by system" % (user.raw_username, self.domain) form_processor_interface = FormProcessorInterface(self.domain) with open("cases_rebuilt.txt", "w") as case_log: for case_id in with_progress_bar(self.case_ids_to_rebuild): case_log.write("%s\n" % case_id) rebuild_case_from_forms(self.domain, case_id, RebuildWithReason(reason=reason)) ledgers = form_processor_interface.ledger_db.get_ledgers_for_case(case_id) for ledger in ledgers: form_processor_interface.ledger_processor.rebuild_ledger_state( case_id, ledger.section_id, ledger.entry_id) def handle(self, user_id, domain, **options): self.user_id = user_id self.domain = domain self.forms = self._get_forms_to_archive() print("Found %s normal forms for user" % len(self.forms)) self.case_ids_to_rebuild = self._fetch_case_ids_to_rebuild() print("Found %s cases that would need to be rebuilt" % len(self.case_ids_to_rebuild)) print("Starting with form archival") self._archive_forms() print("Starting with removing ledger transactions") self._remove_ledger_transactions() print("Starting with cases rebuild") self.METHOD_NAME() print("Completed!")
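Like any Django management command, this one is run by the name of the file it lives in, which is not shown here; a hedged invocation sketch:

```python
# The command name below is an assumption -- substitute the actual module filename.
from django.core.management import call_command

call_command('archive_forms_and_rebuild_cases', '<user_id>', '<domain>')
```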
2,070
short name
# Copyright 2023 The JAX Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Tests for cross-lowering. We check that we produce the same exact HLO using native lowering and with cross-lowering. This will save the HLO for all PrimitiveHarnesses as generated on the current backend (`jax.default_backend()`) for all of `cpu`, `gpu`, and `tpu`. The file names are <save_directory>/<harness_name>/for_{cpu,tpu}_on_{cpu,tpu}.mlir. If a saved file already exists produced on a different backend, then compare the currently saved file with the saved one. """ from collections.abc import Sequence import contextlib import dataclasses import os import re from typing import Callable, Optional import zlib from absl import app from absl import logging import numpy.random as npr import jax from jax import config # Must import before TF from jax.experimental import jax2tf # Defines needed flags from jax._src import test_util # Defines needed flags config.parse_flags_with_absl() # Import after parsing flags from jax.experimental.jax2tf.tests import primitive_harness @dataclasses.dataclass class Scenario: harness: primitive_harness.Harness on_platform: str for_platform: str @property def METHOD_NAME(self) -> str: basename = re.sub(r"[^a-zA-Z0-9_\-]", "_", self.harness.fullname) if len(basename) >= 128: basename = basename[0:100] + str(hash(self.harness.fullname)) return basename def output_file(self, save_directory: str) -> str: basename = self.METHOD_NAME return os.path.join( save_directory, basename, f"for_{self.for_platform}_on_{self.on_platform}.mlir") def __str__(self): return f"Scenario(harness={self.harness.fullname}, on={self.on_platform}, for={self.for_platform}, basename={self.METHOD_NAME}" class Io: """Abstracts a few IO operation over standard "open" vs. gfile.""" def __init__(self, use_gfile=False): self.use_gfile = use_gfile if use_gfile: from tensorflow.io import gfile self.gfile = gfile else: self.gfile = None def exists(self, filename: str) -> bool: if self.use_gfile: return self.gfile.exists(filename) else: return os.path.exists(filename) def makedirs(self, dirname: str): if self.use_gfile: return self.gfile.makedirs(dirname) else: return os.makedirs(dirname) @contextlib.contextmanager def open(self, filename: str, mode: str): if self.use_gfile: f = self.gfile.GFile(filename, mode=mode) else: f = open(filename, mode=mode) try: yield f finally: f.close() def write_and_check_harness(harness: primitive_harness.Harness, io: Io, save_directory: str, for_platforms: Sequence[str] = ("cpu", "tpu"),) -> Sequence[str]: """Writes and checks HLO for a given harness. Writes the HLOs generated in the current platform for all platforms. If it finds previously written HLOs generated on other platforms, compares them with the ones generated on this platform. Returns a list of harnesses on which diffs were found. 
""" diffs = [] func_jax = harness.dyn_fun rng = npr.RandomState(zlib.adler32(harness.fullname.encode())) args = harness.dyn_args_maker(rng) # Generate the HLO for all platforms for for_platform in for_platforms: if not harness.filter(for_platform): logging.info("Skip harness %s for %s because it is not implemented in JAX", harness.fullname, for_platform) continue scenario1 = Scenario(harness, jax.default_backend(), for_platform) output_file = scenario1.output_file(save_directory) output_dir = os.path.dirname(output_file) if not io.exists(output_dir): io.makedirs(output_dir) if io.exists(output_file): with open(output_file) as f: hlo = f.read() else: # For a tighter check, detect the native platform lowering and do not # trigger cross-lowering if for_platform == jax.default_backend(): lowered = jax.jit(func_jax).lower(*args) else: # TODO: replace this with JAX cross-platform API, without going through # jax2tf from jax.experimental.jax2tf.jax2tf import cross_platform_lowering lowered = cross_platform_lowering(func_jax, args, platforms=[for_platform]) hlo = lowered.compiler_ir(dialect="stablehlo") # type: ignore with open(output_file, "w") as f: f.write(str(hlo)) # Compare with previously written files for on_platform in ['cpu', 'tpu']: if on_platform == jax.default_backend(): continue scenario2 = Scenario(harness, on_platform, for_platform) other_file = scenario2.output_file(save_directory) if io.exists(other_file): logging.info("Comparing for %s harness %s on %s vs %s", for_platform, harness.fullname, jax.default_backend(), on_platform) with open(other_file) as f: other_hlo = f.read() if hlo != other_hlo: logging.info("Found diff", for_platform, harness.fullname, jax.default_backend(), on_platform) diffs.append(f"Found diff between {output_file} and {other_file}") return diffs def write_and_check_harnesses(io: Io, save_directory: str, *, filter_harness: Optional[Callable[[str], bool]] = None, for_platforms: Sequence[str] = ("cpu", "tpu"), verbose = False): logging.info("Writing and checking harnesses at %s", save_directory) nr_harnesses = len(primitive_harness.all_harnesses) for i, harness in enumerate(primitive_harness.all_harnesses): if i % 100 == 0: logging.info("Trying cross-lowering for harness #%d/%d", i, nr_harnesses) enable_xla = harness.params.get("enable_xla", True) if not enable_xla: if verbose: logging.info("Skip %s due to enable_xla=False", harness.fullname) continue if filter_harness is not None and not filter_harness(harness.fullname): if verbose: logging.info("Skip %s due to filter_harness", harness.fullname) continue write_and_check_harness(harness, io, save_directory, for_platforms=for_platforms) def main(argv: Sequence[str]) -> None: if len(argv) > 1: raise app.UsageError("Too many command-line arguments.") def filter_harness(name: str) -> bool: return "cummax" in name for_platforms = ('cpu', 'tpu') write_and_check_harnesses(Io(False), "./hlo_dumps", filter_harness=filter_harness, for_platforms=for_platforms) if __name__ == "__main__": app.run(main)
2,071
test status subscriber error
import unittest import multiprocessing import sys import time import ipaddress import broker class TestCommunication(unittest.TestCase): def test_ping(self): # --peer-start with broker.Endpoint() as ep1, \ broker.Endpoint() as ep2, \ ep1.make_subscriber("/test") as s1, \ ep2.make_subscriber("/test") as s2: port = ep1.listen("127.0.0.1", 0) self.assertTrue(ep2.peer("127.0.0.1", port, 1.0)) ep1.await_peer(ep2.node_id()) ep2.await_peer(ep1.node_id()) # --peer-end # --ping-start ep2.publish("/test", ["ping"]) (t, d) = s1.get() # t == "/test", d == ["ping"] # --ping-end self.assertEqual(t, "/test") self.assertEqual(d[0], "ping") ep1.publish(t, ["pong"]) while True: # This loop exists just for sake of test coverage for "poll()" msgs = s2.poll() if msgs: self.assertEqual(len(msgs), 1) (t, d) = msgs[0] break; time.sleep(0.1) self.assertEqual(t, "/test") self.assertEqual(d[0], "pong") def test_messages(self): with broker.Endpoint() as ep1, \ broker.Endpoint() as ep2, \ ep1.make_subscriber("/test") as s1: port = ep1.listen("127.0.0.1", 0) self.assertTrue(ep2.peer("127.0.0.1", port, 1.0)) ep1.await_peer(ep2.node_id()) ep2.await_peer(ep1.node_id()) msg0 = ("/test/1", ()) ep2.publish(*msg0) # --messages-start msg1 = ("/test/2", (1, 2, 3)) msg2 = ("/test/3", (42, "foo", {"a": "A", "b": ipaddress.IPv4Address('1.2.3.4')})) ep2.publish_batch(msg1, msg2) # --messages-end msgs = s1.get(3) self.assertFalse(s1.available()) self.assertEqual(msgs[0], msg0) self.assertEqual(msgs[1], msg1) self.assertEqual(msgs[2], msg2) # These results are not (all) immutable: try modifying the third # value (the dict) of the last message above. dict_data = msgs[2][1][2] self.assertEqual(len(dict_data), 2) dict_data["c"] = "not immutable" self.assertEqual(len(dict_data), 3) def test_immutable_messages(self): with broker.Endpoint() as ep1, \ broker.Endpoint() as ep2, \ ep1.make_safe_subscriber("/test") as s1: port = ep1.listen("127.0.0.1", 0) ep2.peer("127.0.0.1", port, 1.0) msg = ("/test/1", ({"a": "A"}, set([1,2,3]), ('a', 'b', 'c'))) ep2.publish(*msg) topic, (dict_data, set_data, tuple_data) = s1.get() # The return values are immutable, so each of the following triggers # a type-specific exception. 
with self.assertRaises(TypeError): # 'mappingproxy' object does not support item assignment dict_data["b"] = "B" with self.assertRaises(AttributeError): # 'frozenset' object has no attribute 'add' set_data.add(4) with self.assertRaises(TypeError): # 'tuple' object does not support item assignment tuple_data[3] = 'd' def test_publisher(self): with broker.Endpoint() as ep1, \ broker.Endpoint() as ep2, \ ep1.make_subscriber("/test") as s1, \ ep2.make_publisher("/test") as p2: port = ep1.listen("127.0.0.1", 0) self.assertTrue(ep2.peer("127.0.0.1", port, 1.0)) ep1.await_peer(ep2.node_id()) ep2.await_peer(ep1.node_id()) p2.publish([1, 2, 3]) p2.publish_batch(["a", "b", "c"], [True, False]) msgs = s1.get(3) self.assertFalse(s1.available()) self.assertEqual(msgs[0], ("/test", (1, 2, 3))) self.assertEqual(msgs[1], ("/test", ("a", "b", "c"))) self.assertEqual(msgs[2], ("/test", (True, False))) def test_status_subscriber(self): # --status-start with broker.Endpoint() as ep1, \ broker.Endpoint() as ep2, \ ep1.make_status_subscriber(True) as es1, \ ep2.make_status_subscriber(True) as es2: port = ep1.listen("127.0.0.1", 0) self.assertEqual(ep2.peer("127.0.0.1", port, 1.0), True) ep1.await_peer(ep2.node_id()) ep2.await_peer(ep1.node_id()) st1 = es1.get(2) st2 = es2.get(2) # st1.code() == [broker.SC.EndpointDiscovered, broker.SC.PeerAdded] # st2.code() == [broker.SC.EndpointDiscovered, broker.SC.PeerAdded] # --status-end self.assertEqual(len(st1), 2) self.assertEqual(st1[0].code(), broker.SC.EndpointDiscovered) self.assertEqual(st1[1].code(), broker.SC.PeerAdded) self.assertEqual(len(st2), 2) self.assertEqual(st2[0].code(), broker.SC.EndpointDiscovered) self.assertEqual(st2[1].code(), broker.SC.PeerAdded) self.assertEqual(st2[1].context().network.get().address, "127.0.0.1") def METHOD_NAME(self): # --error-start with broker.Endpoint() as ep1, \ ep1.make_status_subscriber() as es1: r = ep1.peer("127.0.0.1", 1947, 0.0) # Try unavailable port, no retry self.assertEqual(r, False) # Not shown in docs. st1 = es1.get() # s1.code() == broker.EC.PeerUnavailable # --error-end self.assertEqual(st1.code(), broker.EC.PeerUnavailable) # Async version. ep1.peer_nosync("127.0.0.1", 1947, 1.0) st1 = es1.get() self.assertEqual(st1.code(), broker.EC.PeerUnavailable) st1 = es1.get() self.assertEqual(st1.code(), broker.EC.PeerUnavailable) def test_idle_endpoint(self): with broker.Endpoint() as ep1, \ ep1.make_status_subscriber() as es1, \ ep1.make_subscriber("/test") as s1: pass if __name__ == '__main__': unittest.main(verbosity=3)
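Stripped of the unittest scaffolding, the peer/publish/subscribe flow above reduces to a few calls; a minimal sketch:

```python
# Standalone ping sketch using the same broker API as the tests above.
import broker

with broker.Endpoint() as ep1, broker.Endpoint() as ep2, ep1.make_subscriber("/demo") as sub:
    port = ep1.listen("127.0.0.1", 0)        # listen on an ephemeral port
    ep2.peer("127.0.0.1", port, 1.0)
    ep1.await_peer(ep2.node_id())            # wait until the peering is established
    ep2.publish("/demo", ["hello"])
    topic, data = sub.get()
    print(topic, data)                       # /demo ('hello',)
```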
2,072
drop index
from redis import Redis, RedisError, ConnectionPool import datetime import itertools import json import time class Document(object): def __init__(self, id, **fields): self.id = id for k, v in fields.iteritems(): setattr(self, k, v) def __repr__(self): return 'Document %s' % self.__dict__ def snippetize(self, field, size=500, boldTokens=[]): txt = getattr(self, field, '') for tok in boldTokens: txt = txt.replace(tok, "<b>%s</b>" % tok) while size < len(txt) and txt[size] != ' ': size+=1 setattr(self, field, (txt[:size] + '...') if len(txt) > size else txt) class Result(object): def __init__(self, res, hascontent, queryText, duration=0): self.total = res[0] self.duration = duration self.docs = [] tokens = filter(None, queryText.rstrip("\" ").lstrip(" \"").split(' ')) for i in xrange(1, len(res), 2 if hascontent else 1): id = res[i] fields = {} if hascontent: fields = dict( dict(itertools.izip(res[i + 1][::2], res[i + 1][1::2]))) if hascontent else {} try: del fields['id'] except KeyError: pass doc = Document(id, **fields) #print doc if hascontent: try: doc.snippetize('body', size=500, boldTokens = tokens) except Exception as e: print e self.docs.append(doc) def __repr__(self): return 'Result{%d total, docs: %s}' % (self.total, self.docs) class Client(object): NUMERIC = 'numeric' CREATE_CMD = 'FT.CREATE' SEARCH_CMD = 'FT.SEARCH' ADD_CMD = 'FT.ADD' DROP_CMD = 'FT.DROP' class BatchIndexer(object): """ A batch indexer allows you to automatically batch document indexing in pipelines, flushing it every N documents. """ def __init__(self, client, chunk_size = 1000): self.client = client self.pipeline = client.redis.pipeline(False) self.total = 0 self.chunk_size = chunk_size self.current_chunk = 0 def __del__(self): if self.current_chunk: self.commit() def add_document(self, doc_id, nosave = False, score=1.0, **fields): self.client._add_document(doc_id, conn=self.pipeline, nosave = nosave, score = score, **fields) self.current_chunk += 1 self.total += 1 if self.current_chunk >= self.chunk_size: self.commit() def commit(self): self.pipeline.execute() self.current_chunk = 0 def __init__(self, index_name, host='localhost', port=6379): self.host = host self.port = port self.index_name = index_name self.redis = Redis( connection_pool = ConnectionPool(host=host, port=port)) def batch_indexer(self, chunk_size = 100): """ Create a new batch indexer from the client with a given chunk size """ return Client.BatchIndexer(self, chunk_size = chunk_size) def create_index(self, **fields): """ Create the search index. Creating an existing index just updates its properties :param fields: a kwargs consisting of field=[score|NUMERIC] :return: """ self.redis.execute_command( self.CREATE_CMD, self.index_name, *itertools.chain(*fields.items())) def METHOD_NAME(self): """ Drop the index if it exists :return: """ self.redis.execute_command(self.DROP_CMD, self.index_name) def _add_document(self, doc_id, conn = None, nosave = False, score=1.0, **fields): """ Internal add_document used for both batch and single doc indexing """ if conn is None: conn = self.redis args = [self.ADD_CMD, self.index_name, doc_id, score] if nosave: args.append('NOSAVE') args.append('FIELDS') args += list(itertools.chain(*fields.items())) return conn.execute_command(*args) def add_document(self, doc_id, nosave = False, score=1.0, **fields): """ Add a single document to the index. :param doc_id: the id of the saved document. :param nosave: if set to true, we just index the document, and don't save a copy of it.
this means that searches will just return ids. :param score: the document ranking, between 0.0 and 1.0. :fields: kwargs dictionary of the document fields to be saved and/or indexed """ return self._add_document(doc_id, conn=None, nosave=nosave, score=score, **fields) def load_document(self, id): """ Load a single document by id """ fields = self.redis.hgetall(id) try: del fields['id'] except KeyError: pass return Document(id=id, **fields) def search(self, query, offset =0, num = 10, verbatim = False, no_content=False, no_stopwords = False, fields=None, **filters): """ Search eht :param query: :param fields: :param filters: :return: """ args = [self.index_name, query] if no_content: args.append('NOCONTENT') if fields: args.append('INFIELDS') args.append(len(fields)) args += fields if verbatim: args.append('VERBATIM') if no_stopwords: args.append('NOSTOPWORDS') if filters: for k, v in filters.iteritems(): args += ['FILTER', k] + list(v) args += ["LIMIT", offset, num] st = time.time() res = self.redis.execute_command(self.SEARCH_CMD, *args) return Result(res, no_content == False, queryText=query, duration = (time.time()-st)*1000.0)
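Row 2,072 labels the masked method "drop index". Restored in the client's snake_case style (the exact identifier drop_index is inferred from that label), it simply issues FT.DROP for the client's index, as sketched below.

```python
# Sketch of the masked Client method with the name restored as drop_index
# (inferred from the row label); self.redis is the redis-py connection and
# DROP_CMD is the 'FT.DROP' constant defined on the class above.
def drop_index(self):
    """
    Drop the index if it exists
    :return:
    """
    self.redis.execute_command(self.DROP_CMD, self.index_name)
```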
2,073
create next relation
# Copyright Contributors to the Amundsen project. # SPDX-License-Identifier: Apache-2.0 from typing import Iterator, Union from amundsen_common.utils.atlas import AtlasCommonParams, AtlasCommonTypes from databuilder.models.atlas_entity import AtlasEntity from databuilder.models.atlas_relationship import AtlasRelationship from databuilder.models.atlas_serializable import AtlasSerializable from databuilder.models.graph_node import GraphNode from databuilder.models.graph_relationship import GraphRelationship from databuilder.models.graph_serializable import GraphSerializable from databuilder.serializers.atlas_serializer import get_entity_attrs from databuilder.utils.atlas import AtlasRelationshipTypes, AtlasSerializedEntityOperation class ResourceReport(GraphSerializable, AtlasSerializable): """ Resource Report matching model Report represents a document that can be linked to any resource (like a table) in Amundsen. Example would be Pandas Profiling HTML report containing full advanced profile of a table. """ RESOURCE_REPORT_LABEL = 'Report' RESOURCE_REPORT_NAME = 'name' RESOURCE_REPORT_URL = 'url' REPORT_KEY_FORMAT = '{resource_uri}/_report/{report_name}' REPORT_RESOURCE_RELATION_TYPE = 'REFERS_TO' RESOURCE_REPORT_RELATION_TYPE = 'HAS_REPORT' def __init__(self, name: str, url: str, resource_uri: str, resource_label: str, # for example 'Table' ) -> None: self.report_name = name self.report_url = url self.resource_uri = resource_uri self.resource_label = resource_label self.resource_report_key = self.get_resource_model_key() self._node_iter = self._create_node_iterator() self._relation_iter = self._create_relation_iterator() self._atlas_entity_iterator = self._create_next_atlas_entity() self._atlas_relation_iterator = self._create_atlas_relation_iterator() def get_resource_model_key(self) -> str: return ResourceReport.REPORT_KEY_FORMAT.format(resource_uri=self.resource_uri, report_name=self.report_name) def create_next_node(self) -> Union[GraphNode, None]: # creates new node try: return next(self._node_iter) except StopIteration: return None def METHOD_NAME(self) -> Union[GraphRelationship, None]: try: return next(self._relation_iter) except StopIteration: return None def _create_node_iterator(self) -> Iterator[GraphNode]: """ Create an application node :return: """ report_node = GraphNode( key=self.resource_report_key, label=ResourceReport.RESOURCE_REPORT_LABEL, attributes={ ResourceReport.RESOURCE_REPORT_NAME: self.report_name, ResourceReport.RESOURCE_REPORT_URL: self.report_url } ) yield report_node def _create_relation_iterator(self) -> Iterator[GraphRelationship]: """ Create relations between application and table nodes :return: """ graph_relationship = GraphRelationship( start_key=self.resource_uri, start_label=self.resource_label, end_key=self.resource_report_key, end_label=ResourceReport.RESOURCE_REPORT_LABEL, type=ResourceReport.RESOURCE_REPORT_RELATION_TYPE, reverse_type=ResourceReport.REPORT_RESOURCE_RELATION_TYPE, attributes={} ) yield graph_relationship def create_next_atlas_entity(self) -> Union[AtlasEntity, None]: try: return next(self._atlas_entity_iterator) except StopIteration: return None def _create_next_atlas_entity(self) -> Iterator[AtlasEntity]: group_attrs_mapping = [ (AtlasCommonParams.qualified_name, self.resource_report_key), ('name', self.report_name), ('url', self.report_url) ] entity_attrs = get_entity_attrs(group_attrs_mapping) entity = AtlasEntity( typeName=AtlasCommonTypes.resource_report, operation=AtlasSerializedEntityOperation.CREATE, relationships=None, 
attributes=entity_attrs, ) yield entity def create_next_atlas_relation(self) -> Union[AtlasRelationship, None]: try: return next(self._atlas_relation_iterator) except StopIteration: return None def _create_atlas_relation_iterator(self) -> Iterator[AtlasRelationship]: relationship = AtlasRelationship( relationshipType=AtlasRelationshipTypes.referenceable_report, entityType1=self.resource_label, entityQualifiedName1=self.resource_uri, entityType2=AtlasCommonTypes.resource_report, entityQualifiedName2=self.resource_report_key, attributes={} ) yield relationship
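Row 2,073's label "create next relation" matches the GraphSerializable counterpart of create_next_node already present in the class, so the masked method presumably restores to the sketch below.

```python
# Restored sketch (assumed name: create_next_relation, per the row label),
# as it would sit inside ResourceReport above: drain the pre-built
# relationship iterator one item at a time, returning None once exhausted.
def create_next_relation(self) -> Union[GraphRelationship, None]:
    try:
        return next(self._relation_iter)
    except StopIteration:
        return None
```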
2,074
get source
# Copyright The Cloud Custodian Authors. # SPDX-License-Identifier: Apache-2.0 """Data Resource Provider implementation. """ import os from pathlib import Path from c7n.actions import ActionRegistry from c7n.exceptions import PolicyExecutionError, PolicyValidationError from c7n.filters import FilterRegistry from c7n.manager import ResourceManager from c7n.provider import Provider, clouds from c7n.query import sources from c7n.registry import PluginRegistry from c7n.utils import load_file, jmespath_search @clouds.register("c7n") class CustodianProvider(Provider): display_name = "Custodian Core" resources = PluginRegistry("policy") resource_prefix = "c7n" # lazy load chicken sacrifice resource_map = {"c7n.data": "c7n.data.Data"} def get_session_factory(self, config): return NullSession() def initialize(self, options): return def initialize_policies(self, policy_collection, options): return policy_collection class NullSession: """dummy session""" @sources.register('static') class StaticSource: def __init__(self, queries): self.queries = queries def __iter__(self): records = [] for q in self.queries: records.extend(q.get("records", ())) return iter(records) def validate(self): for q in self.queries: if not isinstance(q.get("records", None), (list, tuple)): raise PolicyValidationError("invalid static data source `records`") @sources.register('disk') class DiskSource: def __init__(self, queries): self.queries = queries def validate(self): for q in self.queries: if not os.path.exists(q["path"]): raise PolicyValidationError("invalid disk path %s" % q) if os.path.isdir(q["path"]) and "glob" not in q: raise PolicyValidationError("glob pattern required for dir") def __iter__(self): for q in self.queries: for collection in self.scan_path( path=q["path"], resource_key=q.get("key"), glob=q.get("glob") ): for p in collection: yield p def scan_path(self, path, glob, resource_key): if os.path.isfile(path): yield self.load_file(path, resource_key) return for path in Path(path).glob(glob): yield self.load_file(str(path), resource_key) def load_file(self, path, resource_key): data = load_file(path) if resource_key: data = jmespath_search(resource_key, data) if not isinstance(data, list): raise PolicyExecutionError( "found disk records at %s in non list format %s" % (path, type(data)) ) return DataFile(path, resource_key, data) class DataFile: __slots__ = ("path", "records", "resource_key") def __init__(self, path, resource_key, records): self.path = path self.resource_key = resource_key self.records = records def __iter__(self): return iter(self.records) @CustodianProvider.resources.register("data") class Data(ResourceManager): action_registry = ActionRegistry("c7n.data.actions") filter_registry = FilterRegistry("c7n.data.filters") source_mapping = {"static": StaticSource, "disk": DiskSource} def validate(self): if self.data.get("source", "disk") not in self.source_mapping: raise PolicyValidationError("invalid source %s" % self.data["source"]) self.METHOD_NAME().validate() def get_resources(self, resource_ids): return [] def resources(self): with self.ctx.tracer.subsegment("resource-fetch"): source = self.METHOD_NAME() resources = list(source) with self.ctx.tracer.subsegment("filter"): resources = self.filter_resources(resources) return resources def METHOD_NAME(self): source_type = self.data.get("source", "disk") return self.source_mapping[source_type](self.data.get("query", []))
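Row 2,074 labels the masked method "get source". As it is called from validate() and resources(), it presumably restores to get_source on the Data resource manager, roughly as below.

```python
# Restored sketch (assumed name: get_source, per the row label): pick the
# configured source class ("static" or "disk") from source_mapping and
# instantiate it with the policy's query list.
def get_source(self):
    source_type = self.data.get("source", "disk")
    return self.source_mapping[source_type](self.data.get("query", []))
```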
2,075
test force open completions event
import unittest from test.test_support import requires from Tkinter import Tk, Text import idlelib.AutoComplete as ac import idlelib.AutoCompleteWindow as acw from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Event class AutoCompleteWindow: def complete(): return class DummyEditwin: def __init__(self, root, text): self.root = root self.text = text self.indentwidth = 8 self.tabwidth = 8 self.context_use_ps1 = True class AutoCompleteTest(unittest.TestCase): @classmethod def setUpClass(cls): requires('gui') cls.root = Tk() cls.text = Text(cls.root) cls.editor = DummyEditwin(cls.root, cls.text) @classmethod def tearDownClass(cls): del cls.editor, cls.text cls.root.destroy() del cls.root def setUp(self): self.editor.text.delete('1.0', 'end') self.autocomplete = ac.AutoComplete(self.editor) def test_init(self): self.assertEqual(self.autocomplete.editwin, self.editor) def test_make_autocomplete_window(self): testwin = self.autocomplete._make_autocomplete_window() self.assertIsInstance(testwin, acw.AutoCompleteWindow) def test_remove_autocomplete_window(self): self.autocomplete.autocompletewindow = ( self.autocomplete._make_autocomplete_window()) self.autocomplete._remove_autocomplete_window() self.assertIsNone(self.autocomplete.autocompletewindow) def METHOD_NAME(self): # Test that force_open_completions_event calls _open_completions o_cs = Func() self.autocomplete.open_completions = o_cs self.autocomplete.force_open_completions_event('event') self.assertEqual(o_cs.args, (True, False, True)) def test_try_open_completions_event(self): Equal = self.assertEqual autocomplete = self.autocomplete trycompletions = self.autocomplete.try_open_completions_event o_c_l = Func() autocomplete._open_completions_later = o_c_l # _open_completions_later should not be called with no text in editor trycompletions('event') Equal(o_c_l.args, None) # _open_completions_later should be called with COMPLETE_ATTRIBUTES (1) self.text.insert('1.0', 're.') trycompletions('event') Equal(o_c_l.args, (False, False, False, 1)) # _open_completions_later should be called with COMPLETE_FILES (2) self.text.delete('1.0', 'end') self.text.insert('1.0', '"./Lib/') trycompletions('event') Equal(o_c_l.args, (False, False, False, 2)) def test_autocomplete_event(self): Equal = self.assertEqual autocomplete = self.autocomplete # Test that the autocomplete event is ignored if user is pressing a # modifier key in addition to the tab key ev = Event(mc_state=True) self.assertIsNone(autocomplete.autocomplete_event(ev)) del ev.mc_state # If autocomplete window is open, complete() method is called self.text.insert('1.0', 're.') # This must call autocomplete._make_autocomplete_window() Equal(self.autocomplete.autocomplete_event(ev), 'break') # If autocomplete window is not active or does not exist, # open_completions is called. Return depends on its return. 
autocomplete._remove_autocomplete_window() o_cs = Func() # .result = None autocomplete.open_completions = o_cs Equal(self.autocomplete.autocomplete_event(ev), None) Equal(o_cs.args, (False, True, True)) o_cs.result = True Equal(self.autocomplete.autocomplete_event(ev), 'break') Equal(o_cs.args, (False, True, True)) def test_open_completions_later(self): # Test that autocomplete._delayed_completion_id is set pass def test_delayed_open_completions(self): # Test that autocomplete._delayed_completion_id set to None and that # open_completions only called if insertion index is the same as # _delayed_completion_index pass def test_open_completions(self): # Test completions of files and attributes as well as non-completion # of errors pass def test_fetch_completions(self): # Test that fetch_completions returns 2 lists: # For attribute completion, a large list containing all variables, and # a small list containing non-private variables. # For file completion, a large list containing all files in the path, # and a small list containing files that do not start with '.' pass def test_get_entity(self): # Test that a name is in the namespace of sys.modules and # __main__.__dict__ pass if __name__ == '__main__': unittest.main(verbosity=2)
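Row 2,075's label spells the test name out almost verbatim, so the masked method in AutoCompleteTest is presumably test_force_open_completions_event, as sketched below.

```python
# Restored sketch of the masked test (name taken directly from the row label),
# as it would sit inside AutoCompleteTest above. Func, from mock_idle, records
# the arguments it was called with; the test checks that
# force_open_completions_event delegates to open_completions with
# the argument triple (True, False, True).
def test_force_open_completions_event(self):
    o_cs = Func()
    self.autocomplete.open_completions = o_cs
    self.autocomplete.force_open_completions_event('event')
    self.assertEqual(o_cs.args, (True, False, True))
```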
2,076
replace variables
# SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <[email protected]> # # SPDX-License-Identifier: GPL-3.0-or-later """Module containing command managers (SearchRunner and CommandRunner).""" import traceback import re import contextlib from typing import TYPE_CHECKING, Callable, Dict, Iterator, Mapping, MutableMapping from qutebrowser.qt.core import pyqtSlot, QUrl, QObject from qutebrowser.api import cmdutils from qutebrowser.commands import cmdexc, parser from qutebrowser.utils import message, objreg, qtutils, usertypes, utils from qutebrowser.keyinput import macros, modeman if TYPE_CHECKING: from qutebrowser.mainwindow import tabbedbrowser _ReplacementFunction = Callable[['tabbedbrowser.TabbedBrowser'], str] last_command = {} def _url(tabbed_browser): """Convenience method to get the current url.""" try: return tabbed_browser.current_url() except qtutils.QtValueError as e: msg = "Current URL is invalid" if e.reason: msg += " ({})".format(e.reason) msg += "!" raise cmdutils.CommandError(msg) def _init_variable_replacements() -> Mapping[str, _ReplacementFunction]: """Return a dict from variable replacements to fns processing them.""" replacements: Dict[str, _ReplacementFunction] = { 'url': lambda tb: _url(tb).toString( QUrl.ComponentFormattingOption.FullyEncoded | QUrl.UrlFormattingOption.RemovePassword), 'url:pretty': lambda tb: _url(tb).toString( QUrl.ComponentFormattingOption.DecodeReserved | QUrl.UrlFormattingOption.RemovePassword), 'url:domain': lambda tb: "{}://{}{}".format( _url(tb).scheme(), _url(tb).host(), ":" + str(_url(tb).port()) if _url(tb).port() != -1 else ""), 'url:auth': lambda tb: "{}:{}@".format( _url(tb).userName(), _url(tb).password()) if _url(tb).userName() else "", 'url:scheme': lambda tb: _url(tb).scheme(), 'url:username': lambda tb: _url(tb).userName(), 'url:password': lambda tb: _url(tb).password(), 'url:host': lambda tb: _url(tb).host(), 'url:port': lambda tb: str( _url(tb).port()) if _url(tb).port() != -1 else "", 'url:path': lambda tb: _url(tb).path(), 'url:query': lambda tb: _url(tb).query(), 'title': lambda tb: tb.widget.page_title(tb.widget.currentIndex()), 'clipboard': lambda _: utils.get_clipboard(), 'primary': lambda _: utils.get_clipboard(selection=True), } for key in list(replacements): modified_key = '{' + key + '}' # x = modified_key is to avoid binding x as a closure replacements[modified_key] = ( lambda _, x=modified_key: x) # type: ignore[misc] return replacements VARIABLE_REPLACEMENTS = _init_variable_replacements() # A regex matching all variable replacements VARIABLE_REPLACEMENT_PATTERN = re.compile( "{(?P<var>" + "|".join(VARIABLE_REPLACEMENTS.keys()) + ")}") def METHOD_NAME(win_id, arglist): """Utility function to replace variables like {url} in a list of args.""" tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) values: MutableMapping[str, str] = {} args = [] def repl_cb(matchobj): """Return replacement for given match.""" var = matchobj.group("var") if var not in values: values[var] = VARIABLE_REPLACEMENTS[var](tabbed_browser) return values[var] try: for arg in arglist: # using re.sub with callback function replaces all variables in a # single pass and avoids expansion of nested variables (e.g. 
# "{url}" from clipboard is not expanded) args.append(VARIABLE_REPLACEMENT_PATTERN.sub(repl_cb, arg)) except utils.ClipboardError as e: raise cmdutils.CommandError(e) return args class AbstractCommandRunner(QObject): """Abstract base class for CommandRunner.""" def run(self, text, count=None, *, safely=False): raise NotImplementedError @pyqtSlot(str, int) @pyqtSlot(str) def run_safely(self, text, count=None): """Run a command and display exceptions in the statusbar.""" self.run(text, count, safely=True) class CommandRunner(AbstractCommandRunner): """Parse and run qutebrowser commandline commands. Attributes: _win_id: The window this CommandRunner is associated with. """ def __init__(self, win_id, partial_match=False, find_similar=True, parent=None): super().__init__(parent) self._parser = parser.CommandParser( partial_match=partial_match, find_similar=find_similar, ) self._win_id = win_id @contextlib.contextmanager def _handle_error(self, safely: bool) -> Iterator[None]: """Show exceptions as errors if safely=True is given.""" try: yield except cmdexc.Error as e: if safely: message.error(str(e), stack=traceback.format_exc()) else: raise def run(self, text, count=None, *, safely=False): """Parse a command from a line of text and run it. Args: text: The text to parse. count: The count to pass to the command. safely: Show CmdError exceptions as messages. """ record_last_command = True record_macro = True mode_manager = modeman.instance(self._win_id) cur_mode = mode_manager.mode parsed = None with self._handle_error(safely): parsed = self._parser.parse_all(text) if parsed is None: return # type: ignore[unreachable] for result in parsed: with self._handle_error(safely): if result.cmd.no_replace_variables: args = result.args else: args = METHOD_NAME(self._win_id, result.args) result.cmd.run(self._win_id, args, count=count) if result.cmdline[0] in ['repeat-command', 'cmd-repeat-last']: record_last_command = False if result.cmdline[0] in ['macro-record', 'macro-run', 'set-cmd-text', 'cmd-set-text']: record_macro = False if record_last_command: last_command[cur_mode] = (text, count) if record_macro and cur_mode == usertypes.KeyMode.normal: macros.macro_recorder.record_command(text, count)
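Row 2,076 labels the masked module-level function "replace variables" (presumably replace_variables(win_id, arglist)). Its core trick, a single re.sub pass with a caching callback so each {variable} is computed at most once and nested expansions are avoided, can be shown without the qutebrowser plumbing; the replacement table below is illustrative only, and the win_id/tabbed-browser lookup is dropped.

```python
import re

# Stand-in replacement table; the real one maps {url}, {title}, {clipboard},
# etc. to callables that query the current tabbed browser.
REPLACEMENTS = {
    'url': lambda: 'https://example.com',
    'title': lambda: 'Example page',
}
PATTERN = re.compile(r'{(?P<var>' + '|'.join(REPLACEMENTS) + r')}')

def replace_variables(arglist):
    """Expand {var} placeholders in each argument in a single regex pass."""
    values = {}

    def repl_cb(matchobj):
        var = matchobj.group('var')
        if var not in values:            # compute each variable only once
            values[var] = REPLACEMENTS[var]()
        return values[var]

    return [PATTERN.sub(repl_cb, arg) for arg in arglist]

print(replace_variables(['open {url}', 'say {title}']))
# ['open https://example.com', 'say Example page']
```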
2,077
get name cache info
from functools import lru_cache import logging import re from lona import default_settings ABSTRACT_ROUTE_RE = re.compile(r'<(?P<name>[^:>]+)(:(?P<pattern>[^>]+))?>') ROUTE_PART_FORMAT_STRING = r'(?P<{}>{})' DEFAULT_PATTERN = r'[^/]+' OPTIONAL_TRAILING_SLASH_PATTERN = r'(/)' MATCH_ALL = 1 logger = logging.getLogger('lona.routing') class Route: def __init__(self, raw_pattern, view, name='', interactive=True, http_pass_through=False, frontend_view=None): self.raw_pattern = raw_pattern self.view = view self.name = name self.interactive = interactive self.http_pass_through = http_pass_through self.frontend_view = frontend_view self.path = None self.format_string = '' self.optional_trailing_slash = False # match all if self.raw_pattern == MATCH_ALL: self.path = MATCH_ALL # string or regex else: raw_pattern = self.raw_pattern if raw_pattern.endswith(OPTIONAL_TRAILING_SLASH_PATTERN): self.optional_trailing_slash = True raw_pattern = \ raw_pattern[:-len(OPTIONAL_TRAILING_SLASH_PATTERN)] groups = ABSTRACT_ROUTE_RE.findall(raw_pattern) # path is no pattern but simple string if not groups: self.path = raw_pattern self.format_string = raw_pattern return pattern_names = [i[0] for i in groups] patterns = [(i[0], i[2] or DEFAULT_PATTERN) for i in groups] cleaned_pattern = ABSTRACT_ROUTE_RE.sub('{}', raw_pattern) # setup format string self.format_string = cleaned_pattern.format( *['{' + i + '}' for i in pattern_names]) # compile pattern self.pattern = re.compile( r'^{}{}$'.format( # NOQA: FS002 cleaned_pattern.format( *[ROUTE_PART_FORMAT_STRING.format(*i) for i in patterns], ), (r'(/)?' if self.optional_trailing_slash else ''), ), ) def match(self, path): # match all if self.path == MATCH_ALL: return True, {} # simple string if self.path: if self.optional_trailing_slash and path.endswith('/'): path = path[:-1] return path == self.path, {} # pattern match_object = self.pattern.match(path) if not match_object: return False, {} return True, match_object.groupdict() def __repr__(self): raw_pattern = self.raw_pattern if raw_pattern == MATCH_ALL: raw_pattern = 'MATCH_ALL' return f'<Route({raw_pattern}, {self.view})>' class Router: def __init__(self): self.routes = [] self.resize_name_cache( default_settings.ROUTING_NAME_CACHE_MAX_SIZE, ) self.resize_resolve_cache( default_settings.ROUTING_RESOLVE_CACHE_MAX_SIZE, ) self.resize_reverse_cache( default_settings.ROUTING_REVERSE_CACHE_MAX_SIZE, ) # caches ################################################################## # name def resize_name_cache(self, max_size): self._name_lru_cache = lru_cache(max_size)(self._get_route) def METHOD_NAME(self): return self._name_lru_cache.cache_info() def clear_name_cache_info(self): return self._name_lru_cache.cache_clear() # resolve def resize_resolve_cache(self, max_size): self._resolve_lru_cache = lru_cache(max_size)(self._resolve) def get_resolve_cache_info(self): return self._resolve_lru_cache.cache_info() def clear_resolve_cache_info(self): return self._resolve_lru_cache.cache_clear() # reverse def resize_reverse_cache(self, max_size): self._reverse_lru_cache = lru_cache(max_size)(self._reverse) def get_reverse_cache_info(self): return self._reverse_lru_cache.cache_info() def clear_reverse_cache_info(self): return self._reverse_lru_cache.cache_clear() # routes ################################################################## def add_route(self, route): # check if route name already exists if route.name: for _route in self.routes: if route.name == _route.name: logger.warning( "route name '%s' already exists", route.name, ) 
self.routes.append(route) def add_routes(self, *routes): for route in routes: self.add_route(route) def _get_route(self, name): for route in self.routes: if route.name == name: return route def get_route(self, *args, **kwargs): return self._name_lru_cache(*args, **kwargs) # resolve ################################################################# def _resolve(self, path): logger.debug("resolving '%s'", path) for route in self.routes: match, match_info = route.match(path) if match: logger.debug('%s matched', route) return True, route, match_info logger.debug("no match for '%s'", path) return False, None, {} def resolve(self, *args, **kwargs): return self._resolve_lru_cache(*args, **kwargs) # reverse ################################################################# def _reverse(self, route_name, *args, **kwargs): route = None for _route in self.routes: if _route.name == route_name: route = _route break if not route: raise ValueError(f"no route named '{route_name}' found") if route.path: return route.path try: return route.format_string.format(*args, **kwargs) except KeyError as e: key_error = e # raise is outside of except block to avoid stacking tracebacks raise ValueError(f'missing URL arg: {key_error.args[0]}') def reverse(self, *args, **kwargs): return self._reverse_lru_cache(*args, **kwargs)
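Row 2,077's label "get name cache info" lines up with the neighbouring get_resolve_cache_info and get_reverse_cache_info accessors, so the masked method presumably returns the functools.lru_cache statistics of the name lookup. A small self-contained sketch of that caching pattern follows; RouterSketch and its stub lookup are illustrative, not the real Router.

```python
from functools import lru_cache

class RouterSketch:
    def __init__(self, max_size=128):
        # Wrap the lookup at runtime so the cache size stays configurable.
        self._name_lru_cache = lru_cache(max_size)(self._get_route)

    def _get_route(self, name):
        return f'<route {name}>'   # stand-in for the real linear search

    def get_route(self, name):
        return self._name_lru_cache(name)

    def get_name_cache_info(self):  # the presumed restored name
        return self._name_lru_cache.cache_info()

router = RouterSketch()
router.get_route('index')
router.get_route('index')
print(router.get_name_cache_info())  # CacheInfo(hits=1, misses=1, maxsize=128, currsize=1)
```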
2,078
get function
from math import floor import numpy as np from scipy.signal import savgol_coeffs, savgol_filter from woodwork.column_schema import ColumnSchema from woodwork.logical_types import Double from featuretools.primitives.base import TransformPrimitive class SavgolFilter(TransformPrimitive): """Applies a Savitzky-Golay filter to a list of values. Description: Given a list of values, return a smoothed list which increases the signal to noise ratio without greatly distoring the signal. Uses the `Savitzky–Golay filter` method. If the input list has less than 20 values, it will be returned as is. See the following page for more info: https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.signal.savgol_filter.html Args: window_length (int): The length of the filter window (i.e. the number of coefficients). `window_length` must be a positive odd integer. polyorder (int): The order of the polynomial used to fit the samples. `polyorder` must be less than `window_length`. deriv (int): Optional. The order of the derivative to compute. This must be a nonnegative integer. The default is 0, which means to filter the data without differentiating. delta (float): Optional. The spacing of the samples to which the filter will be applied. This is only used if deriv > 0. Default is 1.0. mode (str): Optional. Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. This determines the type of extension to use for the padded signal to which the filter is applied. When `mode` is 'constant', the padding value is given by `cval`. See the Notes for more details on 'mirror', 'constant', 'wrap', and 'nearest'. When the 'interp' mode is selected (the default), no extension is used. Instead, a degree `polyorder` polynomial is fit to the last `window_length` values of the edges, and this polynomial is used to evaluate the last `window_length // 2` output values. cval (scalar): Optional. Value to fill past the edges of the input if `mode` is 'constant'. Default is 0.0. Examples: >>> savgol_filter = SavgolFilter() >>> data = [0, 1, 1, 2, 3, 4, 5, 7, 8, 7, 9, 9, 12, 11, 12, 14, 15, 17, 17, 17, 20] >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]] [0.0429, 0.8286, 1.2571] We can control `window_length` and `polyorder` of the filter. >>> savgol_filter = SavgolFilter(window_length=13, polyorder=3) >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]] [-0.0962, 0.6484, 1.4451] We can also control the `deriv` and `delta` parameters. >>> savgol_filter = SavgolFilter(deriv=1, delta=1.5) >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]] [0.754, 0.3492, 0.2778] Finally, we can use `mode` to control how edge values are handled. 
>>> savgol_filter = SavgolFilter(mode='constant', cval=5) >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]] [1.5429, 0.2286, 1.2571] """ name = "savgol_filter" input_types = [ColumnSchema(semantic_tags={"numeric"})] return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"}) def __init__( self, window_length=None, polyorder=None, deriv=0, delta=1.0, mode="interp", cval=0.0, ): if window_length is not None and polyorder is not None: try: if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]: raise ValueError( "mode must be 'mirror', 'constant', " "'nearest', 'wrap' or 'interp'.", ) savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta) except Exception: raise elif (window_length is None and polyorder is not None) or ( window_length is not None and polyorder is None ): error_text = ( "Both window_length and polyorder must be defined if you define one." ) raise ValueError(error_text) self.window_length = window_length self.polyorder = polyorder self.deriv = deriv self.delta = delta self.mode = mode self.cval = cval def METHOD_NAME(self): def smooth(x): if x.shape[0] < 20: return x if np.isnan(np.min(x)): # interpolate the nan values, works for edges & middle nans mask = np.isnan(x) x[mask] = np.interp( np.flatnonzero(mask), np.flatnonzero(~mask), x[~mask], ) window_length = self.window_length polyorder = self.polyorder if window_length is None and polyorder is None: window_length = floor(len(x) / 10) * 2 + 1 polyorder = 3 return savgol_filter( x, window_length=window_length, polyorder=polyorder, deriv=self.deriv, delta=self.delta, mode=self.mode, cval=self.cval, ) return smooth
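Row 2,078 labels the masked method "get function", the usual featuretools TransformPrimitive hook that returns the callable applied to a column. A trimmed, self-contained sketch of the same closure pattern is below; the NaN interpolation and the mode/deriv/delta/cval handling of the original are omitted for brevity.

```python
from math import floor

import numpy as np
from scipy.signal import savgol_filter

def get_function(window_length=None, polyorder=None):
    def smooth(x):
        x = np.asarray(x, dtype=float)
        if x.shape[0] < 20:                 # too short to smooth, return as-is
            return x
        wl, po = window_length, polyorder
        if wl is None and po is None:       # derive defaults from series length
            wl = floor(len(x) / 10) * 2 + 1
            po = 3
        return savgol_filter(x, window_length=wl, polyorder=po)
    return smooth

smooth = get_function()
print(smooth(np.arange(25.0))[:3])
```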
2,079
get revision object
from time import time from django.contrib.admin.utils import unquote from django.core.exceptions import PermissionDenied from django.http import Http404, JsonResponse from django.http.request import QueryDict from django.shortcuts import get_object_or_404 from django.template.response import TemplateResponse from django.utils.decorators import method_decorator from django.views.generic import View from wagtail.admin.panels import get_edit_handler from wagtail.models import PreviewableMixin, RevisionMixin from wagtail.utils.decorators import xframe_options_sameorigin_override class PreviewOnEdit(View): model = None form_class = None http_method_names = ("post", "get", "delete") preview_expiration_timeout = 60 * 60 * 24 # seconds session_key_prefix = "wagtail-preview-" def setup(self, request, *args, **kwargs): super().setup(request, *args, **kwargs) self.object = self.get_object() def dispatch(self, request, *args, **kwargs): if not isinstance(self.object, PreviewableMixin): raise Http404 return super().dispatch(request, *args, **kwargs) def remove_old_preview_data(self): expiration = time() - self.preview_expiration_timeout expired_keys = [ k for k, v in self.request.session.items() if k.startswith(self.session_key_prefix) and v[1] < expiration ] # Removes the session key gracefully for k in expired_keys: self.request.session.pop(k) @property def session_key(self): app_label = self.model._meta.app_label model_name = self.model._meta.model_name unique_key = f"{app_label}-{model_name}-{self.object.pk}" return f"{self.session_key_prefix}{unique_key}" def get_object(self): obj = get_object_or_404(self.model, pk=unquote(self.kwargs["pk"])) if isinstance(obj, RevisionMixin): obj = obj.get_latest_revision_as_object() return obj def get_form_class(self): if self.form_class: return self.form_class return get_edit_handler(self.model).get_form_class() def get_form(self, query_dict): form_class = self.get_form_class() if not query_dict: # Query dict is empty, return null form return form_class(instance=self.object, for_user=self.request.user) return form_class(query_dict, instance=self.object, for_user=self.request.user) def _get_data_from_session(self): post_data, _ = self.request.session.get(self.session_key, (None, None)) if not isinstance(post_data, str): post_data = "" return QueryDict(post_data) def post(self, request, *args, **kwargs): self.remove_old_preview_data() form = self.get_form(request.POST) is_valid = form.is_valid() if is_valid: # TODO: Handle request.FILES. 
request.session[self.session_key] = request.POST.urlencode(), time() is_available = True else: # Check previous data in session to determine preview availability form = self.get_form(self._get_data_from_session()) is_available = form.is_valid() return JsonResponse({"is_valid": is_valid, "is_available": is_available}) def error_response(self): return TemplateResponse( self.request, "wagtailadmin/generic/preview_error.html", {"object": self.object}, ) @method_decorator(xframe_options_sameorigin_override) def get(self, request, *args, **kwargs): form = self.get_form(self._get_data_from_session()) if not form.is_valid(): return self.error_response() form.save(commit=False) try: preview_mode = request.GET.get("mode", self.object.default_preview_mode) except IndexError: raise PermissionDenied extra_attrs = { "in_preview_panel": request.GET.get("in_preview_panel") == "true", "is_editing": True, } return self.object.make_preview_request(request, preview_mode, extra_attrs) def delete(self, request, *args, **kwargs): request.session.pop(self.session_key, None) return JsonResponse({"success": True}) class PreviewOnCreate(PreviewOnEdit): @property def session_key(self): app_label = self.model._meta.app_label model_name = self.model._meta.model_name return f"{self.session_key_prefix}{app_label}-{model_name}" def get_object(self): return self.model() class PreviewRevision(View): model = None http_method_names = ("get",) def setup(self, request, pk, revision_id, *args, **kwargs): super().setup(request, *args, **kwargs) self.pk = pk self.revision_id = revision_id self.object = self.get_object() self.revision_object = self.METHOD_NAME() def get_object(self): if not issubclass(self.model, RevisionMixin): raise Http404 return get_object_or_404(self.model, pk=unquote(self.pk)) def METHOD_NAME(self): revision = get_object_or_404(self.object.revisions, id=self.revision_id) return revision.as_object() def get(self, request, *args, **kwargs): try: preview_mode = request.GET.get( "mode", self.revision_object.default_preview_mode ) except IndexError: raise PermissionDenied return self.revision_object.make_preview_request(request, preview_mode)
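Row 2,079's label "get revision object" names the masked PreviewRevision method, which presumably restores to the sketch below.

```python
# Restored sketch (assumed name: get_revision_object), as it would sit inside
# PreviewRevision above: fetch the requested revision from the object's
# revision manager and turn it back into a model instance for previewing.
def get_revision_object(self):
    revision = get_object_or_404(self.object.revisions, id=self.revision_id)
    return revision.as_object()
```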
2,080
execution instance
# Python import pytest from unittest import mock from contextlib import contextmanager from awx.main.models import Credential, UnifiedJob, Instance from awx.main.tests.factories import ( create_organization, create_job_template, create_instance, create_instance_group, create_notification_template, create_survey_spec, create_workflow_job_template, ) from django.core.cache import cache from django.conf import settings def pytest_addoption(parser): parser.addoption("--genschema", action="store_true", default=False, help="execute schema validator") def pytest_configure(config): import sys sys._called_from_test = True def pytest_unconfigure(config): import sys del sys._called_from_test @pytest.fixture def mock_access(): @contextmanager def access_given_class(TowerClass): try: mock_instance = mock.MagicMock(__name__='foobar') MockAccess = mock.MagicMock(return_value=mock_instance) the_patch = mock.patch.dict('awx.main.access.access_registry', {TowerClass: MockAccess}, clear=False) the_patch.__enter__() yield mock_instance finally: the_patch.__exit__() return access_given_class @pytest.fixture def job_template_factory(): return create_job_template @pytest.fixture def organization_factory(): return create_organization @pytest.fixture def notification_template_factory(): return create_notification_template @pytest.fixture def survey_spec_factory(): return create_survey_spec @pytest.fixture def instance_factory(): return create_instance @pytest.fixture def instance_group_factory(): return create_instance_group @pytest.fixture def controlplane_instance_group(instance_factory, instance_group_factory): """There always has to be a controlplane instancegroup and at least one instance in it""" return create_instance_group(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, create_instance('hybrid-1', node_type='hybrid', capacity=500)) @pytest.fixture def default_instance_group(instance_factory, instance_group_factory): return create_instance_group("default", instances=[create_instance("hostA", node_type='execution')]) @pytest.fixture def control_instance(): '''Control instance in the controlplane automatic IG''' inst = create_instance('control-1', node_type='control', capacity=500) return inst @pytest.fixture def control_instance_low_capacity(): '''Control instance in the controlplane automatic IG that has low capacity''' inst = create_instance('control-1', node_type='control', capacity=5) return inst @pytest.fixture def METHOD_NAME(): '''Execution node in the automatic default IG''' ig = create_instance_group('default') inst = create_instance('receptor-1', node_type='execution', capacity=500) ig.instances.add(inst) return inst @pytest.fixture def hybrid_instance(): '''Hybrid node in the default controlplane IG''' inst = create_instance('hybrid-1', node_type='hybrid', capacity=500) return inst @pytest.fixture def job_template_with_survey_passwords_factory(job_template_factory): def rf(persisted): "Returns job with linked JT survey with password survey questions" objects = job_template_factory( 'jt', organization='org1', survey=[ {'variable': 'submitter_email', 'type': 'text', 'default': '[email protected]'}, {'variable': 'secret_key', 'default': '6kQngg3h8lgiSTvIEb21', 'type': 'password'}, {'variable': 'SSN', 'type': 'password'}, ], persisted=persisted, ) return objects.job_template return rf @pytest.fixture def job_with_secret_key_unit(job_with_secret_key_factory): return job_with_secret_key_factory(persisted=False) @pytest.fixture def workflow_job_template_factory(): return create_workflow_job_template 
@pytest.fixture def job_template_with_survey_passwords_unit(job_template_with_survey_passwords_factory): return job_template_with_survey_passwords_factory(persisted=False) @pytest.fixture def mock_cache(): class MockCache(object): cache = {} def get(self, key, default=None): return self.cache.get(key, default) def set(self, key, value, timeout=60): self.cache[key] = value def delete(self, key): del self.cache[key] return MockCache() def pytest_runtest_teardown(item, nextitem): # clear Django cache at the end of every test ran # NOTE: this should not be memcache (as it is deprecated), nor should it be redis. # This is a local test cache, so we want every test to start with an empty cache cache.clear() @pytest.fixture(scope='session', autouse=True) def mock_external_credential_input_sources(): # Credential objects query their related input sources on initialization. # We mock that behavior out of credentials by default unless we need to # test it explicitly. with mock.patch.object(Credential, 'dynamic_input_fields', new=[]) as _fixture: yield _fixture @pytest.fixture(scope='session', autouse=True) def mock_has_unpartitioned_events(): # has_unpartitioned_events determines if there are any events still # left in the old, unpartitioned job events table. In order to work, # this method looks up when the partition migration occurred. When # Django's unit tests run, however, there will be no record of the migration. # We mock this out to circumvent the migration query. with mock.patch.object(UnifiedJob, 'has_unpartitioned_events', new=False) as _fixture: yield _fixture @pytest.fixture(scope='session', autouse=True) def mock_get_event_queryset_no_job_created(): """ SQLite friendly since partitions aren't supported. Do not add the faked job_created field to the filter. If we do, it will result in an sql query for the job_created field. That field does not actually exist in a non-partition scenario. """ def event_qs(self): kwargs = {self.event_parent_key: self.id} return self.event_class.objects.filter(**kwargs) with mock.patch.object(UnifiedJob, 'get_event_queryset', lambda self: event_qs(self)) as _fixture: yield _fixture @pytest.fixture def mock_me(): me_mock = mock.MagicMock(return_value=Instance(id=1, hostname=settings.CLUSTER_HOST_ID, uuid='00000000-0000-0000-0000-000000000000')) with mock.patch.object(Instance.objects, 'me', me_mock): yield
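Row 2,080 labels the masked pytest fixture "execution instance"; matching the naming of the sibling fixtures, it presumably restores as below.

```python
# Restored sketch of the masked fixture (assumed name: execution_instance,
# per the row label), using the same factory helpers as the fixtures above.
@pytest.fixture
def execution_instance():
    '''Execution node in the automatic default IG'''
    ig = create_instance_group('default')
    inst = create_instance('receptor-1', node_type='execution', capacity=500)
    ig.instances.add(inst)
    return inst
```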
2,081
type
# coding=utf-8 # *** WARNING: this file was generated by pulumi. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs __all__ = [ 'GetApiPortalResult', 'AwaitableGetApiPortalResult', 'get_api_portal', 'get_api_portal_output', ] @pulumi.output_type class GetApiPortalResult: """ API portal resource """ def __init__(__self__, id=None, name=None, properties=None, sku=None, system_data=None, METHOD_NAME=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if properties and not isinstance(properties, dict): raise TypeError("Expected argument 'properties' to be a dict") pulumi.set(__self__, "properties", properties) if sku and not isinstance(sku, dict): raise TypeError("Expected argument 'sku' to be a dict") pulumi.set(__self__, "sku", sku) if system_data and not isinstance(system_data, dict): raise TypeError("Expected argument 'system_data' to be a dict") pulumi.set(__self__, "system_data", system_data) if METHOD_NAME and not isinstance(METHOD_NAME, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", METHOD_NAME) @property @pulumi.getter def id(self) -> str: """ Fully qualified resource Id for the resource. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ The name of the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def properties(self) -> 'outputs.ApiPortalPropertiesResponse': """ API portal properties payload """ return pulumi.get(self, "properties") @property @pulumi.getter def sku(self) -> Optional['outputs.SkuResponse']: """ Sku of the API portal resource """ return pulumi.get(self, "sku") @property @pulumi.getter(name="systemData") def system_data(self) -> 'outputs.SystemDataResponse': """ Metadata pertaining to creation and last modification of the resource. """ return pulumi.get(self, "system_data") @property @pulumi.getter def METHOD_NAME(self) -> str: """ The type of the resource. """ return pulumi.get(self, "type") class AwaitableGetApiPortalResult(GetApiPortalResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetApiPortalResult( id=self.id, name=self.name, properties=self.properties, sku=self.sku, system_data=self.system_data, METHOD_NAME=self.METHOD_NAME) def get_api_portal(api_portal_name: Optional[str] = None, resource_group_name: Optional[str] = None, service_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiPortalResult: """ Get the API portal and its properties. :param str api_portal_name: The name of API portal. :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. :param str service_name: The name of the Service resource. 
""" __args__ = dict() __args__['apiPortalName'] = api_portal_name __args__['resourceGroupName'] = resource_group_name __args__['serviceName'] = service_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:appplatform/v20230701preview:getApiPortal', __args__, opts=opts, typ=GetApiPortalResult).value return AwaitableGetApiPortalResult( id=pulumi.get(__ret__, 'id'), name=pulumi.get(__ret__, 'name'), properties=pulumi.get(__ret__, 'properties'), sku=pulumi.get(__ret__, 'sku'), system_data=pulumi.get(__ret__, 'system_data'), METHOD_NAME=pulumi.get(__ret__, 'type')) @_utilities.lift_output_func(get_api_portal) def get_api_portal_output(api_portal_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, service_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApiPortalResult]: """ Get the API portal and its properties. :param str api_portal_name: The name of API portal. :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. :param str service_name: The name of the Service resource. """ ...
2,082
test invalid human handle
# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS from __future__ import annotations import zlib from unicodedata import normalize import pytest from parsec._parsec import ( DataError, DeviceID, DeviceName, EntryName, EntryNameError, HumanHandle, OrganizationID, SecretKey, UserID, ) from parsec._parsec import ( FileManifest as RemoteFileManifest, ) from parsec._parsec import ( FolderManifest as RemoteFolderManifest, ) from parsec._parsec import ( UserManifest as RemoteUserManifest, ) from parsec._parsec import ( WorkspaceManifest as RemoteWorkspaceManifest, ) from parsec.serde import packb from tests.common import LocalDevice @pytest.mark.parametrize("cls", (UserID, DeviceName, OrganizationID)) @pytest.mark.parametrize( "data", ( "!x", # Invalid character " x", # Invalid character "x" * 33, # Too long # Sinogram encoded on 3 bytes with utf8, so those 11 characters # form a 33 bytes long utf8 string ! "飞" * 11, "😀", # Not a unicode word "", ), ) def test_max_bytes_size(cls, data): with pytest.raises(ValueError): cls(data) @pytest.mark.parametrize("cls", (UserID, DeviceName, OrganizationID)) def test_normalization(cls): nfc_str = normalize("NFC", "àæßšūÿź") # cspell: disable-line nfd_str = normalize("NFD", nfc_str) assert nfc_str != nfd_str assert cls(nfd_str).str == nfc_str assert cls(nfc_str).str == nfc_str assert cls(nfc_str + nfd_str).str == nfc_str + nfc_str @pytest.mark.parametrize("cls", (UserID, DeviceName, OrganizationID)) @pytest.mark.parametrize( "data", ("x", "x" * 32, "飞" * 10 + "xx", "X1-_é飞") # 32 bytes long utf8 string # Mix-and-match ) def test_good_pattern(cls, data): cls(data) @pytest.mark.parametrize( "data", ( "!x@x", # Invalid character "x@ ", # Invalid character "x" * 66, # Too long # Sinogram encoded on 3 bytes with utf8, so those 22 characters # form a 66 bytes long utf8 string ! 
"飞" * 22, "😀@x", # Not a unicode word "x", # Missing @ separator "@x", "x@", "x" * 62 + "@x", # Respect overall length but not UserID length "x@" + "x" * 62, # Respect overall length but not DeviceName length "", ), ) def test_max_bytes_size_device_id(data): with pytest.raises(ValueError): DeviceID(data) @pytest.mark.parametrize( "data", ( "x@x", "x" * 32 + "@" + "x" * 32, "飞" * 10 + "xx@xx" + "飞" * 10, # 65 bytes long utf8 string "X1-_é飞@X1-_é飞", # Mix-and-match ), ) def test_good_pattern_device_id(data): DeviceID(data) def test_human_handle_compare(): a = HumanHandle(email="[email protected]", label="Alice") a2 = HumanHandle(email="[email protected]", label="Whatever") b = HumanHandle(email="[email protected]", label="Bob") assert a == a2 assert a != b assert b == b @pytest.mark.parametrize( "email,label", ( ("[email protected]", "Alice"), ("a@x", "A"), # Smallest size (f"{'a' * 64}@{'x' * 185}.com", "x" * 254), # Max sizes (f"{'飞' * 21}@{'飞' * 62}.com", f"{'飞' * 84}xx"), # Unicode & max size ("[email protected]", "J.D."), ), ) def test_valid_human_handle(email, label): HumanHandle(email, label) @pytest.mark.parametrize( "email,label", ( ("[email protected]", "x" * 255), (f"{'@example.com':a>255}", "Alice"), ("[email protected]", "飞" * 85), # 255 bytes long utf8 label (f"{'飞' * 21}@{'飞' * 63}.x", "Alice"), # 255 bytes long utf8 email ("[email protected]", ""), # Empty label ("", "Alice"), # Empty email ("", "Alice <[email protected]>"), # Empty email and misleading label ("Alice <[email protected]>", ""), # Empty label and misleading label ("Alice <@example.com>", "Alice"), # Missing local part in email ), ) def METHOD_NAME(email, label): with pytest.raises(ValueError): HumanHandle(email, label) def test_human_handle_normalization(): nfc_label = normalize("NFC", "àæßšūÿź") # cspell: disable-line nfd_label = normalize("NFD", nfc_label) nfc_email = normalize("NFC", "àæßš@ūÿ.ź") # cspell: disable-line nfd_email = normalize("NFD", nfc_email) assert nfc_label != nfd_label assert nfc_email != nfd_email hh = HumanHandle(nfd_email, nfd_label) assert hh.email == nfc_email assert hh.label == nfc_label hh = HumanHandle(nfc_email, nfc_label) assert hh.email == nfc_email assert hh.label == nfc_label @pytest.mark.parametrize( "data", ( "foo", "foo.txt", "x" * 255, # Max size "飞" * 85, # Unicode & max size "X1-_é飞", "🌍☄️==🦕🦖💀", # Probably a bad name for a real folder... 
".a", # Dot and dot-dot are allowed if they are not alone "..a", "a..", "a.", ), ) def test_valid_entry_name(data): EntryName(data) @pytest.mark.parametrize("data", ("x" * 256, "飞" * 85 + "x")) def test_entry_name_too_long(data): with pytest.raises(EntryNameError): EntryName(data) @pytest.mark.parametrize( "data", ( ".", # Not allowed "..", # Not allowed "/x", # Slash not allowed "x/x", "x/", "/", "\x00x", # Null-byte not allowed "x\x00x", "x\x00", "\x00", ), ) def test_invalid_entry_name(data): with pytest.raises(ValueError): EntryName(data) def test_entry_name_normalization(): nfc_str = normalize( "NFC", "àáâäæãåāçćčèéêëēėęîïíīįìłñńôöòóœøōõßśšûüùúūÿžźż" # cspell: disable-line ) nfd_str = normalize("NFD", nfc_str) assert nfc_str != nfd_str assert EntryName(nfd_str).str == nfc_str assert EntryName(nfc_str).str == nfc_str assert EntryName(nfc_str + nfd_str).str == nfc_str + nfc_str def test_remote_manifests_load_invalid_data(alice: LocalDevice): key = SecretKey.generate() valid_zip_msgpack_but_bad_fields = zlib.compress(packb({"foo": 42})) valid_zip_bud_bad_msgpack = zlib.compress(b"dummy") invalid_zip = b"\x42" * 10 for cls in ( RemoteFileManifest, RemoteFolderManifest, RemoteWorkspaceManifest, RemoteUserManifest, ): print(f"Testing class {cls.__name__}") with pytest.raises(DataError): cls.decrypt_verify_and_load( b"", key=key, author_verify_key=alice.verify_key, expected_author=alice.device_id, expected_timestamp=alice.timestamp(), ) with pytest.raises(DataError): cls.decrypt_verify_and_load( invalid_zip, key=key, author_verify_key=alice.verify_key, expected_author=alice.device_id, expected_timestamp=alice.timestamp(), ) with pytest.raises(DataError): cls.decrypt_verify_and_load( valid_zip_bud_bad_msgpack, key=key, author_verify_key=alice.verify_key, expected_author=alice.device_id, expected_timestamp=alice.timestamp(), ) # Valid to deserialize, invalid fields with pytest.raises(DataError): cls.decrypt_verify_and_load( valid_zip_msgpack_but_bad_fields, key=key, author_verify_key=alice.verify_key, expected_author=alice.device_id, expected_timestamp=alice.timestamp(), )
2,083
test out file
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for the gtest_xml_output module.""" import os from xml.dom import minidom, Node from googletest.test import gtest_test_utils from googletest.test import gtest_xml_test_utils GTEST_OUTPUT_SUBDIR = "xml_outfiles" GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_" GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_" EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests"> <testsuite name="PropertyOne" tests="1" failures="0" skipped="0" disabled="0" errors="0" time="*" timestamp="*"> <testcase name="TestSomeProperties" file="gtest_xml_outfile1_test_.cc" line="41" status="run" result="completed" time="*" timestamp="*" classname="PropertyOne"> <properties> <property name="SetUpProp" value="1"/> <property name="TestSomeProperty" value="1"/> <property name="TearDownProp" value="1"/> </properties> </testcase> </testsuite> </testsuites> """ EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests"> <testsuite name="PropertyTwo" tests="1" failures="0" skipped="0" disabled="0" errors="0" time="*" timestamp="*"> <testcase name="TestSomeProperties" file="gtest_xml_outfile2_test_.cc" line="41" status="run" result="completed" time="*" timestamp="*" classname="PropertyTwo"> <properties> <property name="SetUpProp" value="2"/> <property name="TestSomeProperty" value="2"/> <property name="TearDownProp" value="2"/> </properties> </testcase> </testsuite> </testsuites> """ class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase): """Unit test for Google Test's XML output functionality.""" def setUp(self): # We want the trailing '/' that the last "" provides in os.path.join, for # telling Google Test to create an output directory instead of a single file # for xml output. 
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(), GTEST_OUTPUT_SUBDIR, "") self.DeleteFilesAndDir() def tearDown(self): self.DeleteFilesAndDir() def DeleteFilesAndDir(self): try: os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml")) except os.error: pass try: os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml")) except os.error: pass try: os.rmdir(self.output_dir_) except os.error: pass def testOutfile1(self): self.METHOD_NAME(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1) def testOutfile2(self): self.METHOD_NAME(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2) def METHOD_NAME(self, test_name, expected_xml): gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name) command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_] p = gtest_test_utils.Subprocess(command, working_dir=gtest_test_utils.GetTempDir()) self.assert_(p.exited) self.assertEquals(0, p.exit_code) output_file_name1 = test_name + ".xml" output_file1 = os.path.join(self.output_dir_, output_file_name1) output_file_name2 = 'lt-' + output_file_name1 output_file2 = os.path.join(self.output_dir_, output_file_name2) self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2), output_file1) expected = minidom.parseString(expected_xml) if os.path.isfile(output_file1): actual = minidom.parse(output_file1) else: actual = minidom.parse(output_file2) self.NormalizeXml(actual.documentElement) self.AssertEquivalentNodes(expected.documentElement, actual.documentElement) expected.unlink() actual.unlink() if __name__ == "__main__": os.environ["GTEST_STACK_TRACE_DEPTH"] = "0" gtest_test_utils.Main()
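Row 2,083's label "test out file", combined with this file's CamelCase helper convention, suggests the masked helper is named something like _TestOutFile. Restored, its opening steps look like the sketch below.

```python
# Restored sketch of the masked helper (the name _TestOutFile is inferred from
# the row label and the file's naming style): run one test binary with
# --gtest_output pointed at the output directory, then compare the produced
# XML against the expected document as the body above does.
def _TestOutFile(self, test_name, expected_xml):
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)
    # ... XML parsing and AssertEquivalentNodes comparison continue as above.
```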
2,084
replace tests
#!/usr/bin/env python3 # NUnit test validator for csplugin tasks import json import os import re import sys from subprocess import call, DEVNULL def replace_all(lines, s1, s2): for i in range(len(lines)): s = lines[i] lines[i] = re.sub(s1, s2, lines[i]) def replace_by(lines, instructions): replace = instructions.get("replace", None) if not replace: return for cond in replace: s1 = cond.get("sub", "") if not s1: continue s2 = cond.get("by", "") replace_all(lines, s1, s2) def find_test(lines, testname): if not testname: return -1, -1 reg = re.compile(testname) i1 = i2 = -1 n = 0 for i in range(len(lines)): s = lines[i] res = reg.match(s) if res: i1 = i - 1 else: if i1 >= 0 and s.find("{") >= 0: n += 1 if i1 >= 0 and s.find("}") >= 0: i2 = i n -= 1 if n <= 0: break return i1, i2 def METHOD_NAME(lines, test): n = 1 for t in test: i1, i2 = find_test(lines, t.get("replaceline", None)) if i1 < 0 or i2 < 0: continue tlines = lines[i1 : i2 + 1] del lines[i1 : i2 + 1] replacecall = t.get("replacecall", "") byline = t.get("byline", "") for tst in t.get("bycalls", []): tmethod = list(tlines) tc = tst.get("call", "") tr = tst.get("result", "") if byline: tst["name"] = tc + "xxxx" + str(n) tmethod[1] = byline + tst["name"] + "()\n" n += 1 # replace_all(tmethod, replacecall, tr) replace_all(tmethod, replacecall, tc) lines[i1:i1] = tmethod def count_points(lines, test): p = 0 for t in test: for tst in t.get("bycalls", []): name = tst.get("call", "XXXX") result = tst.get("result", "XXXX") # NUnit 3 uses "Passed" instead of "Success" and "Failed" instead of "Failure" if result == "Success": result = "Passed" elif result == "Failure": result = "Failed" expl = tst.get("expl", "???") pts = tst.get("pts", 1) line = [s for s in lines if s.find(name + "xxxx") >= 0] if line: line = line[0] rst = 'result="' i = line.find(rst) if i >= 0: line = line[i + len(rst) :] i = line.find('"') if i >= 0: xmlres = line[:i] if xmlres == result: p += pts else: pts = tst.get("wrong", 0) p += pts print( expl + ": pitäisi tulla " + result + ", tuli: " + xmlres + ". 
Pisteitä:", pts, ) return p def scale_points(pts, points): if not points: return pts p = 0 for pt in points: if pts < pt.get("from", 0): return p p = pt.get("p", pts) return p GLOBAL_NUGET_PACKAGES_PATH = "/cs_data/dotnet/nuget_cache" def get_build_refs(ref_type): with open(f"/cs_data/dotnet/configs/{ref_type}.build.deps", encoding="utf-8") as f: dep_paths = [ os.path.join(GLOBAL_NUGET_PACKAGES_PATH, dep_line.strip()) for dep_line in f.readlines() ] return [f"-r:{p}" for p in dep_paths] def main(): filename = sys.argv[1] filename2 = sys.argv[2] filename3 = "T" + filename lines = open(filename).readlines() lines2 = open(filename2).read() # yaml # instructions = yaml.load(lines2, CLoader) # insert = instructions.get("insert", None) # json instructions = json.loads(lines2) insertfile = instructions.get("insert", None) insert = "" if insertfile: insert = open(insertfile).read() replace_by(lines, instructions) METHOD_NAME(lines, instructions.get("test", None)) # print("".join(lines)) # print(insert) f = open(filename3, "w") f.writelines(lines) if insert: f.write(insert) f.close() args1 = [ "/cs/dotnet/csc", "-nologo", f"-out:{filename3}.dll", "-target:library", *get_build_refs("nunit_test"), *get_build_refs("jypeli"), filename3, ] sourceFiles = instructions.get("sourceFiles", []) for sourceFile in sourceFiles: args1.append(sourceFile) ret = call(args1) # print(ret) # print(args1) if ret != 0: print("Testikoodi ei käänny") return args = ["/cs/dotnet/nunit-test-dll", f"{filename3}.dll"] ret = call(args, stdout=DEVNULL, stderr=DEVNULL, timeout=20) # https://docs.nunit.org/articles/nunit/running-tests/Console-Runner.html # print(args) if ret < 0: print("Testikoodia ei voi ajaa") xml = open("TestResult.xml").readlines() # print("\n".join(xml)) points = count_points(xml, instructions.get("test", None)) points = scale_points(points, instructions.get("points", None)) print("Points: " + f"{points:.2f}") if __name__ == "__main__": main()
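Row 2,084 labels the masked helper "replace tests" (presumably replace_tests(lines, test)). Because the dump flattens indentation, the block structure below is a best-effort reconstruction: for each test spec, the template method located by find_test() is cut out and re-emitted once per "bycalls" entry with the recorded call substituted in.

```python
def replace_tests(lines, test):
    n = 1
    for t in test:
        i1, i2 = find_test(lines, t.get("replaceline", None))
        if i1 < 0 or i2 < 0:
            continue
        tlines = lines[i1:i2 + 1]
        del lines[i1:i2 + 1]
        replacecall = t.get("replacecall", "")
        byline = t.get("byline", "")
        for tst in t.get("bycalls", []):
            tmethod = list(tlines)
            tc = tst.get("call", "")
            if byline:
                # Give the generated test a unique, recognisable name; the
                # "xxxx<n>" suffix is what count_points() later searches for.
                tst["name"] = tc + "xxxx" + str(n)
                tmethod[1] = byline + tst["name"] + "()\n"
                n += 1
            replace_all(tmethod, replacecall, tc)
            lines[i1:i1] = tmethod
```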
2,085
locals example defined before
# pylint: disable=missing-docstring, invalid-name, too-few-public-methods, import-outside-toplevel, fixme, line-too-long, broad-exception-raised def test_regression_737(): import xml # [unused-import] def test_regression_923(): import unittest.case # [unused-import] import xml as sql # [unused-import] def test_unused_with_prepended_underscore(): _foo = 42 _ = 24 __a = 24 dummy = 24 _a_ = 42 # [unused-variable] __a__ = 24 # [unused-variable] __never_used = 42 def test_local_field_prefixed_with_unused_or_ignored(): flagged_local_field = 42 # [unused-variable] unused_local_field = 42 ignored_local_field = 42 class HasUnusedDunderClass: def test(self): __class__ = 42 # [unused-variable] def best(self): self.test() def METHOD_NAME(): value = 42 # [possibly-unused-variable] return locals() def locals_example_defined_after(): local_variables = locals() value = 42 # [unused-variable] return local_variables def locals_does_not_account_for_subscopes(): value = 42 # [unused-variable] def some_other_scope(): return locals() return some_other_scope def unused_import_from(): from functools import wraps as abc # [unused-import] from collections import namedtuple # [unused-import] def unused_import_in_function(value): from string import digits, hexdigits # [unused-import] return value if value in digits else "Nope" def hello(arg): my_var = 'something' # [unused-variable] if arg: return True raise Exception # pylint: disable=wrong-import-position PATH = OS = collections = deque = None def function(matches): """"yo""" aaaa = 1 # [unused-variable] index = -1 for match in matches: index += 1 print(match) from astroid import nodes def visit_if(self, node: nodes.If) -> None: """increments the branches counter""" branches = 1 # don't double count If nodes coming from some 'elif' if node.orelse and len(node.orelse) > 1: branches += 1 self.inc_branch(branches) self.stmts += branches def test_global(): """ Test various assignments of global variables through imports. 
""" # pylint: disable=redefined-outer-name global PATH, OS, collections, deque # [global-statement] from os import path as PATH import os as OS import collections from collections import deque # make sure that these triggers unused-variable from sys import platform # [unused-import] from sys import version as VERSION # [unused-import] import this # [unused-import] import re as RE # [unused-import] # test cases that include exceptions def function2(): unused = 1 # [unused-variable] try: 1 / 0 except ZeroDivisionError as error: try: 1 / 0 except ZeroDivisionError as error: # [redefined-outer-name] raise Exception("") from error def func(): try: 1 / 0 except ZeroDivisionError as error: try: 1 / 0 except error: print("error") def func2(): try: 1 / 0 except ZeroDivisionError as error: try: 1 / 0 except: raise Exception("") from error def func3(): try: 1 / 0 except ZeroDivisionError as error: print(f"{error}") try: 1 / 2 except TypeError as error: # [unused-variable, redefined-outer-name] print("warning") def func4(): try: 1 / 0 except ZeroDivisionError as error: # [unused-variable] try: 1 / 0 except ZeroDivisionError as error: # [redefined-outer-name] print("error") def main(lst): """https://github.com/pylint-dev/astroid/pull/1111#issuecomment-890367609""" try: raise ValueError except ValueError as e: # [unused-variable] pass for e in lst: pass # e will be undefined if lst is empty print(e) # [undefined-loop-variable] main([]) def func5(): """No unused-variable for a container if iterated in comprehension""" x = [] # Test case requires homonym between "for x" and "in x" assert [True for x in x] def sibling_except_handlers(): try: pass except ValueError as e: print(e) try: pass except ValueError as e: print(e) def func6(): a = 1 def nonlocal_writer(): nonlocal a for a in range(10): pass nonlocal_writer() assert a == 9, a def test_regression_8595(): # pylint: disable=broad-exception-caught import logging def compute(): pass try: compute() error = False except Exception as e: logging.error(e) error = True if error: try: compute() except Exception as e: # [unused-variable] pass
2,086
can see ban details
from django.apps import AppConfig from django.utils.translation import pgettext_lazy from .pages import user_profile, usercp, users_list class MisagoUsersConfig(AppConfig): name = "misago.users" label = "misago_users" verbose_name = "Misago Auth" def ready(self): from . import signals as _ from .admin import tasks # pylint: disable=unused-import self.register_default_usercp_pages() self.register_default_users_list_pages() self.register_default_user_profile_pages() def register_default_usercp_pages(self): def auth_is_not_delegated(request): return not request.settings.enable_oauth2_client usercp.add_section( link="misago:usercp-change-forum-options", name=pgettext_lazy("user options page", "Forum options"), component="forum-options", icon="settings", ) usercp.add_section( link="misago:usercp-edit-details", name=pgettext_lazy("user options page", "Edit details"), component="edit-details", icon="person_outline", ) usercp.add_section( link="misago:usercp-change-username", name=pgettext_lazy("user options page", "Change username"), component="change-username", icon="card_membership", visible_if=auth_is_not_delegated, ) usercp.add_section( link="misago:usercp-change-email-password", name=pgettext_lazy("user options page", "Change email or password"), component="sign-in-credentials", icon="vpn_key", visible_if=auth_is_not_delegated, ) def can_download_own_data(request): return request.settings.allow_data_downloads usercp.add_section( link="misago:usercp-download-data", name=pgettext_lazy("user options page", "Download data"), component="download-data", icon="save_alt", visible_if=can_download_own_data, ) def can_delete_own_account(request): if not auth_is_not_delegated(request): return False return request.settings.allow_delete_own_account usercp.add_section( link="misago:usercp-delete-account", name=pgettext_lazy("user options page", "Delete account"), component="delete-account", icon="cancel", visible_if=can_delete_own_account, ) def register_default_users_list_pages(self): users_list.add_section( link="misago:users-active-posters", component="active-posters", name=pgettext_lazy("users lists page", "Top posters"), ) def register_default_user_profile_pages(self): def can_see_names_history(request, profile): if request.user.is_authenticated: is_account_owner = profile.pk == request.user.pk has_permission = request.user_acl["can_see_users_name_history"] return is_account_owner or has_permission return False def METHOD_NAME(request, profile): if request.user.is_authenticated: if request.user_acl["can_see_ban_details"]: from .bans import get_user_ban return bool(get_user_ban(profile, request.cache_versions)) return False return False user_profile.add_section( link="misago:user-posts", name=pgettext_lazy("user profile page", "Posts"), icon="message", component="posts", ) user_profile.add_section( link="misago:user-threads", name=pgettext_lazy("user profile page", "Threads"), icon="forum", component="threads", ) user_profile.add_section( link="misago:user-followers", name=pgettext_lazy("user profile page", "Followers"), icon="favorite", component="followers", ) user_profile.add_section( link="misago:user-follows", name=pgettext_lazy("user profile page", "Follows"), icon="favorite_border", component="follows", ) user_profile.add_section( link="misago:user-details", name=pgettext_lazy("user profile page", "Details"), icon="person_outline", component="details", ) user_profile.add_section( link="misago:username-history", name=pgettext_lazy("user profile page", "Username history"), icon="card_membership", 
component="username-history", visible_if=can_see_names_history, ) user_profile.add_section( link="misago:user-ban", name=pgettext_lazy("user profile page", "Ban details"), icon="remove_circle_outline", component="ban-details", visible_if=METHOD_NAME, )
2,087
add xml attr
## Original version of code heavily based on recipe written by Wai Yip ## Tung, released under PSF license. ## http://code.activestate.com/recipes/534109/ import re import os import xml.sax.handler class DataNode (object): def __init__ (self, **kwargs): self._attrs = {} # XML attributes and child elements self._data = None # child text data self._ncDict = kwargs.get ('nameChangeDict', {}) def __len__ (self): # treat single element as a list of 1 return 1 def __getitem__ (self, key): if isinstance (key, str): return self._attrs.get(key,None) else: return [self][key] def __contains__ (self, name): return name in self._attrs def __nonzero__ (self): return bool (self._attrs or self._data) def __getattr__ (self, name): if name.startswith('__'): # need to do this for Python special methods??? raise AttributeError (name) return self._attrs.get (name, None) def METHOD_NAME (self, name, value): change = self._ncDict.get (name) if change: name = change if name in self._attrs: # multiple attribute of the same name are represented by a list children = self._attrs[name] if not isinstance(children, list): children = [children] self._attrs[name] = children children.append(value) else: self._attrs[name] = value def __str__ (self): return self._data or '' def __repr__ (self): items = sorted (self._attrs.items()) if self._data: items.append(('data', self._data)) return u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items]) def attributes (self): return self._attrs class TreeBuilder (xml.sax.handler.ContentHandler): non_id_char = re.compile('[^_0-9a-zA-Z]') def __init__ (self, **kwargs): self._stack = [] self._text_parts = [] self._ncDict = kwargs.get ('nameChangeDict', {}) self._root = DataNode (nameChangeDict = self._ncDict) self.current = self._root def startElement (self, name, attrs): self._stack.append( (self.current, self._text_parts)) self.current = DataNode (nameChangeDict = self._ncDict) self._text_parts = [] # xml attributes --> python attributes for k, v in attrs.items(): self.current.METHOD_NAME (TreeBuilder._name_mangle(k), v) def endElement (self, name): text = ''.join (self._text_parts).strip() if text: self.current._data = text if self.current.attributes(): obj = self.current else: # a text only node is simply represented by the string obj = text or '' self.current, self._text_parts = self._stack.pop() self.current.METHOD_NAME (TreeBuilder._name_mangle(name), obj) def characters (self, content): self._text_parts.append(content) def root (self): return self._root def topLevel (self): '''Returns top level object''' return self._root.attributes().values()[0] @staticmethod def _name_mangle (name): return TreeBuilder.non_id_char.sub('_', name) regexList = [ (re.compile (r'&'), '&amp;' ), (re.compile (r'<'), '&lt;' ), (re.compile (r'>'), '&gt;' ), (re.compile (r'"'), '&quote;' ), (re.compile (r"'"), '&#39;' ) ] quoteRE = re.compile (r'(\w\s*=\s*")([^"]+)"') def fixQuoteValue (match): '''Changes all characters inside of the match''' quote = match.group(2) for regexTup in regexList: quote = regexTup[0].sub( regexTup[1], quote ) return match.group(1) + quote + '"' def xml2obj (**kwargs): ''' Converts XML data into native Python object. Takes either file handle or string as input. Does NOT fix illegal characters. 
input source: Exactly one of the three following is needed filehandle - input from file handle contents - input from string filename - input from filename options: filtering - boolean value telling code whether or not to fileter input selection to remove illegal XML characters nameChangeDict - dictionaries of names to change in python object''' # make sure we have exactly 1 input source filehandle = kwargs.get ('filehandle') contents = kwargs.get ('contents') filename = kwargs.get ('filename') if not filehandle and not contents and not filename: raise RuntimeError("You must provide 'filehandle', 'contents', or 'filename'") if filehandle and contents or \ filehandle and filename or \ contents and filename: raise RuntimeError("You must provide only ONE of 'filehandle', 'contents', or 'filename'") # are we filtering? filtering = kwargs.get ('filtering') if filtering: # if we are filtering, we need to read in the contents to modify them if not contents: if not filehandle: try: filehandle = open (filename, 'r') except: raise RuntimeError("Failed to open '%s'" % filename) contents = '' for line in filehandle: contents += line filehandle.close() filehandle = filename = '' contents = quoteRE.sub (fixQuoteValue, contents) ncDict = kwargs.get ('nameChangeDict', {}) builder = TreeBuilder (nameChangeDict = ncDict) if contents: xml.sax.parseString(contents, builder) else: if not filehandle: try: filehandle = open (filename, 'r') except: raise RuntimeError("Failed to open '%s'" % filename) xml.sax.parse(filehandle, builder) return builder.topLevel()
2,088
retry
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and # limitations under the License. import logging import re # A nosec comment is appended to the following line in order to disable the B404 check. # In this file the input of the module subprocess is trusted. import subprocess as sub # nosec B404 import time import webbrowser from typing import List from argparse import ArgumentParser, Namespace from pcluster.cli.commands.common import CliCommand from pcluster.constants import PCLUSTER_ISSUES_LINK from pcluster.models.cluster import Cluster from pcluster.utils import error DCV_CONNECT_SCRIPT = "/opt/parallelcluster/scripts/pcluster_dcv_connect.sh" LOGGER = logging.getLogger(__name__) class DCVConnectionError(Exception): """Error raised with DCV connection fails.""" pass def _check_command_output(cmd): # A nosec comment is appended to the following line in order to disable the B602 check. # This is done because it's needed to enable the desired functionality. The only caller # of this function is _retrieve_dcv_session_url, which passes a command that is safe. return sub.check_output(cmd, shell=True, universal_newlines=True, stderr=sub.STDOUT).strip() # nosec B602 nosemgrep def _dcv_connect(args): """ Execute pcluster dcv connect command. :param args: pcluster cli arguments. """ try: head_node = Cluster(args.cluster_name).head_node_instance except Exception as e: error(f"Unable to connect to the cluster.\n{e}") else: head_node_ip = head_node.public_ip or head_node.private_ip # Prepare ssh command to execute in the head node instance cmd = 'ssh {CFN_USER}@{HEAD_NODE_IP} {KEY} "{REMOTE_COMMAND} /home/{CFN_USER}"'.format( CFN_USER=head_node.default_user, HEAD_NODE_IP=head_node_ip, KEY="-i {0}".format(args.key_path) if args.key_path else "", REMOTE_COMMAND=DCV_CONNECT_SCRIPT, ) try: url = METHOD_NAME(_retrieve_dcv_session_url, func_args=[cmd, args.cluster_name, head_node_ip], attempts=4) url_message = f"Please use the following one-time URL in your browser within 30 seconds:\n{url}" if args.show_url: print(url_message) return try: if not webbrowser.open_new(url): raise webbrowser.Error("Unable to open the Web browser.") except webbrowser.Error as e: print(f"{e}\n{url_message}") except DCVConnectionError as e: error( "Something went wrong during DCV connection.\n{0}" "Please check the logs in the /var/log/parallelcluster/ folder " "of the head node and submit an issue {1}\n".format(e, PCLUSTER_ISSUES_LINK) ) def _retrieve_dcv_session_url(ssh_cmd, cluster_name, head_node_ip): """Connect by ssh to the head node instance, prepare DCV session and return the DCV session URL.""" try: LOGGER.debug("SSH command: %s", ssh_cmd) output = _check_command_output(ssh_cmd) # At first ssh connection, the ssh command alerts it is adding the host to the known hosts list if re.search("Permanently added .* to the list of known hosts.", output): output = _check_command_output(ssh_cmd) dcv_parameters = re.search( r"PclusterDcvServerPort=([\d]+) PclusterDcvSessionId=([\w]+) PclusterDcvSessionToken=([\w-]+)", output ) if dcv_parameters: dcv_server_port = 
dcv_parameters.group(1) dcv_session_id = dcv_parameters.group(2) dcv_session_token = dcv_parameters.group(3) else: error( "Something went wrong during DCV connection. Please manually execute the command:\n{0}\n" "If the problem persists, please check the logs in the /var/log/parallelcluster/ folder " "of the head node and submit an issue {1}".format(ssh_cmd, PCLUSTER_ISSUES_LINK) ) except sub.CalledProcessError as e: if "{0}: No such file or directory".format(DCV_CONNECT_SCRIPT) in e.output: error( "The cluster {0} has been created with an old version of ParallelCluster " "without the DCV support.".format(cluster_name) ) else: raise DCVConnectionError(e.output) return "https://{IP}:{PORT}?authToken={TOKEN}#{SESSION_ID}".format( IP=head_node_ip, PORT=dcv_server_port, TOKEN=dcv_session_token, SESSION_ID=dcv_session_id ) def METHOD_NAME(func, func_args, attempts=1, wait=0): # pylint: disable=R1710 """ Call function and re-execute it if it raises an Exception. :param func: the function to execute. :param func_args: the positional arguments of the function. :param attempts: the maximum number of attempts. Default: 1. :param wait: delay between attempts. Default: 0. :returns: the result of the function. """ while attempts: try: return func(*func_args) except Exception as e: attempts -= 1 if not attempts: raise e LOGGER.debug("%s, retrying in %s seconds..", e, wait) time.sleep(wait) return None class DcvConnectCommand(CliCommand): """Implement pcluster dcv connect command.""" # CLI name = "dcv-connect" help = "Permits to connect to the head node through an interactive session by using NICE DCV." description = help def __init__(self, subparsers): super().__init__(subparsers, name=self.name, help=self.help, description=self.description) def register_command_args(self, parser: ArgumentParser) -> None: # noqa: D102 parser.add_argument("-n", "--cluster-name", help="Name of the cluster to connect to", required=True) parser.add_argument("--key-path", dest="key_path", help="Key path of the SSH key to use for the connection") parser.add_argument("--show-url", action="store_true", default=False, help="Print URL and exit") def execute(self, args: Namespace, extra_args: List[str]) -> None: # noqa: D102 #pylint: disable=unused-argument _dcv_connect(args)
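As a usage note on the retry helper above (shown as METHOD_NAME in this listing): it simply re-invokes a callable until it returns or the attempts run out. A self-contained sketch of the same calling pattern follows; the flaky() function and the local retry() copy are hypothetical stand-ins, not part of the pcluster code.

import time

def retry(func, func_args, attempts=1, wait=0):
    # Mirrors the helper above: call func(*func_args), re-trying on any exception.
    while attempts:
        try:
            return func(*func_args)
        except Exception as e:
            attempts -= 1
            if not attempts:
                raise e
            time.sleep(wait)

calls = {"n": 0}

def flaky(name):
    # Hypothetical function that fails twice before succeeding.
    calls["n"] += 1
    if calls["n"] < 3:
        raise RuntimeError("not ready yet")
    return f"hello {name}"

print(retry(flaky, func_args=["cluster"], attempts=4, wait=0))  # -> "hello cluster"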
2,089
display participation result
from datetime import timedelta

from django.core.exceptions import ValidationError
from django.db.models import Min, OuterRef, Subquery
from django.template.defaultfilters import floatformat
from django.urls import reverse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _, gettext_lazy

from judge.contest_format.default import DefaultContestFormat
from judge.contest_format.registry import register_contest_format
from judge.utils.timedelta import nice_repr


@register_contest_format('ioi')
class LegacyIOIContestFormat(DefaultContestFormat):
    name = gettext_lazy('IOI (pre-2016)')
    config_defaults = {'cumtime': False}
    """
    cumtime: Specify True if time penalties are to be computed. Defaults to False.
    """

    @classmethod
    def validate(cls, config):
        if config is None:
            return

        if not isinstance(config, dict):
            raise ValidationError('IOI-styled contest expects no config or dict as config')

        for key, value in config.items():
            if key not in cls.config_defaults:
                raise ValidationError('unknown config key "%s"' % key)
            if not isinstance(value, type(cls.config_defaults[key])):
                raise ValidationError('invalid type for config key "%s"' % key)

    def __init__(self, contest, config):
        self.config = self.config_defaults.copy()
        self.config.update(config or {})
        self.contest = contest

    def update_participation(self, participation):
        cumtime = 0
        score = 0
        format_data = {}

        queryset = (participation.submissions.values('problem_id')
                    .filter(points=Subquery(
                        participation.submissions.filter(problem_id=OuterRef('problem_id'))
                        .order_by('-points').values('points')[:1]))
                    .annotate(time=Min('submission__date'))
                    .values_list('problem_id', 'time', 'points'))

        for problem_id, time, points in queryset:
            if self.config['cumtime']:
                dt = (time - participation.start).total_seconds()
                if points:
                    cumtime += dt
            else:
                dt = 0

            format_data[str(problem_id)] = {'points': points, 'time': dt}
            score += points

        participation.cumtime = max(cumtime, 0)
        participation.score = round(score, self.contest.points_precision)
        participation.tiebreaker = 0
        participation.format_data = format_data
        participation.save()

    def display_user_problem(self, participation, contest_problem):
        format_data = (participation.format_data or {}).get(str(contest_problem.id))
        if format_data:
            return format_html(
                '<td class="{state}"><a href="{url}">{points}<div class="solving-time">{time}</div></a></td>',
                state=(('pretest-' if self.contest.run_pretests_only and contest_problem.is_pretested else '') +
                       self.best_solution_state(format_data['points'], contest_problem.points)),
                url=reverse('contest_user_submissions',
                            args=[self.contest.key, participation.user.user.username,
                                  contest_problem.problem.code]),
                points=floatformat(format_data['points']),
                time=nice_repr(timedelta(seconds=format_data['time']), 'noday') if self.config['cumtime'] else '',
            )
        else:
            return mark_safe('<td></td>')

    def METHOD_NAME(self, participation):
        return format_html(
            '<td class="user-points"><a href="{url}">{points}<div class="solving-time">{cumtime}</div></a></td>',
            url=reverse('contest_all_user_submissions',
                        args=[self.contest.key, participation.user.user.username]),
            points=floatformat(participation.score, -self.contest.points_precision),
            cumtime=nice_repr(timedelta(seconds=participation.cumtime), 'noday') if self.config['cumtime'] else '',
        )

    def get_short_form_display(self):
        yield _('The maximum score submission for each problem will be used.')

        if self.config['cumtime']:
            yield _('Ties will be broken by the sum of the last score altering submission time on problems with a '
                    'non-zero score.')
        else:
            yield _('Ties by score will **not** be broken.')
2,090
d set p
"""DistributedNode module: contains the DistributedNode class""" from panda3d.core import NodePath from . import GridParent from . import DistributedObject class DistributedNode(DistributedObject.DistributedObject, NodePath): """Distributed Node class:""" def __init__(self, cr): if not hasattr(self, 'DistributedNode_initialized'): self.DistributedNode_initialized = 1 self.gotStringParentToken = 0 DistributedObject.DistributedObject.__init__(self, cr) if not self.this: NodePath.__init__(self, "DistributedNode") # initialize gridParent self.gridParent = None def disable(self): if self.activeState != DistributedObject.ESDisabled: if not self.isEmpty(): self.reparentTo(hidden) DistributedObject.DistributedObject.disable(self) def delete(self): if not hasattr(self, 'DistributedNode_deleted'): self.DistributedNode_deleted = 1 if not self.isEmpty(): self.removeNode() if self.gridParent: self.gridParent.delete() DistributedObject.DistributedObject.delete(self) def generate(self): DistributedObject.DistributedObject.generate(self) self.gotStringParentToken = 0 def setLocation(self, parentId, zoneId, teleport=0): # Redefine DistributedObject setLocation, so that when # location is set to the ocean grid, we can update our parenting # under gridParent DistributedObject.DistributedObject.setLocation(self, parentId, zoneId) parentObj = self.cr.doId2do.get(parentId) if parentObj: # Make sure you in a zone that is in the grid before making a GridParent if (parentObj.isGridParent() and (zoneId >= parentObj.startingZone)): if not self.gridParent: self.gridParent = GridParent.GridParent(self) self.gridParent.setGridParent(parentObj, zoneId, teleport) else: if self.gridParent: self.gridParent.delete() self.gridParent = None else: if self.gridParent: self.gridParent.delete() self.gridParent = None def __cmp__(self, other): # DistributedNode inherits from NodePath, which inherits a # definition of __cmp__ from FFIExternalObject that uses the # NodePath's compareTo() method to compare different # NodePaths. But we don't want this behavior for # DistributedNodes; DistributedNodes should only be compared # pointerwise. if self is other: return 0 else: return 1 ### setParent ### def b_setParent(self, parentToken): if isinstance(parentToken, str): self.setParentStr(parentToken) else: self.setParent(parentToken) # it's important to call the local setParent first. self.d_setParent(parentToken) def d_setParent(self, parentToken): if isinstance(parentToken, str): self.sendUpdate("setParentStr", [parentToken]) else: self.sendUpdate("setParent", [parentToken]) def setParentStr(self, parentTokenStr): assert self.notify.debug('setParentStr: %s' % parentTokenStr) assert self.notify.debug('isGenerated: %s' % self.isGenerated()) if len(parentTokenStr) > 0: self.do_setParent(parentTokenStr) self.gotStringParentToken = 1 def setParent(self, parentToken): assert self.notify.debug('setParent: %s' % parentToken) assert self.notify.debug('isGenerated: %s' % self.isGenerated()) # if we are not yet generated and we just got a parent token # as a string, ignore whatever value comes in here justGotRequiredParentAsStr = ((not self.isGenerated()) and self.gotStringParentToken) if not justGotRequiredParentAsStr: if parentToken != 0: self.do_setParent(parentToken) self.gotStringParentToken = 0 def do_setParent(self, parentToken): """do_setParent(self, int parentToken) This function is defined simply to allow a derived class (like DistributedAvatar) to override the behavior of setParent if desired. 
""" if not self.isDisabled(): self.cr.parentMgr.requestReparent(self, parentToken) ###### set pos and hpr functions ####### # setX provided by NodePath def d_setX(self, x): self.sendUpdate("setX", [x]) # setY provided by NodePath def d_setY(self, y): self.sendUpdate("setY", [y]) # setZ provided by NodePath def d_setZ(self, z): self.sendUpdate("setZ", [z]) # setH provided by NodePath def d_setH(self, h): self.sendUpdate("setH", [h]) # setP provided by NodePath def METHOD_NAME(self, p): self.sendUpdate("setP", [p]) # setR provided by NodePath def d_setR(self, r): self.sendUpdate("setR", [r]) def setXY(self, x, y): self.setX(x) self.setY(y) def d_setXY(self, x, y): self.sendUpdate("setXY", [x, y]) def setXZ(self, x, z): self.setX(x) self.setZ(z) def d_setXZ(self, x, z): self.sendUpdate("setXZ", [x, z]) # setPos provided by NodePath def d_setPos(self, x, y, z): self.sendUpdate("setPos", [x, y, z]) # setHpr provided by NodePath def d_setHpr(self, h, p, r): self.sendUpdate("setHpr", [h, p, r]) def setXYH(self, x, y, h): self.setX(x) self.setY(y) self.setH(h) def d_setXYH(self, x, y, h): self.sendUpdate("setXYH", [x, y, h]) def setXYZH(self, x, y, z, h): self.setPos(x, y, z) self.setH(h) def d_setXYZH(self, x, y, z, h): self.sendUpdate("setXYZH", [x, y, z, h]) # setPosHpr provided by NodePath def d_setPosHpr(self, x, y, z, h, p, r): self.sendUpdate("setPosHpr", [x, y, z, h, p, r])
2,091
test delete
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import os import shutil import unittest from unittest import mock from azure.monitor.opentelemetry.exporter._storage import ( LocalFileBlob, LocalFileStorage, _now, _seconds, ) TEST_FOLDER = os.path.abspath(".test.storage") def throw(exc_type, *args, **kwargs): def func(*_args, **_kwargs): raise exc_type(*args, **kwargs) return func def clean_folder(folder): if os.path.isfile(folder): for filename in os.listdir(folder): file_path = os.path.join(folder, filename) try: if os.path.isfile(file_path) or os.path.islink(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print('Failed to delete %s. Reason: %s' % (file_path, e)) # pylint: disable=no-self-use class TestLocalFileBlob(unittest.TestCase): @classmethod def setup_class(cls): os.makedirs(TEST_FOLDER, exist_ok=True) @classmethod def tearDownClass(cls): shutil.rmtree(TEST_FOLDER, True) def tearDown(self): clean_folder(TEST_FOLDER) def METHOD_NAME(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "foobar")) blob.delete() def test_get(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "foobar")) self.assertIsNone(blob.get()) blob.get() def test_put_error(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "foobar")) with mock.patch("os.rename", side_effect=throw(Exception)): blob.put([1, 2, 3]) @unittest.skip("transient storage") def test_put(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "foobar.blob")) test_input = (1, 2, 3) blob.put(test_input) self.assertGreaterEqual(len(os.listdir(TEST_FOLDER)), 1) @unittest.skip("transient storage") def test_lease_error(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "foobar.blob")) blob.delete() self.assertEqual(blob.lease(0.01), None) # pylint: disable=protected-access class TestLocalFileStorage(unittest.TestCase): @classmethod def setup_class(cls): os.makedirs(TEST_FOLDER, exist_ok=True) @classmethod def tearDownClass(cls): shutil.rmtree(TEST_FOLDER, True) def test_get_nothing(self): with LocalFileStorage(os.path.join(TEST_FOLDER, "test", "a")) as stor: pass with LocalFileStorage(os.path.join(TEST_FOLDER, "test")) as stor: self.assertIsNone(stor.get()) def test_get(self): now = _now() with LocalFileStorage(os.path.join(TEST_FOLDER, "foo")) as stor: stor.put((1, 2, 3), lease_period=10) with mock.patch("azure.monitor.opentelemetry.exporter._storage._now") as m: m.return_value = now - _seconds(30 * 24 * 60 * 60) stor.put((1, 2, 3)) stor.put((1, 2, 3), lease_period=10) with mock.patch("os.rename"): stor.put((1, 2, 3)) with mock.patch("os.rename"): stor.put((1, 2, 3)) with mock.patch("os.remove", side_effect=throw(Exception)): with mock.patch("os.rename", side_effect=throw(Exception)): self.assertIsNone(stor.get()) self.assertIsNone(stor.get()) def test_put(self): test_input = (1, 2, 3) with LocalFileStorage(os.path.join(TEST_FOLDER, "bar")) as stor: stor.put(test_input, 0) self.assertEqual(stor.get().get(), test_input) with LocalFileStorage(os.path.join(TEST_FOLDER, "bar")) as stor: self.assertEqual(stor.get().get(), test_input) with mock.patch("os.rename", side_effect=throw(Exception)): self.assertIsNone(stor.put(test_input)) def test_put_max_size(self): test_input = (1, 2, 3) with LocalFileStorage(os.path.join(TEST_FOLDER, "asd")) as stor: size_mock = mock.Mock() size_mock.return_value = False stor._check_storage_size = size_mock stor.put(test_input) self.assertEqual(stor.get(), None) def test_check_storage_size_full(self): 
test_input = (1, 2, 3) with LocalFileStorage(os.path.join(TEST_FOLDER, "asd2"), 1) as stor: stor.put(test_input) self.assertFalse(stor._check_storage_size()) def test_check_storage_size_not_full(self): test_input = (1, 2, 3) with LocalFileStorage(os.path.join(TEST_FOLDER, "asd3"), 1000) as stor: stor.put(test_input) self.assertTrue(stor._check_storage_size()) def test_check_storage_size_no_files(self): with LocalFileStorage(os.path.join(TEST_FOLDER, "asd3"), 1000) as stor: self.assertTrue(stor._check_storage_size()) def test_check_storage_size_links(self): test_input = (1, 2, 3) with LocalFileStorage(os.path.join(TEST_FOLDER, "asd4"), 1000) as stor: stor.put(test_input) with mock.patch("os.path.islink") as os_mock: os_mock.return_value = True self.assertTrue(stor._check_storage_size()) def test_check_storage_size_error(self): test_input = (1, 2, 3) with LocalFileStorage(os.path.join(TEST_FOLDER, "asd5"), 1) as stor: with mock.patch("os.path.getsize", side_effect=throw(OSError)): stor.put(test_input) with mock.patch("os.path.islink") as os_mock: os_mock.return_value = True self.assertTrue(stor._check_storage_size()) def test_maintenance_routine(self): with mock.patch("os.makedirs") as m: with LocalFileStorage(os.path.join(TEST_FOLDER, "baz")) as stor: m.return_value = None with mock.patch("os.makedirs", side_effect=throw(Exception)): stor = LocalFileStorage(os.path.join(TEST_FOLDER, "baz")) stor.close() with mock.patch("os.listdir", side_effect=throw(Exception)): stor = LocalFileStorage(os.path.join(TEST_FOLDER, "baz")) stor.close() with LocalFileStorage(os.path.join(TEST_FOLDER, "baz")) as stor: with mock.patch("os.listdir", side_effect=throw(Exception)): stor._maintenance_routine() with mock.patch("os.path.isdir", side_effect=throw(Exception)): stor._maintenance_routine()
2,092
test create association in new diagram should
import pytest from gaphor import UML from gaphor.core.modeling import Diagram from gaphor.diagram.tests.fixtures import allow, connect, disconnect, get_connected from gaphor.UML.classes.association import AssociationItem from gaphor.UML.classes.klass import ClassItem @pytest.fixture def connected_association(create): asc = create(AssociationItem) c1 = create(ClassItem, UML.Class) c2 = create(ClassItem, UML.Class) connect(asc, asc.head, c1) assert asc.subject is None # no UML metaclass yet connect(asc, asc.tail, c2) assert asc.subject is not None return asc, c1, c2 @pytest.fixture def clone(create): def _clone(item): new = create(type(item)) new.subject = item.subject new.head_subject = item.head_subject new.tail_subject = item.tail_subject return new return _clone def test_glue_to_class(connected_association): asc, c1, c2 = connected_association glued = allow(asc, asc.head, c1) assert glued connect(asc, asc.head, c1) glued = allow(asc, asc.tail, c2) assert glued def test_association_item_connect(connected_association, element_factory): asc, c1, c2 = connected_association # Diagram, Class *2, Property *2, Association assert len(element_factory.lselect()) == 9 assert asc.head_subject is not None assert asc.tail_subject is not None def test_association_item_reconnect_copies_properties(connected_association, create): asc, c1, c2 = connected_association c3 = create(ClassItem, UML.Class) asc.subject.name = "Name" a = asc.subject connect(asc, asc.tail, c3) assert a is not asc.subject ends = [p.type for p in asc.subject.memberEnd] assert c1.subject in ends assert c3.subject in ends assert c2.subject not in ends assert asc.subject.name == "Name" def test_association_item_reconnect_with_navigability(connected_association, create): asc, c1, c2 = connected_association c3 = create(ClassItem, UML.Class) UML.recipes.set_navigability(asc.subject, asc.tail_subject, True) connect(asc, asc.tail, c3) assert asc.tail_subject.navigability is True def test_association_item_reconnect_with_aggregation(connected_association, create): asc, c1, c2 = connected_association c3 = create(ClassItem, UML.Class) asc.tail_subject.aggregation = "composite" connect(asc, asc.tail, c3) assert asc.tail_subject.aggregation == "composite" def test_disconnect_should_disconnect_model(connected_association, element_factory): asc, c1, c2 = connected_association disconnect(asc, asc.head) disconnect(asc, asc.tail) assert c1 is not get_connected(asc, asc.head) assert c2 is not get_connected(asc, asc.tail) assert not asc.subject assert not asc.head_subject assert not asc.tail_subject assert not element_factory.lselect(UML.Property) def test_disconnect_of_second_association_should_leave_model_in_tact( connected_association, clone ): asc, c1, c2 = connected_association new = clone(asc) disconnect(new, new.head) assert asc.subject.memberEnd[0].type is c1.subject assert asc.subject.memberEnd[1].type is c2.subject assert new.subject is asc.subject def test_disconnect_of_navigable_end_should_remove_owner_relationship( connected_association, element_factory ): asc, c1, c2 = connected_association UML.recipes.set_navigability(asc.subject, asc.head_subject, True) assert asc.head_subject in c2.subject.ownedAttribute disconnect(asc, asc.head) assert not asc.subject assert not asc.head_subject assert not asc.tail_subject assert not element_factory.lselect(UML.Property) def test_allow_reconnect_for_single_presentation(connected_association, create): asc, c1, c2 = connected_association c3 = create(ClassItem, UML.Class) assert allow(asc, asc.head, c3) def 
test_allow_reconnect_on_same_class_for_multiple_presentations( connected_association, clone, create ): asc, c1, c2 = connected_association new = clone(asc) assert allow(new, new.head, c1) assert allow(new, new.tail, c2) def test_allow_reconnect_if_only_one_connected_presentations( connected_association, clone, create ): asc, c1, c2 = connected_association clone(asc) c3 = create(ClassItem, UML.Class) assert allow(asc, asc.head, c3) def METHOD_NAME( connected_association, element_factory ): asc, c1, c2 = connected_association diagram2 = element_factory.create(Diagram) c3 = diagram2.create(ClassItem, subject=c1.subject) c4 = diagram2.create(ClassItem, subject=c2.subject) asc2 = diagram2.create(AssociationItem) connect(asc2, asc2.head, c3) connect(asc2, asc2.tail, c4) assert asc.subject is asc2.subject assert asc.head_subject is asc2.head_subject assert asc.tail_subject is asc2.tail_subject def test_create_association_in_new_diagram_reversed_should_reuse_existing( connected_association, element_factory ): asc, c1, c2 = connected_association diagram2 = element_factory.create(Diagram) c3 = diagram2.create(ClassItem, subject=c1.subject) c4 = diagram2.create(ClassItem, subject=c2.subject) asc2 = diagram2.create(AssociationItem) connect(asc2, asc2.tail, c3) connect(asc2, asc2.head, c4) assert asc.subject is asc2.subject assert asc.head_subject is asc2.tail_subject assert asc.tail_subject is asc2.head_subject def test_disconnect_association_in_new_diagram_should_clear_ends( connected_association, element_factory ): asc, c1, c2 = connected_association diagram2 = element_factory.create(Diagram) c3 = diagram2.create(ClassItem, subject=c1.subject) c4 = diagram2.create(ClassItem, subject=c2.subject) asc2 = diagram2.create(AssociationItem) connect(asc2, asc2.tail, c3) connect(asc2, asc2.head, c4) disconnect(asc, asc.head) assert not asc.subject assert not asc.head_subject assert not asc.tail_subject
2,093
test add noise column df
import numpy as np import pandas as pd import pytest from numpy.core.fromnumeric import sort from autogluon.core.utils.feature_selection import * from autogluon.core.utils.utils import unevaluated_fi_df_template def evaluated_fi_df_template(features, importance=None, n=None): rng = np.random.default_rng(0) importance_df = pd.DataFrame({"name": features}) importance_df["importance"] = rng.standard_normal(len(features)) if importance is None else importance importance_df["stddev"] = rng.standard_normal(len(features)) importance_df["p_value"] = None importance_df["n"] = 5 if n is None else n importance_df.set_index("name", inplace=True) importance_df.index.name = None return importance_df @pytest.fixture def sample_features(): return ["a", "b", "c", "d", "e"] @pytest.fixture def sample_importance_df_1(sample_features): return evaluated_fi_df_template(sample_features, importance=[0.2, 0.2, None, 1.0, None], n=[10, 5, 0, 5, 0]) @pytest.fixture def sample_importance_df_2(sample_features): return evaluated_fi_df_template(sample_features, importance=[-0.1, -0.1, 0.1, None, None], n=[5, 10, 10, 0, 0]) def METHOD_NAME(): # test noise columns are appended to input dataframe and feature_metadata X = pd.DataFrame({"a": [1, 2]}) args = {"rng": np.random.default_rng(0), "count": 2} X_noised, noise_columns = add_noise_column(X, **args) expected_features = X.columns.tolist() + noise_columns assert expected_features == X_noised.columns.tolist() def test_merge_importance_dfs_base(sample_features): # test the scenario when previous feature importance df is none prev_df, curr_df = None, unevaluated_fi_df_template(sample_features) assert merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set()) is curr_df def test_merge_importance_dfs_same_model(sample_features, sample_importance_df_1, sample_importance_df_2): # test the scenario where previous feature importance df exists and its importance estimates come from the same fitted model prev_df, curr_df = sample_importance_df_1, sample_importance_df_2 result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set()) assert [score if score == score else None for score in result_df["importance"].tolist()] == [0.0, 0.1, 0.1, 1.0, None] assert result_df["n"].tolist() == [15, 15, 10, 5, 0] def test_merge_importance_dfs_different_model(sample_features, sample_importance_df_1, sample_importance_df_2): # test the scenario where previous feature importance df exists and its importance estimates come from a different fitted model prev_df, curr_df = sample_importance_df_1, sample_importance_df_2 using_prev_fit_fi = set(sample_features) result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index() assert len(using_prev_fit_fi) == 2 assert [score if score == score else None for score in result_df["importance"].tolist()] == [-0.1, -0.1, 0.1, 1.0, None] assert result_df["n"].tolist() == [5, 10, 10, 5, 0] def test_merge_importance_dfs_all(sample_features, sample_importance_df_1, sample_importance_df_2): # test the scenario where previous feature importance df exists and its importance estimates come from both same and different fitted models prev_df, curr_df = sample_importance_df_1, sample_importance_df_2 using_prev_fit_fi = set([sample_features[0]]) result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index() assert [score if score == score else None for score in result_df["importance"].tolist()] == [-0.1, 0.0, 0.1, 1.0, None] assert result_df["n"].tolist() == [5, 15, 10, 5, 0] assert 
using_prev_fit_fi == set() def test_sort_features_by_priority_base(sample_features): # test the ordering of feature importance computation when no prior feature importance computation was done sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=None, using_prev_fit_fi=set()) assert sorted_features == sample_features def test_sort_features_by_priority_same_model(sample_features): # test the ordering of feature importance computation when prior feature importance computation from the same fitted model was done prev_importance_df = evaluated_fi_df_template(sample_features) sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=set()) assert sorted_features == prev_importance_df.sort_values("importance").index.tolist() def test_sort_features_by_priority_different_model(sample_features): # test the ordering of feature importance computation when prior feature importance computation from a different fitted model was done prev_importance_df = evaluated_fi_df_template(sample_features) using_prev_fit_fi = sample_features[-2:] sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi) sorted_prev_fit_features = prev_importance_df[prev_importance_df.index.isin(using_prev_fit_fi)].sort_values("importance").index.tolist() sorted_curr_fit_features = prev_importance_df[~prev_importance_df.index.isin(using_prev_fit_fi)].sort_values("importance").index.tolist() expected_features = sorted_prev_fit_features + sorted_curr_fit_features assert sorted_features == expected_features def test_sort_features_by_priority_all(sample_features): # test the ordering of feature importance computation when feature impotance computation comes from mix of current and previous fit models, # and some feature are unevaluated length = len(sample_features) using_prev_fit_fi = set(sample_features[: length // 3]) evaluated_rows, unevaluated_rows = evaluated_fi_df_template(sample_features[: length // 2]), unevaluated_fi_df_template(sample_features[length // 2 :]) prev_importance_df = pd.concat([evaluated_rows, unevaluated_rows]) sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi) unevaluated_features = unevaluated_rows.index.tolist() sorted_prev_fit_features = ( evaluated_rows[(~evaluated_rows.index.isin(sample_features[length // 2 :])) & (evaluated_rows.index.isin(using_prev_fit_fi))] .sort_values("importance") .index.tolist() ) sorted_curr_fit_features = ( evaluated_rows[(~evaluated_rows.index.isin(sample_features[length // 2 :])) & (~evaluated_rows.index.isin(using_prev_fit_fi))] .sort_values("importance") .index.tolist() ) expected_features = unevaluated_features + sorted_prev_fit_features + sorted_curr_fit_features assert sorted_features == expected_features
2,094
test nonhashable
import unittest import twowaymap class TestTwoWayMap(unittest.TestCase): def assertTwoWayMap(self, twmap, forward, reverse): map_repr = ( { k: twmap.lookup_left(k) for k in twmap.left_all() }, { k: twmap.lookup_right(k) for k in twmap.right_all() } ) self.assertEqual(map_repr, (forward, reverse)) def test_set_list(self): tmap = twowaymap.TwoWayMap(left=set, right=list) self.assertFalse(tmap) tmap.insert(1, "a") self.assertTrue(tmap) self.assertTwoWayMap(tmap, {1: ["a"]}, {"a": {1}}) tmap.insert(1, "a") # should be a no-op, since this pair already exists tmap.insert(1, "b") tmap.insert(2, "a") self.assertTwoWayMap(tmap, {1: ["a", "b"], 2: ["a"]}, {"a": {1,2}, "b": {1}}) tmap.insert(1, "b") tmap.insert(2, "b") self.assertTwoWayMap(tmap, {1: ["a", "b"], 2: ["a", "b"]}, {"a": {1,2}, "b": {1,2}}) tmap.remove(1, "b") tmap.remove(2, "b") self.assertTwoWayMap(tmap, {1: ["a"], 2: ["a"]}, {"a": {1,2}}) tmap.insert(1, "b") tmap.insert(2, "b") tmap.remove_left(1) self.assertTwoWayMap(tmap, {2: ["a", "b"]}, {"a": {2}, "b": {2}}) tmap.insert(1, "a") tmap.insert(2, "b") tmap.remove_right("b") self.assertTwoWayMap(tmap, {1: ["a"], 2: ["a"]}, {"a": {1,2}}) self.assertTrue(tmap) tmap.clear() self.assertTwoWayMap(tmap, {}, {}) self.assertFalse(tmap) def test_set_single(self): tmap = twowaymap.TwoWayMap(left=set, right="single") self.assertFalse(tmap) tmap.insert(1, "a") self.assertTrue(tmap) self.assertTwoWayMap(tmap, {1: "a"}, {"a": {1}}) tmap.insert(1, "a") # should be a no-op, since this pair already exists tmap.insert(1, "b") tmap.insert(2, "a") self.assertTwoWayMap(tmap, {1: "b", 2: "a"}, {"a": {2}, "b": {1}}) tmap.insert(1, "b") tmap.insert(2, "b") self.assertTwoWayMap(tmap, {1: "b", 2: "b"}, {"b": {1,2}}) tmap.remove(1, "b") self.assertTwoWayMap(tmap, {2: "b"}, {"b": {2}}) tmap.remove(2, "b") self.assertTwoWayMap(tmap, {}, {}) tmap.insert(1, "b") tmap.insert(2, "b") self.assertTwoWayMap(tmap, {1: "b", 2: "b"}, {"b": {1,2}}) tmap.remove_left(1) self.assertTwoWayMap(tmap, {2: "b"}, {"b": {2}}) tmap.insert(1, "a") tmap.insert(2, "b") tmap.remove_right("b") self.assertTwoWayMap(tmap, {1: "a"}, {"a": {1}}) self.assertTrue(tmap) tmap.clear() self.assertTwoWayMap(tmap, {}, {}) self.assertFalse(tmap) def test_strict_list(self): tmap = twowaymap.TwoWayMap(left="strict", right=list) self.assertFalse(tmap) tmap.insert(1, "a") self.assertTrue(tmap) self.assertTwoWayMap(tmap, {1: ["a"]}, {"a": 1}) tmap.insert(1, "a") # should be a no-op, since this pair already exists tmap.insert(1, "b") with self.assertRaises(ValueError): tmap.insert(2, "a") self.assertTwoWayMap(tmap, {1: ["a", "b"]}, {"a": 1, "b": 1}) tmap.insert(1, "b") with self.assertRaises(ValueError): tmap.insert(2, "b") tmap.insert(2, "c") self.assertTwoWayMap(tmap, {1: ["a", "b"], 2: ["c"]}, {"a": 1, "b": 1, "c": 2}) tmap.remove(1, "b") self.assertTwoWayMap(tmap, {1: ["a"], 2: ["c"]}, {"a": 1, "c": 2}) tmap.remove(2, "b") self.assertTwoWayMap(tmap, {1: ["a"], 2: ["c"]}, {"a": 1, "c": 2}) tmap.insert(1, "b") with self.assertRaises(ValueError): tmap.insert(2, "b") self.assertTwoWayMap(tmap, {1: ["a", "b"], 2: ["c"]}, {"a": 1, "b": 1, "c": 2}) tmap.remove_left(1) self.assertTwoWayMap(tmap, {2: ["c"]}, {"c": 2}) tmap.insert(1, "a") tmap.insert(2, "b") tmap.remove_right("b") self.assertTwoWayMap(tmap, {1: ["a"], 2: ["c"]}, {"a": 1, "c": 2}) self.assertTrue(tmap) tmap.clear() self.assertTwoWayMap(tmap, {}, {}) self.assertFalse(tmap) def test_strict_single(self): tmap = twowaymap.TwoWayMap(left="strict", right="single") tmap.insert(1, "a") tmap.insert(2, "b") 
tmap.insert(2, "c") self.assertTwoWayMap(tmap, {1: "a", 2: "c"}, {"a": 1, "c": 2}) with self.assertRaises(ValueError): tmap.insert(2, "a") tmap.insert(2, "c") # This pair already exists, so not an error. self.assertTwoWayMap(tmap, {1: "a", 2: "c"}, {"a": 1, "c": 2}) def METHOD_NAME(self): # Test that we don't get into an inconsistent state if we attempt to use a non-hashable value. tmap = twowaymap.TwoWayMap(left=list, right=list) tmap.insert(1, "a") self.assertTwoWayMap(tmap, {1: ["a"]}, {"a": [1]}) with self.assertRaises(TypeError): tmap.insert(1, {}) with self.assertRaises(TypeError): tmap.insert({}, "a") self.assertTwoWayMap(tmap, {1: ["a"]}, {"a": [1]}) if __name__ == "__main__": unittest.main()
2,095
get default display mode
# ***************************************************************************
# *   Copyright (c) 2017 Markus Hovorka <[email protected]>                *
# *                                                                         *
# *   This file is part of the FreeCAD CAx development system.              *
# *                                                                         *
# *   This program is free software; you can redistribute it and/or modify  *
# *   it under the terms of the GNU Lesser General Public License (LGPL)    *
# *   as published by the Free Software Foundation; either version 2 of     *
# *   the License, or (at your option) any later version.                   *
# *   for detail see the LICENCE text file.                                 *
# *                                                                         *
# *   This program is distributed in the hope that it will be useful,       *
# *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
# *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
# *   GNU Library General Public License for more details.                  *
# *                                                                         *
# *   You should have received a copy of the GNU Library General Public     *
# *   License along with this program; if not, write to the Free Software   *
# *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307   *
# *   USA                                                                   *
# *                                                                         *
# ***************************************************************************

__title__ = "FreeCAD FEM solver equation base object"
__author__ = "Markus Hovorka"
__url__ = "https://www.freecad.org"

## \addtogroup FEM
#  @{

import FreeCAD

if FreeCAD.GuiUp:
    from pivy import coin


class BaseProxy(object):

    BaseType = "App::FeaturePython"

    def __init__(self, obj):
        obj.Proxy = self
        obj.addProperty(
            "App::PropertyLinkSubList",
            "References",
            "Base",
            "")

    def execute(self, obj):
        return True


class BaseViewProxy(object):

    def __init__(self, vobj):
        vobj.Proxy = self

    def attach(self, vobj):
        default = coin.SoGroup()
        vobj.addDisplayMode(default, "Default")

    def getDisplayModes(self, obj):
        "Return a list of display modes."
        modes = ["Default"]
        return modes

    def METHOD_NAME(self):
        return "Default"

    def setDisplayMode(self, mode):
        return mode


class DeformationProxy(BaseProxy):
    pass


class DeformationViewProxy(BaseViewProxy):

    def getIcon(self):
        return ":/icons/FEM_EquationDeformation.svg"


class ElasticityProxy(BaseProxy):
    pass


class ElasticityViewProxy(BaseViewProxy):

    def getIcon(self):
        return ":/icons/FEM_EquationElasticity.svg"


class ElectricforceProxy(BaseProxy):
    pass


class ElectricforceViewProxy(BaseViewProxy):

    def getIcon(self):
        return ":/icons/FEM_EquationElectricforce.svg"


class ElectrostaticProxy(BaseProxy):
    pass


class ElectrostaticViewProxy(BaseViewProxy):

    def getIcon(self):
        return ":/icons/FEM_EquationElectrostatic.svg"


class FlowProxy(BaseProxy):
    pass


class FlowViewProxy(BaseViewProxy):

    def getIcon(self):
        return ":/icons/FEM_EquationFlow.svg"


class FluxProxy(BaseProxy):
    pass


class FluxViewProxy(BaseViewProxy):

    def getIcon(self):
        return ":/icons/FEM_EquationFlux.svg"


class HeatProxy(BaseProxy):
    pass


class HeatViewProxy(BaseViewProxy):

    def getIcon(self):
        return ":/icons/FEM_EquationHeat.svg"


class MagnetodynamicProxy(BaseProxy):
    pass


class MagnetodynamicViewProxy(BaseViewProxy):

    def getIcon(self):
        return ":/icons/FEM_EquationMagnetodynamic.svg"


class Magnetodynamic2DProxy(BaseProxy):
    pass


class Magnetodynamic2DViewProxy(BaseViewProxy):

    def getIcon(self):
        return ":/icons/FEM_EquationMagnetodynamic2D.svg"

## @}
2,096
write parameters
# Copyright (c) 2017 The University of Manchester # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy from spinn_utilities.overrides import overrides from spinn_front_end_common.interface.ds import DataType from spinn_front_end_common.utilities.constants import ( BYTES_PER_WORD, BYTES_PER_SHORT) from spynnaker.pyNN.data import SpynnakerDataView from .abstract_timing_dependence import AbstractTimingDependence from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( SynapseStructureWeightAccumulator) from spynnaker.pyNN.models.neuron.plasticity.stdp.common import ( STDP_FIXED_POINT_ONE) class TimingDependenceRecurrent(AbstractTimingDependence): """ A timing dependence STDP rule based on recurrences. """ __slots__ = [ "__accumulator_depression_plus_one", "__accumulator_potentiation_minus_one", "__dual_fsm", "__mean_post_window", "__mean_pre_window", "__synapse_structure", "__a_plus", "__a_minus"] __PARAM_NAMES = ( 'accumulator_depression', 'accumulator_potentiation', 'mean_pre_window', 'mean_post_window', 'dual_fsm') default_parameters = { 'accumulator_depression': -6, 'accumulator_potentiation': 6, 'mean_pre_window': 35.0, 'mean_post_window': 35.0, 'dual_fsm': True} def __init__( self, accumulator_depression=default_parameters[ 'accumulator_depression'], accumulator_potentiation=default_parameters[ 'accumulator_potentiation'], mean_pre_window=default_parameters['mean_pre_window'], mean_post_window=default_parameters['mean_post_window'], dual_fsm=default_parameters['dual_fsm'], A_plus=0.01, A_minus=0.01): """ :param int accumulator_depression: :param int accumulator_potentiation: :param float mean_pre_window: :param float mean_post_window: :param bool dual_fsm: :param float A_plus: :math:`A^+` :param float A_minus: :math:`A^-` """ # pylint: disable=too-many-arguments self.__accumulator_depression_plus_one = accumulator_depression + 1 self.__accumulator_potentiation_minus_one = \ accumulator_potentiation - 1 self.__mean_pre_window = mean_pre_window self.__mean_post_window = mean_post_window self.__dual_fsm = dual_fsm self.__a_plus = A_plus self.__a_minus = A_minus self.__synapse_structure = SynapseStructureWeightAccumulator() @property def A_plus(self): r""" :math:`A^+` :rtype: float """ return self.__a_plus @A_plus.setter def A_plus(self, new_value): self.__a_plus = new_value @property def A_minus(self): r""" :math:`A^-` :rtype: float """ return self.__a_minus @A_minus.setter def A_minus(self, new_value): self.__a_minus = new_value @overrides(AbstractTimingDependence.is_same_as) def is_same_as(self, timing_dependence): if timing_dependence is None or not isinstance( timing_dependence, TimingDependenceRecurrent): return False return ((self.__accumulator_depression_plus_one == timing_dependence.accumulator_depression_plus_one) and (self.__accumulator_potentiation_minus_one == timing_dependence.accumulator_potentiation_minus_one) and (self.__mean_pre_window == timing_dependence.mean_pre_window) and (self.__mean_post_window == timing_dependence.mean_post_window)) @property def 
vertex_executable_suffix(self): """ The suffix to be appended to the vertex executable for this rule. :rtype: str """ if self.__dual_fsm: return "recurrent_dual_fsm" return "recurrent_pre_stochastic" @property def pre_trace_n_bytes(self): """ The number of bytes used by the pre-trace of the rule per neuron. :rtype: int """ # When using the separate FSMs, pre-trace contains window length, # otherwise it's in the synapse return BYTES_PER_SHORT if self.__dual_fsm else 0 @overrides(AbstractTimingDependence.get_parameters_sdram_usage_in_bytes) def get_parameters_sdram_usage_in_bytes(self): # 2 * 32-bit parameters # 2 * LUTS with STDP_FIXED_POINT_ONE * 16-bit entries return (2 * BYTES_PER_WORD) + ( 2 * STDP_FIXED_POINT_ONE * BYTES_PER_SHORT) @property def n_weight_terms(self): """ The number of weight terms expected by this timing rule. :rtype: int """ return 1 @overrides(AbstractTimingDependence.METHOD_NAME) def METHOD_NAME( self, spec, global_weight_scale, synapse_weight_scales): # Write parameters spec.write_value(data=self.__accumulator_depression_plus_one, data_type=DataType.INT32) spec.write_value(data=self.__accumulator_potentiation_minus_one, data_type=DataType.INT32) # Convert mean times into machine timesteps time_step_per_ms = SpynnakerDataView.get_simulation_time_step_per_ms() mean_pre_timesteps = float(self.__mean_pre_window * time_step_per_ms) mean_post_timesteps = float(self.__mean_post_window * time_step_per_ms) # Write lookup tables self._write_exp_dist_lut(spec, mean_pre_timesteps) self._write_exp_dist_lut(spec, mean_post_timesteps) @staticmethod def _write_exp_dist_lut(spec, mean): """ :param .DataSpecificationGenerator spec: :param float mean: """ indices = numpy.arange(STDP_FIXED_POINT_ONE) inv_cdf = numpy.log(1.0 - indices/float(STDP_FIXED_POINT_ONE)) * -mean spec.write_array( inv_cdf.astype(numpy.uint16), data_type=DataType.UINT16) @property def synaptic_structure(self): """ The synaptic structure of the plastic part of the rows. :rtype: AbstractSynapseStructure """ return self.__synapse_structure @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): return self.__PARAM_NAMES
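The two lookup tables written by _write_exp_dist_lut above are inverse CDFs of exponential distributions: a uniformly drawn fixed-point index maps to an exponentially distributed value with the configured mean, presumably used on the machine to draw random window lengths. A self-contained sketch of the same computation follows, assuming STDP_FIXED_POINT_ONE is 2048; that value is an assumption here, the real constant lives in sPyNNaker's STDP common module and may differ.

import numpy as np

STDP_FIXED_POINT_ONE = 2048    # assumed value, see note above
mean_timesteps = 35.0 * 10.0   # e.g. a 35 ms mean window at 10 timesteps per ms, mirroring mean_pre_timesteps

indices = np.arange(STDP_FIXED_POINT_ONE)
# Inverse CDF of an exponential distribution: uniform index -> value in timesteps
inv_cdf = np.log(1.0 - indices / float(STDP_FIXED_POINT_ONE)) * -mean_timesteps

print(inv_cdf[:3])      # starts at 0 and grows slowly
print(inv_cdf[-1])      # largest table entry, roughly mean * ln(STDP_FIXED_POINT_ONE)
print(inv_cdf.mean())   # close to the configured mean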
2,097
build transform
# Copyright 2019 Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of the Willow Garage nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

import pytest

import rclpy

from tf2_ros.buffer import Buffer

from geometry_msgs.msg import TransformStamped, PointStamped


class TestBuffer:

    def METHOD_NAME(self, target, source, rclpy_time):
        transform = TransformStamped()
        transform.header.frame_id = target
        transform.header.stamp = rclpy_time.to_msg()
        transform.child_frame_id = source
        transform.transform.translation.x = 42.0
        transform.transform.translation.y = -3.14
        transform.transform.translation.z = 0.0
        transform.transform.rotation.w = 1.0
        transform.transform.rotation.x = 0.0
        transform.transform.rotation.y = 0.0
        transform.transform.rotation.z = 0.0
        return transform

    def test_can_transform_valid_transform(self):
        buffer = Buffer()
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        assert buffer.set_transform(transform, 'unittest') is None
        assert buffer.can_transform('foo', 'bar', rclpy_time)
        output = buffer.lookup_transform('foo', 'bar', rclpy_time)
        assert transform.child_frame_id == output.child_frame_id
        assert transform.transform.translation.x == output.transform.translation.x
        assert transform.transform.translation.y == output.transform.translation.y
        assert transform.transform.translation.z == output.transform.translation.z

    def test_await_transform_immediately_available(self):
        # wait for a transform that is already available to test short-cut code
        buffer = Buffer()
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        buffer.set_transform(transform, 'unittest')
        coro = buffer.lookup_transform_async('foo', 'bar', rclpy_time)
        with pytest.raises(StopIteration) as excinfo:
            coro.send(None)
        assert transform == excinfo.value.value
        coro.close()

    def test_await_transform_full_immediately_available(self):
        # wait for a transform that is already available to test short-cut code
        buffer = Buffer()
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        buffer.set_transform(transform, 'unittest')
        coro = buffer.lookup_transform_full_async('foo', rclpy_time, 'bar', rclpy_time, 'foo')
        with pytest.raises(StopIteration) as excinfo:
            coro.send(None)
        assert transform == excinfo.value.value
        coro.close()

    def test_await_transform_delayed(self):
        # wait for a transform that is not yet available
        buffer = Buffer()
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        coro = buffer.lookup_transform_async('foo', 'bar', rclpy_time)
        coro.send(None)
        buffer.set_transform(transform, 'unittest')
        with pytest.raises(StopIteration) as excinfo:
            coro.send(None)
        assert transform == excinfo.value.value
        coro.close()

    def test_await_transform_full_delayed(self):
        # wait for a transform that is not yet available
        buffer = Buffer()
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        coro = buffer.lookup_transform_full_async('foo', rclpy_time, 'bar', rclpy_time, 'foo')
        coro.send(None)
        buffer.set_transform(transform, 'unittest')
        with pytest.raises(StopIteration) as excinfo:
            coro.send(None)
        assert transform == excinfo.value.value
        coro.close()

    def test_buffer_non_default_cache(self):
        buffer = Buffer(cache_time=rclpy.duration.Duration(seconds=10.0))
        clock = rclpy.clock.Clock()
        rclpy_time = clock.now()
        transform = self.METHOD_NAME('foo', 'bar', rclpy_time)
        assert buffer.set_transform(transform, 'unittest') is None
        assert buffer.can_transform('foo', 'bar', rclpy_time)
        output = buffer.lookup_transform('foo', 'bar', rclpy_time)
        assert transform.child_frame_id == output.child_frame_id
        assert transform.transform.translation.x == output.transform.translation.x
        assert transform.transform.translation.y == output.transform.translation.y
        assert transform.transform.translation.z == output.transform.translation.z
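The async tests above drive the tf2 awaitables by hand with `coro.send(None)` and read the result out of the `StopIteration` that `pytest.raises` captures. That works because a coroutine's return value travels in `StopIteration.value`. A small self-contained sketch of that general pattern, independent of ROS 2 and using only the standard library:

    # Sketch only: drive a coroutine to completion synchronously and collect
    # its return value from StopIteration.value, as the tests above do.
    def drive_to_completion(coro):
        try:
            while True:
                coro.send(None)
        except StopIteration as done:
            return done.value

    async def answer():
        return 42

    assert drive_to_completion(answer()) == 42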
2,098
get vertexai job client
# Copyright 2022 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""CRMint's abstract worker dealing with Vertex AI."""

import time

import google.auth
from google.cloud import aiplatform
from google.cloud.aiplatform_v1.types import job_state as js
from google.cloud.aiplatform_v1.types import pipeline_state as ps

from jobs.workers import worker

_PIPELINE_COMPLETE_STATES = frozenset([
    ps.PipelineState.PIPELINE_STATE_SUCCEEDED,
    ps.PipelineState.PIPELINE_STATE_FAILED,
    ps.PipelineState.PIPELINE_STATE_CANCELLED,
    ps.PipelineState.PIPELINE_STATE_PAUSED])

_JOB_COMPLETE_STATES = frozenset([
    js.JobState.JOB_STATE_SUCCEEDED,
    js.JobState.JOB_STATE_FAILED,
    js.JobState.JOB_STATE_CANCELLED,
    js.JobState.JOB_STATE_PAUSED])


class VertexAIWorker(worker.Worker):
  """Worker that polls job status and respawns itself if the job is not done."""

  def METHOD_NAME(self, location):
    api_endpoint = f'{location}-aiplatform.googleapis.com'
    client_options = {'api_endpoint': api_endpoint}
    return aiplatform.gapic.JobServiceClient(client_options=client_options)

  def _get_vertexai_pipeline_client(self, location):
    api_endpoint = f'{location}-aiplatform.googleapis.com'
    client_options = {'api_endpoint': api_endpoint}
    return aiplatform.gapic.PipelineServiceClient(
        client_options=client_options)

  def _get_vertexai_dataset_client(self, location):
    api_endpoint = f'{location}-aiplatform.googleapis.com'
    client_options = {'api_endpoint': api_endpoint}
    return aiplatform.gapic.DatasetServiceClient(
        client_options=client_options)

  def _get_vertexai_model_client(self, location):
    api_endpoint = f'{location}-aiplatform.googleapis.com'
    client_options = {'api_endpoint': api_endpoint}
    return aiplatform.gapic.ModelServiceClient(client_options=client_options)

  def _get_batch_prediction_job(self, job_client, job_name):
    return job_client.get_batch_prediction_job(name=job_name)

  def _get_training_pipeline(self, pipeline_client, pipeline_name):
    return pipeline_client.get_training_pipeline(name=pipeline_name)

  def _get_location_from_pipeline_name(self, pipeline_name):
    return pipeline_name.split('/')[3]

  def _get_location_from_job_name(self, job_name):
    return job_name.split('/')[3]

  def _get_project_id(self):
    _, project_id = google.auth.default()
    return project_id

  def _get_parent_resource(self, location):
    project_id = self._get_project_id()
    return f'projects/{project_id}/locations/{location}'

  def _wait_for_pipeline(self, pipeline):
    """Waits for pipeline completion.

    It will relay to VertexAIWaiter if it takes too long.
    """
    delay = 5
    waiting_time = 5
    time.sleep(delay)
    while pipeline.state not in _PIPELINE_COMPLETE_STATES:
      if waiting_time > 300:
        # Once 5 minutes have passed, spawn a VertexAIWaiter.
        self._enqueue(
            'VertexAIWaiter',
            {'id': pipeline.name, 'worker_class': 'VertexAITabularTrainer'},
            60)
        return None
      if delay < 30:
        delay = [5, 10, 15, 20, 30][int(waiting_time / 60)]
      time.sleep(delay)
      waiting_time += delay
    if pipeline.state == ps.PipelineState.PIPELINE_STATE_FAILED:
      raise worker.WorkerException(
          f'Training pipeline {pipeline.name} failed.')

  def _wait_for_job(self, job):
    """Waits for batch prediction job completion.

    It will relay to VertexAIWaiter if it takes too long.
    """
    delay = 5
    waiting_time = 5
    time.sleep(delay)
    while job.state not in _JOB_COMPLETE_STATES:
      if waiting_time > 300:
        # Once 5 minutes have passed, spawn a VertexAIWaiter.
        self._enqueue(
            'VertexAIWaiter',
            {'id': job.name, 'worker_class': 'VertexAIBatchPredictorToBQ'},
            60)
        return None
      if delay < 30:
        delay = [5, 10, 15, 20, 30][int(waiting_time / 60)]
      time.sleep(delay)
      waiting_time += delay
    if job.state == js.JobState.JOB_STATE_FAILED:
      raise worker.WorkerException(f'Job {job.name} failed.')

  def _clean_up_datasets(self, dataset_client, project, region, display_name):
    parent = f'projects/{project}/locations/{region}'
    datasets = list(
        dataset_client.list_datasets({
            'parent': parent,
            'filter': f'display_name="{display_name}"',
            'order_by': 'create_time asc'}))
    configs = map(lambda x: (x.create_time, {'name': x.name}), datasets)
    sorted_configs = sorted(configs)
    for _, config in sorted_configs[:-1]:
      dataset_name = config['name']
      dataset_client.delete_dataset({'name': dataset_name})
      self.log_info(f'Deleted dataset: {dataset_name}')

  def _clean_up_training_pipelines(self, pipeline_client, project, region,
                                   display_name):
    parent = f'projects/{project}/locations/{region}'
    training_pipelines = list(
        pipeline_client.list_training_pipelines({
            'parent': parent,
            'filter': f'display_name="{display_name}"'}))
    configs = map(
        lambda x: (x.create_time, {'state': x.state, 'name': x.name}),
        training_pipelines)
    sorted_configs = sorted(configs)
    for _, config in sorted_configs[:-1]:
      training_pipeline_name = config['name']
      if config['state'] in _PIPELINE_COMPLETE_STATES:
        pipeline_client.delete_training_pipeline(name=training_pipeline_name)
      else:
        pipeline_client.cancel_training_pipeline(
            name=training_pipeline_name, timeout=300)
        pipeline_client.delete_training_pipeline(name=training_pipeline_name)
      self.log_info(f'Deleted training pipeline: {training_pipeline_name}')

  def _clean_up_batch_predictions(self, job_client, project, region,
                                  display_name):
    parent = f'projects/{project}/locations/{region}'
    batch_predictions = list(
        job_client.list_batch_prediction_jobs({
            'parent': parent,
            'filter': f'display_name="{display_name}"'}))
    configs = map(
        lambda x: (x.create_time, {'state': x.state, 'name': x.name}),
        batch_predictions)
    sorted_configs = sorted(configs)
    for _, config in sorted_configs[:-1]:
      batch_prediction_name = config['name']
      if config['state'] in _JOB_COMPLETE_STATES:
        job_client.delete_batch_prediction_job(name=batch_prediction_name)
      else:
        job_client.cancel_batch_prediction_job(
            name=batch_prediction_name, timeout=300)
        job_client.delete_batch_prediction_job(name=batch_prediction_name)
      self.log_info(f'Deleted batch prediction: {batch_prediction_name}')
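The worker builds one regional gapic client per Vertex AI service by pointing `api_endpoint` at `{location}-aiplatform.googleapis.com`, then polls with growing delays and re-enqueues a `VertexAIWaiter` task if the job outlives its slot. A hedged sketch of how a concrete subclass might combine those helpers; the job resource name is a placeholder and the `_execute` entry point is an assumption about the worker base class, not taken from the snippet.

    # Sketch only: polling a batch prediction job with the helpers above.
    class ExampleBatchPredictionPoller(VertexAIWorker):

      def _execute(self):  # assumption: an _execute-style entry point exists
        # Hypothetical resource name; a real one comes from a previous worker.
        job_name = ('projects/my-project/locations/us-central1/'
                    'batchPredictionJobs/1234567890')
        location = self._get_location_from_job_name(job_name)
        job_client = self.METHOD_NAME(location)  # the anonymized client factory
        job = self._get_batch_prediction_job(job_client, job_name)
        # Blocks with increasing delays, or re-enqueues a VertexAIWaiter task.
        self._wait_for_job(job)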
2,099
get cluster log groups from boto3
import json
import logging

import boto3
import utils
from botocore.exceptions import ClientError

LOGGER = logging.getLogger(__name__)


def _dumps_json(obj):
    """Dump obj to a JSON string."""
    return json.dumps(obj, indent=2)


def METHOD_NAME(cluster_log_group_prefix):
    """
    Get log groups with cluster log group prefix from boto3.

    Raises ClientError.
    """
    try:
        log_groups = (
            boto3.client("logs").describe_log_groups(logGroupNamePrefix=cluster_log_group_prefix).get("logGroups")
        )
        LOGGER.info("Log groups: {0}\n".format(_dumps_json(log_groups)))
        return log_groups
    except ClientError as e:
        LOGGER.error("Unable to retrieve any log group with prefix {0}\nError: {1}".format(cluster_log_group_prefix, e))
        raise


def _get_log_stream_pages(log_client, log_group_name):
    """
    Get paged list of log streams.

    Raises ClientError if the log group doesn't exist.
    """
    next_token = None
    while True:
        kwargs = {"logGroupName": log_group_name}
        if next_token:
            kwargs.update({"nextToken": next_token})
        response = log_client.describe_log_streams(**kwargs)
        streams = response.get("logStreams")
        LOGGER.info("Log streams for {group}:\n{streams}".format(group=log_group_name, streams=_dumps_json(streams)))
        yield streams
        next_token = response.get("nextToken")
        if next_token is None:
            break


def get_log_streams(log_group_name):
    """
    Get list of log streams.

    Raises ClientError if the log group doesn't exist.
    """
    log_client = boto3.client("logs")
    for stream_page in _get_log_stream_pages(log_client, log_group_name):
        for stream in stream_page:
            yield stream


def get_log_events(log_group_name, log_stream_name):
    """
    Get log events for the given log_stream_name.

    Raises ClientError if the given log group or stream doesn't exist.
    """
    logs_client = boto3.client("logs")
    # get_log_events is not page-able using utils.paginate_boto3
    response = logs_client.get_log_events(
        logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True
    )
    prev_token = None
    next_token = response.get("nextForwardToken")
    LOGGER.info(f"Starting pagination of GetLogEvents for {log_group_name}/{log_stream_name} with {next_token}")
    while next_token != prev_token:
        for event in response.get("events"):
            LOGGER.info(f"event from stream {log_group_name}/{log_stream_name}:\n{json.dumps(event, indent=2)}")
            yield event
        response = logs_client.get_log_events(
            logGroupName=log_group_name, logStreamName=log_stream_name, nextToken=next_token
        )
        prev_token = next_token
        next_token = response.get("nextForwardToken")
        LOGGER.info(f"Continuing pagination of GetLogEvents for {log_group_name}/{log_stream_name} with {next_token}")


def get_ec2_instances():
    """Iterate through ec2's describe_instances."""
    for instance_page in utils.paginate_boto3(boto3.client("ec2").describe_instances):
        for instance in instance_page.get("Instances"):
            yield instance


def _get_log_group_for_stack(stack_name):
    """Return a list of log groups belonging to the given stack."""
    log_groups = []
    for resource in utils.get_cfn_resources(stack_name):
        if resource.get("ResourceType") == "AWS::Logs::LogGroup":
            log_groups.append(resource.get("PhysicalResourceId"))
    return log_groups


def get_cluster_log_groups(stack_name):
    """Return list of PhysicalResourceIds for log groups created by cluster with given stack name."""
    log_groups = []
    substack_phys_ids = utils.get_substacks(stack_name)
    for substack_phys_id in substack_phys_ids:
        log_groups.extend(_get_log_group_for_stack(substack_phys_id))
    return log_groups


def delete_log_group(log_group):
    """Delete the given log group."""
    try:
        boto3.client("logs").delete_log_group(logGroupName=log_group)
    except ClientError as client_err:
        if client_err.response.get("Error").get("Code") == "ResourceNotFoundException":
            return  # Log group didn't exist.
        LOGGER.warning(
            "Error when deleting log group {log_group}: {msg}".format(
                log_group=log_group, msg=client_err.response.get("Error").get("Message")
            )
        )


def delete_log_groups(log_groups):
    """Delete the given log groups, if they exist."""
    for log_group in log_groups:
        delete_log_group(log_group)