{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n'''\n namespace = 'urn:oasis:names:tc:xliff:document:1.1'\n unversioned_namespace = 'urn:oasis:names:tc:xliff:document:'\n\n suggestions_in_format = True\n \"\"\"xliff units have alttrans tags which can be used to store suggestions\"\"\"\n\n def __init__(self, *args, **kwargs):\n self._filename = None\n lisa.LISAfile.__init__(self, *args, **kwargs)\n self._messagenum = 0\n\n def initbody(self):\n # detect the xliff namespace, handle both 1.1 and 1.2\n for prefix, ns in self.document.getroot().nsmap.items():\n if ns and ns.startswith(self.unversioned_namespace):\n self.namespace = ns\n break\n else:\n # handle crappy xliff docs without proper namespace declaration\n # by simply using the xmlns 
default namespace\n self.namespace = self.document.getroot().nsmap.get(None, None)\n\n if self._filename:\n filenode = self.getfilenode(self._filename, createifmissing=True)\n else:\n filenode = self.document.getroot().iterchildren(self.namespaced('file')).next()\n self.body = self.getbodynode(filenode, createifmissing=True)\n\n def addheader(self):\n \"\"\"Initialise the file header.\"\"\"\n pass\n\n def createfilenode(self, filename, sourcelanguage=None,\n targetlanguage=None, datatype='plaintext'):\n \"\"\"creates a filenode with the given filename. All parameters\n are needed for XLIFF compliance.\"\"\"\n if sourcelanguage is None:\n sourcelanguage = self.sourcelanguage\n if targetlanguage is None:\n targetlanguage = self.targetlanguage\n\n # find the default NoName file tag and use it instead of creating a new one\n for filenode in self.document.getroot().iterchildren(self.namespaced(\"file\")):\n if filenode.get(\"original\") == \"NoName\":\n filenode.set(\"original\", filename)\n filenode.set(\"source-language\", sourcelanguage)\n if targetlanguage:\n filenode.set(\"target-language\", targetlanguage)\n return filenode\n\n filenode = etree.Element(self.namespaced(\"file\"))\n filenode.set(\"original\", filename)\n filenode.set(\"source-language\", sourcelanguage)\n if targetlanguage:\n filenode.set(\"target-language\", targetlanguage)\n filenode.set(\"datatype\", datatype)\n bodyNode = etree.SubElement(filenode, self.namespaced(self.bodyNode))\n return filenode\n\n def getfilename(self, filenode):\n \"\"\"returns the name of the given file\"\"\"\n return filenode.get(\"original\")\n\n def setfilename(self, filenode, filename):\n \"\"\"set the name of the given file\"\"\"\n return filenode.set(\"original\", filename)\n\n def getfilenames(self):\n \"\"\"returns all filenames in this XLIFF file\"\"\"\n filenodes = self.document.getroot().iterchildren(self.namespaced(\"file\"))\n filenames = [self.getfilename(filenode) for filenode in filenodes]\n filenames = 
filter(None, filenames)\n if len(filenames) == 1 and filenames[0] == '':\n filenames = []\n return filenames\n\n def getfilenode(self, filename, createifmissing=False):\n \"\"\"finds the filenode with the given name\"\"\"\n filenodes = self.document.getroot().iterchildren(self.namespaced(\"file\"))\n for filenode in filenodes:\n if self.getfilename(filenode) == filename:\n return filenode\n if createifmissing:\n filenode = self.createfilenode(filename)\n return filenode\n return None\n\n def getids(self, filename=None):\n if not filename:\n return super(xlifffile, self).getids()\n\n self.id_index = {}\n prefix = filename + ID_SEPARATOR\n units = (unit for unit in self.units if unit.getid().startswith(prefix))\n for index, unit in enumerate(units):\n self.id_index[unit.getid()[len(prefix):]] = unit\n return self.id_index.keys()\n\n def setsourcelanguage(self, language):\n if not language:\n return\n filenode = self.document.getroot().iterchildren(self.namespaced('file')).next()\n filenode.set(\"source-language\", language)\n\n def getsourcelanguage(self):\n filenode = self.document.getroot().iterchildren(self.namespaced('file')).next()\n return filenode.get(\"source-language\")\n sourcelanguage = property(getsourcelanguage, setsourcelanguage)\n\n def settargetlanguage(self, language):\n if not language:\n return\n filenode = self.document.getroot().iterchildren(self.namespaced('file')).next()\n filenode.set(\"target-language\", language)\n\n def gettargetlanguage(self):\n filenode = self.document.getroot().iterchildren(self.namespaced('file')).next()\n return filenode.get(\"target-language\")\n targetlanguage = property(gettargetlanguage, settargetlanguage)\n\n def getdatatype(self, filename=None):\n \"\"\"Returns the datatype of the stored file. 
If no filename is given,\n the datatype of the first file is given.\"\"\"\n if filename:\n node = self.getfilenode(filename)\n if not node is None:\n return node.get(\"datatype\")\n else:\n filenames = self.getfilenames()\n if len(filenames) > 0 and filenames[0] != \"NoName\":\n return self.getdatatype(filenames[0])\n return \"\"\n\n def getdate(self, filename=None):\n \"\"\"Returns the date attribute for the file.\n\n If no filename is given, the date of the first file is given.\n If the date attribute is not specified, None is returned.\n\n :returns: Date attribute of file\n :rtype: Date or None\n \"\"\"\n if filename:\n node = self.getfilenode(filename)\n if not node is None:\n return node.get(\"date\")\n else:\n filenames = self.getfilenames()\n if len(filenames) > 0 and filenames[0] != \"NoName\":\n return self.getdate(filenames[0])\n return None\n\n def removedefaultfile(self):\n \"\"\"We want to remove the default file-tag as soon as possible if we\n know if still present and empty.\"\"\"\n filenodes = list(self.document.getroot().iterchildren(self.namespaced(\"file\")))\n if len(filenodes) > 1:\n for filenode in filenodes:\n if (filenode.get(\"original\") == \"NoName\" and\n not list(filenode.iterdescendants(self.namespaced(self.UnitClass.rootNode)))):\n self.document.getroot().remove(filenode)\n break\n\n def getheadernode(self, filenode, createifmissing=False):\n \"\"\"finds the header node for the given filenode\"\"\"\n # TODO: Deprecated?\n headernode = filenode.iterchildren(self.namespaced(\"header\"))\n try:\n return headernode.next()\n except StopIteration:\n pass\n if not createifmissing:\n return None\n headernode = etree.SubElement(filenode, self.namespaced(\"header\"))\n return headernode\n\n def getbodynode(self, filenode, createifmissing=False):\n \"\"\"finds the body node for the given filenode\"\"\"\n bodynode = filenode.iterchildren(self.namespaced(\"body\"))\n try:\n return bodynode.next()\n except StopIteration:\n pass\n if not 
createifmissing:\n return None\n bodynode = etree.SubElement(filenode, self.namespaced(\"body\"))\n return bodynode\n\n def addsourceunit(self, source, filename=\"NoName\", createifmissing=False):\n \"\"\"adds the given trans-unit to the last used body node if the\n filename has changed it uses the slow method instead (will\n create the nodes required if asked). Returns success\"\"\"\n if self._filename != filename:\n if not self.switchfile(filename, createifmissing):\n return None\n unit = super(xlifffile, self).addsourceunit(source)\n self._messagenum += 1\n unit.setid(\"%d\" % self._messagenum)\n return unit\n\n def switchfile(self, filename, createifmissing=False):\n \"\"\"Adds the given trans-unit (will create the nodes required if asked).\n\n :returns: Success\n :rtype: Boolean\n \"\"\"\n self._filename = filename\n filenode = self.getfilenode(filename)\n if filenode is None:\n if not createifmissing:\n return False\n filenode = self.createfilenode(filename)\n self.document.getroot().append(filenode)\n\n self.body = self.getbodynode(filenode, createifmissing=createifmissing)\n if self.body is None:\n return False\n self._messagenum = len(list(self.body.iterdescendants(self.namespaced(\"trans-unit\"))))\n # TODO: was 0 based before - consider\n # messagenum = len(self.units)\n # TODO: we want to number them consecutively inside a body/file tag\n # instead of globally in the whole XLIFF file, but using\n # len(self.units) will be much faster\n return True\n\n def creategroup(self, filename=\"NoName\", createifmissing=False, restype=None):\n \"\"\"adds a group tag into the specified file\"\"\"\n if self._filename != filename:\n if not self.switchfile(filename, createifmissing):\n return None\n group = etree.SubElement(self.body, self.namespaced(\"group\"))\n if restype:\n group.set(\"restype\", restype)\n return group\n\n def __str__(self):\n self.removedefaultfile()\n return super(xlifffile, self).__str__()\n\n @classmethod\n def parsestring(cls, 
storestring):\n \"\"\"Parses the string to return the correct file object\"\"\"\n xliff = super(xlifffile, cls).parsestring(storestring)\n if xliff.units:\n header = xliff.units[0]\n if ((\"gettext-domain-header\" in (header.getrestype() or \"\") or\n xliff.getdatatype() == \"po\") and\n cls.__name__.lower() != \"poxlifffile\"):\n from translate.storage import poxliff\n xliff = poxliff.PoXliffFile.parsestring(storestring)\n return xliff\n"},"license":{"kind":"string","value":"mpl-2.0"}}},{"rowIdx":476120,"cells":{"repo_name":{"kind":"string","value":"amanuel/bigcouch"},"path":{"kind":"string","value":"couchjs/scons/scons-local-2.0.1/SCons/Tool/packaging/tarbz2.py"},"copies":{"kind":"string","value":"61"},"size":{"kind":"string","value":"1803"},"content":{"kind":"string","value":"\"\"\"SCons.Tool.Packaging.tarbz2\n\nThe tarbz2 SRC packager.\n\"\"\"\n\n#\n# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation\n# \n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n__revision__ = \"src/engine/SCons/Tool/packaging/tarbz2.py 5134 2010/08/16 23:02:40 bdeegan\"\n\nfrom SCons.Tool.packaging import stripinstallbuilder, putintopackageroot\n\ndef package(env, target, source, PACKAGEROOT, **kw):\n bld = env['BUILDERS']['Tar']\n bld.set_suffix('.tar.gz')\n target, source = putintopackageroot(target, source, env, PACKAGEROOT)\n target, source = stripinstallbuilder(target, source, env)\n return bld(env, target, source, TARFLAGS='-jc')\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab tabstop=4 shiftwidth=4:\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476121,"cells":{"repo_name":{"kind":"string","value":"NeCTAR-RC/nova"},"path":{"kind":"string","value":"nova/tests/unit/virt/libvirt/test_imagecache.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"42584"},"content":{"kind":"string","value":"# Copyright 2012 Michael Still and Canonical Inc\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport contextlib\nimport hashlib\nimport os\nimport time\n\nimport mock\nfrom oslo_concurrency import lockutils\nfrom oslo_concurrency import processutils\nfrom oslo_config import cfg\nfrom oslo_log import formatters\nfrom oslo_log import log as logging\nfrom oslo_serialization import jsonutils\nfrom oslo_utils import importutils\nfrom six.moves import cStringIO\n\nfrom nova import conductor\nfrom nova import context\nfrom nova import objects\nfrom nova import test\nfrom nova.tests.unit import fake_instance\nfrom nova import utils\nfrom nova.virt.libvirt import imagecache\nfrom nova.virt.libvirt import utils as libvirt_utils\n\nCONF = cfg.CONF\nCONF.import_opt('compute_manager', 'nova.service')\nCONF.import_opt('host', 'nova.netconf')\n\n\n@contextlib.contextmanager\ndef intercept_log_messages():\n try:\n mylog = logging.getLogger('nova')\n stream = cStringIO()\n handler = logging.logging.StreamHandler(stream)\n handler.setFormatter(formatters.ContextFormatter())\n mylog.logger.addHandler(handler)\n yield stream\n finally:\n mylog.logger.removeHandler(handler)\n\n\nclass ImageCacheManagerTestCase(test.NoDBTestCase):\n\n def setUp(self):\n super(ImageCacheManagerTestCase, self).setUp()\n self.stock_instance_names = set(['instance-00000001',\n 'instance-00000002',\n 'instance-00000003',\n 'banana-42-hamster'])\n\n def test_read_stored_checksum_missing(self):\n self.stub_out('os.path.exists', lambda x: False)\n csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)\n self.assertIsNone(csum)\n\n @mock.patch.object(os.path, 'exists', return_value=True)\n @mock.patch.object(time, 'time', return_value=2000000)\n @mock.patch.object(os.path, 'getmtime', return_value=1000000)\n def test_get_age_of_file(self, mock_getmtime, mock_time, mock_exists):\n image_cache_manager = imagecache.ImageCacheManager()\n exists, age = 
image_cache_manager._get_age_of_file('/tmp')\n self.assertTrue(exists)\n self.assertEqual(1000000, age)\n\n @mock.patch.object(os.path, 'exists', return_value=False)\n def test_get_age_of_file_not_exists(self, mock_exists):\n image_cache_manager = imagecache.ImageCacheManager()\n exists, age = image_cache_manager._get_age_of_file('/tmp')\n self.assertFalse(exists)\n self.assertEqual(0, age)\n\n def test_read_stored_checksum(self):\n with utils.tempdir() as tmpdir:\n self.flags(instances_path=tmpdir)\n self.flags(image_info_filename_pattern=('$instances_path/'\n '%(image)s.info'),\n group='libvirt')\n\n csum_input = '{\"sha1\": \"fdghkfhkgjjksfdgjksjkghsdf\"}\\n'\n fname = os.path.join(tmpdir, 'aaa')\n info_fname = imagecache.get_info_filename(fname)\n f = open(info_fname, 'w')\n f.write(csum_input)\n f.close()\n\n csum_output = imagecache.read_stored_checksum(fname,\n timestamped=False)\n self.assertEqual(csum_input.rstrip(),\n '{\"sha1\": \"%s\"}' % csum_output)\n\n def test_read_stored_checksum_legacy_essex(self):\n with utils.tempdir() as tmpdir:\n self.flags(instances_path=tmpdir)\n self.flags(image_info_filename_pattern=('$instances_path/'\n '%(image)s.info'),\n group='libvirt')\n\n fname = os.path.join(tmpdir, 'aaa')\n old_fname = fname + '.sha1'\n f = open(old_fname, 'w')\n f.write('fdghkfhkgjjksfdgjksjkghsdf')\n f.close()\n\n csum_output = imagecache.read_stored_checksum(fname,\n timestamped=False)\n self.assertEqual(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')\n self.assertFalse(os.path.exists(old_fname))\n info_fname = imagecache.get_info_filename(fname)\n self.assertTrue(os.path.exists(info_fname))\n\n def test_list_base_images(self):\n listing = ['00000001',\n 'ephemeral_0_20_None',\n '17d1b00b81642842e514494a78e804e9a511637c_5368709120.info',\n '00000004',\n 'swap_1000']\n images = ['e97222e91fc4241f49a7f520d1dcf446751129b3_sm',\n 'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',\n 'e97222e91fc4241f49a7f520d1dcf446751129b3',\n 
'17d1b00b81642842e514494a78e804e9a511637c',\n '17d1b00b81642842e514494a78e804e9a511637c_5368709120',\n '17d1b00b81642842e514494a78e804e9a511637c_10737418240']\n listing.extend(images)\n\n self.stub_out('os.listdir', lambda x: listing)\n self.stub_out('os.path.isfile', lambda x: True)\n\n base_dir = '/var/lib/nova/instances/_base'\n self.flags(instances_path='/var/lib/nova/instances')\n\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager._list_base_images(base_dir)\n\n sanitized = []\n for ent in image_cache_manager.unexplained_images:\n sanitized.append(ent.replace(base_dir + '/', ''))\n\n self.assertEqual(sorted(sanitized), sorted(images))\n\n expected = os.path.join(base_dir,\n 'e97222e91fc4241f49a7f520d1dcf446751129b3')\n self.assertIn(expected, image_cache_manager.unexplained_images)\n\n expected = os.path.join(base_dir,\n '17d1b00b81642842e514494a78e804e9a511637c_'\n '10737418240')\n self.assertIn(expected, image_cache_manager.unexplained_images)\n\n unexpected = os.path.join(base_dir, '00000004')\n self.assertNotIn(unexpected, image_cache_manager.unexplained_images)\n\n for ent in image_cache_manager.unexplained_images:\n self.assertTrue(ent.startswith(base_dir))\n\n self.assertEqual(len(image_cache_manager.originals), 2)\n\n expected = os.path.join(base_dir,\n '17d1b00b81642842e514494a78e804e9a511637c')\n self.assertIn(expected, image_cache_manager.originals)\n\n unexpected = os.path.join(base_dir,\n '17d1b00b81642842e514494a78e804e9a511637c_'\n '10737418240')\n self.assertNotIn(unexpected, image_cache_manager.originals)\n\n self.assertEqual(1, len(image_cache_manager.back_swap_images))\n self.assertIn('swap_1000', image_cache_manager.back_swap_images)\n\n def test_list_backing_images_small(self):\n self.stub_out('os.listdir',\n lambda x: ['_base', 'instance-00000001',\n 'instance-00000002', 'instance-00000003'])\n self.stub_out('os.path.exists',\n lambda x: x.find('instance-') != -1)\n self.stubs.Set(libvirt_utils, 
'get_disk_backing_file',\n lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')\n\n found = os.path.join(CONF.instances_path,\n CONF.image_cache_subdirectory_name,\n 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')\n\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.unexplained_images = [found]\n image_cache_manager.instance_names = self.stock_instance_names\n\n inuse_images = image_cache_manager._list_backing_images()\n\n self.assertEqual(inuse_images, [found])\n self.assertEqual(len(image_cache_manager.unexplained_images), 0)\n\n def test_list_backing_images_resized(self):\n self.stub_out('os.listdir',\n lambda x: ['_base', 'instance-00000001',\n 'instance-00000002', 'instance-00000003'])\n self.stub_out('os.path.exists',\n lambda x: x.find('instance-') != -1)\n self.stubs.Set(libvirt_utils, 'get_disk_backing_file',\n lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'\n '10737418240'))\n\n found = os.path.join(CONF.instances_path,\n CONF.image_cache_subdirectory_name,\n 'e97222e91fc4241f49a7f520d1dcf446751129b3_'\n '10737418240')\n\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.unexplained_images = [found]\n image_cache_manager.instance_names = self.stock_instance_names\n\n inuse_images = image_cache_manager._list_backing_images()\n\n self.assertEqual(inuse_images, [found])\n self.assertEqual(len(image_cache_manager.unexplained_images), 0)\n\n def test_list_backing_images_instancename(self):\n self.stub_out('os.listdir',\n lambda x: ['_base', 'banana-42-hamster'])\n self.stub_out('os.path.exists',\n lambda x: x.find('banana-42-hamster') != -1)\n self.stubs.Set(libvirt_utils, 'get_disk_backing_file',\n lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')\n\n found = os.path.join(CONF.instances_path,\n CONF.image_cache_subdirectory_name,\n 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')\n\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.unexplained_images = [found]\n 
image_cache_manager.instance_names = self.stock_instance_names\n\n inuse_images = image_cache_manager._list_backing_images()\n\n self.assertEqual(inuse_images, [found])\n self.assertEqual(len(image_cache_manager.unexplained_images), 0)\n\n def test_list_backing_images_disk_notexist(self):\n self.stub_out('os.listdir',\n lambda x: ['_base', 'banana-42-hamster'])\n self.stub_out('os.path.exists',\n lambda x: x.find('banana-42-hamster') != -1)\n\n def fake_get_disk(disk_path):\n raise processutils.ProcessExecutionError()\n\n self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk)\n\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.unexplained_images = []\n image_cache_manager.instance_names = self.stock_instance_names\n\n self.assertRaises(processutils.ProcessExecutionError,\n image_cache_manager._list_backing_images)\n\n def test_find_base_file_nothing(self):\n self.stub_out('os.path.exists', lambda x: False)\n\n base_dir = '/var/lib/nova/instances/_base'\n fingerprint = '549867354867'\n image_cache_manager = imagecache.ImageCacheManager()\n res = list(image_cache_manager._find_base_file(base_dir, fingerprint))\n\n self.assertEqual(0, len(res))\n\n def test_find_base_file_small(self):\n fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'\n self.stub_out('os.path.exists',\n lambda x: x.endswith('%s_sm' % fingerprint))\n\n base_dir = '/var/lib/nova/instances/_base'\n image_cache_manager = imagecache.ImageCacheManager()\n res = list(image_cache_manager._find_base_file(base_dir, fingerprint))\n\n base_file = os.path.join(base_dir, fingerprint + '_sm')\n self.assertEqual(res, [(base_file, True, False)])\n\n def test_find_base_file_resized(self):\n fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'\n listing = ['00000001',\n 'ephemeral_0_20_None',\n '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',\n '00000004']\n\n self.stub_out('os.listdir', lambda x: listing)\n self.stub_out('os.path.exists',\n lambda x: 
x.endswith('%s_10737418240' % fingerprint))\n self.stub_out('os.path.isfile', lambda x: True)\n\n base_dir = '/var/lib/nova/instances/_base'\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager._list_base_images(base_dir)\n res = list(image_cache_manager._find_base_file(base_dir, fingerprint))\n\n base_file = os.path.join(base_dir, fingerprint + '_10737418240')\n self.assertEqual(res, [(base_file, False, True)])\n\n def test_find_base_file_all(self):\n fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'\n listing = ['00000001',\n 'ephemeral_0_20_None',\n '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_sm',\n '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',\n '00000004']\n\n self.stub_out('os.listdir', lambda x: listing)\n self.stub_out('os.path.exists', lambda x: True)\n self.stub_out('os.path.isfile', lambda x: True)\n\n base_dir = '/var/lib/nova/instances/_base'\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager._list_base_images(base_dir)\n res = list(image_cache_manager._find_base_file(base_dir, fingerprint))\n\n base_file1 = os.path.join(base_dir, fingerprint)\n base_file2 = os.path.join(base_dir, fingerprint + '_sm')\n base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')\n self.assertEqual(res, [(base_file1, False, False),\n (base_file2, True, False),\n (base_file3, False, True)])\n\n @contextlib.contextmanager\n def _make_base_file(self, checksum=True, lock=True):\n \"\"\"Make a base file for testing.\"\"\"\n\n with utils.tempdir() as tmpdir:\n self.flags(instances_path=tmpdir)\n self.flags(image_info_filename_pattern=('$instances_path/'\n '%(image)s.info'),\n group='libvirt')\n fname = os.path.join(tmpdir, 'aaa')\n\n base_file = open(fname, 'w')\n base_file.write('data')\n base_file.close()\n\n if lock:\n lockdir = os.path.join(tmpdir, 'locks')\n lockname = os.path.join(lockdir, 'nova-aaa')\n os.mkdir(lockdir)\n lock_file = open(lockname, 'w')\n lock_file.write('data')\n 
lock_file.close()\n\n base_file = open(fname, 'r')\n\n if checksum:\n imagecache.write_stored_checksum(fname)\n\n base_file.close()\n yield fname\n\n def test_remove_base_file(self):\n with self._make_base_file() as fname:\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager._remove_base_file(fname)\n info_fname = imagecache.get_info_filename(fname)\n\n lock_name = 'nova-' + os.path.split(fname)[-1]\n lock_dir = os.path.join(CONF.instances_path, 'locks')\n lock_file = os.path.join(lock_dir, lock_name)\n\n # Files are initially too new to delete\n self.assertTrue(os.path.exists(fname))\n self.assertTrue(os.path.exists(info_fname))\n self.assertTrue(os.path.exists(lock_file))\n\n # Old files get cleaned up though\n os.utime(fname, (-1, time.time() - 3601))\n image_cache_manager._remove_base_file(fname)\n\n self.assertFalse(os.path.exists(fname))\n self.assertFalse(os.path.exists(info_fname))\n self.assertFalse(os.path.exists(lock_file))\n\n def test_remove_base_file_original(self):\n with self._make_base_file() as fname:\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.originals = [fname]\n image_cache_manager._remove_base_file(fname)\n info_fname = imagecache.get_info_filename(fname)\n\n # Files are initially too new to delete\n self.assertTrue(os.path.exists(fname))\n self.assertTrue(os.path.exists(info_fname))\n\n # This file should stay longer than a resized image\n os.utime(fname, (-1, time.time() - 3601))\n image_cache_manager._remove_base_file(fname)\n\n self.assertTrue(os.path.exists(fname))\n self.assertTrue(os.path.exists(info_fname))\n\n # Originals don't stay forever though\n os.utime(fname, (-1, time.time() - 3600 * 25))\n image_cache_manager._remove_base_file(fname)\n\n self.assertFalse(os.path.exists(fname))\n self.assertFalse(os.path.exists(info_fname))\n\n def test_remove_base_file_dne(self):\n # This test is solely to execute the \"does not exist\" code path. 
We\n # don't expect the method being tested to do anything in this case.\n with utils.tempdir() as tmpdir:\n self.flags(instances_path=tmpdir)\n self.flags(image_info_filename_pattern=('$instances_path/'\n '%(image)s.info'),\n group='libvirt')\n\n fname = os.path.join(tmpdir, 'aaa')\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager._remove_base_file(fname)\n\n def test_remove_base_file_oserror(self):\n with intercept_log_messages() as stream:\n with utils.tempdir() as tmpdir:\n self.flags(instances_path=tmpdir)\n self.flags(image_info_filename_pattern=('$instances_path/'\n '%(image)s.info'),\n group='libvirt')\n\n fname = os.path.join(tmpdir, 'aaa')\n\n os.mkdir(fname)\n os.utime(fname, (-1, time.time() - 3601))\n\n # This will raise an OSError because of file permissions\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager._remove_base_file(fname)\n\n self.assertTrue(os.path.exists(fname))\n self.assertNotEqual(stream.getvalue().find('Failed to remove'),\n -1)\n\n def test_handle_base_image_unused(self):\n img = '123'\n\n with self._make_base_file() as fname:\n os.utime(fname, (-1, time.time() - 3601))\n\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.unexplained_images = [fname]\n image_cache_manager._handle_base_image(img, fname)\n\n self.assertEqual(image_cache_manager.unexplained_images, [])\n self.assertEqual(image_cache_manager.removable_base_files,\n [fname])\n self.assertEqual(image_cache_manager.corrupt_base_files, [])\n\n @mock.patch.object(libvirt_utils, 'update_mtime')\n def test_handle_base_image_used(self, mock_mtime):\n img = '123'\n\n with self._make_base_file() as fname:\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.unexplained_images = [fname]\n image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}\n image_cache_manager._handle_base_image(img, fname)\n\n mock_mtime.assert_called_once_with(fname)\n 
self.assertEqual(image_cache_manager.unexplained_images, [])\n self.assertEqual(image_cache_manager.removable_base_files, [])\n self.assertEqual(image_cache_manager.corrupt_base_files, [])\n\n @mock.patch.object(libvirt_utils, 'update_mtime')\n def test_handle_base_image_used_remotely(self, mock_mtime):\n img = '123'\n\n with self._make_base_file() as fname:\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.unexplained_images = [fname]\n image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}\n image_cache_manager._handle_base_image(img, fname)\n\n mock_mtime.assert_called_once_with(fname)\n self.assertEqual(image_cache_manager.unexplained_images, [])\n self.assertEqual(image_cache_manager.removable_base_files, [])\n self.assertEqual(image_cache_manager.corrupt_base_files, [])\n\n def test_handle_base_image_absent(self):\n img = '123'\n\n with intercept_log_messages() as stream:\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}\n image_cache_manager._handle_base_image(img, None)\n\n self.assertEqual(image_cache_manager.unexplained_images, [])\n self.assertEqual(image_cache_manager.removable_base_files, [])\n self.assertEqual(image_cache_manager.corrupt_base_files, [])\n self.assertNotEqual(stream.getvalue().find('an absent base file'),\n -1)\n\n def test_handle_base_image_used_missing(self):\n img = '123'\n\n with utils.tempdir() as tmpdir:\n self.flags(instances_path=tmpdir)\n self.flags(image_info_filename_pattern=('$instances_path/'\n '%(image)s.info'),\n group='libvirt')\n\n fname = os.path.join(tmpdir, 'aaa')\n\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.unexplained_images = [fname]\n image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}\n image_cache_manager._handle_base_image(img, fname)\n\n self.assertEqual(image_cache_manager.unexplained_images, [])\n 
self.assertEqual(image_cache_manager.removable_base_files, [])\n self.assertEqual(image_cache_manager.corrupt_base_files, [])\n\n @mock.patch.object(libvirt_utils, 'update_mtime')\n def test_handle_base_image_checksum_fails(self, mock_mtime):\n self.flags(checksum_base_images=True, group='libvirt')\n\n img = '123'\n\n with self._make_base_file() as fname:\n with open(fname, 'w') as f:\n f.write('banana')\n\n d = {'sha1': '21323454'}\n with open('%s.info' % fname, 'w') as f:\n f.write(jsonutils.dumps(d))\n\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.unexplained_images = [fname]\n image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}\n image_cache_manager._handle_base_image(img, fname)\n\n mock_mtime.assert_called_once_with(fname)\n self.assertEqual(image_cache_manager.unexplained_images, [])\n self.assertEqual(image_cache_manager.removable_base_files, [])\n self.assertEqual(image_cache_manager.corrupt_base_files,\n [fname])\n\n @mock.patch.object(libvirt_utils, 'update_mtime')\n @mock.patch.object(lockutils, 'external_lock')\n def test_verify_base_images(self, mock_lock, mock_mtime):\n hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'\n hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'\n hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'\n hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'\n\n self.flags(instances_path='/instance_path',\n image_cache_subdirectory_name='_base')\n\n base_file_list = ['00000001',\n 'ephemeral_0_20_None',\n 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',\n 'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',\n hashed_42,\n hashed_1,\n hashed_21,\n hashed_22,\n '%s_5368709120' % hashed_1,\n '%s_10737418240' % hashed_1,\n '00000004']\n\n def fq_path(path):\n return os.path.join('/instance_path/_base/', path)\n\n # Fake base directory existence\n orig_exists = os.path.exists\n\n def exists(path):\n # The python coverage tool got angry with my overly broad mocks\n if not 
path.startswith('/instance_path'):\n return orig_exists(path)\n\n if path in ['/instance_path',\n '/instance_path/_base',\n '/instance_path/instance-1/disk',\n '/instance_path/instance-2/disk',\n '/instance_path/instance-3/disk',\n '/instance_path/_base/%s.info' % hashed_42]:\n return True\n\n for p in base_file_list:\n if path == fq_path(p):\n return True\n if path == fq_path(p) + '.info':\n return False\n\n if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,\n hashed_21,\n hashed_22,\n hashed_42]]:\n return False\n\n self.fail('Unexpected path existence check: %s' % path)\n\n self.stub_out('os.path.exists', lambda x: exists(x))\n\n # Fake up some instances in the instances directory\n orig_listdir = os.listdir\n\n def listdir(path):\n # The python coverage tool got angry with my overly broad mocks\n if not path.startswith('/instance_path'):\n return orig_listdir(path)\n\n if path == '/instance_path':\n return ['instance-1', 'instance-2', 'instance-3', '_base']\n\n if path == '/instance_path/_base':\n return base_file_list\n\n self.fail('Unexpected directory listed: %s' % path)\n\n self.stub_out('os.listdir', lambda x: listdir(x))\n\n # Fake isfile for these faked images in _base\n orig_isfile = os.path.isfile\n\n def isfile(path):\n # The python coverage tool got angry with my overly broad mocks\n if not path.startswith('/instance_path'):\n return orig_isfile(path)\n\n for p in base_file_list:\n if path == fq_path(p):\n return True\n\n self.fail('Unexpected isfile call: %s' % path)\n\n self.stub_out('os.path.isfile', lambda x: isfile(x))\n\n # Fake the database call which lists running instances\n instances = [{'image_ref': '1',\n 'host': CONF.host,\n 'name': 'instance-1',\n 'uuid': '123',\n 'vm_state': '',\n 'task_state': ''},\n {'image_ref': '1',\n 'kernel_id': '21',\n 'ramdisk_id': '22',\n 'host': CONF.host,\n 'name': 'instance-2',\n 'uuid': '456',\n 'vm_state': '',\n 'task_state': ''}]\n all_instances = [fake_instance.fake_instance_obj(None, 
**instance)\n for instance in instances]\n image_cache_manager = imagecache.ImageCacheManager()\n\n # Fake the utils call which finds the backing image\n def get_disk_backing_file(path):\n if path in ['/instance_path/instance-1/disk',\n '/instance_path/instance-2/disk']:\n return fq_path('%s_5368709120' % hashed_1)\n self.fail('Unexpected backing file lookup: %s' % path)\n\n self.stubs.Set(libvirt_utils, 'get_disk_backing_file',\n lambda x: get_disk_backing_file(x))\n\n # Fake out verifying checksums, as that is tested elsewhere\n self.stubs.Set(image_cache_manager, '_verify_checksum',\n lambda x, y: True)\n\n # Fake getmtime as well\n orig_getmtime = os.path.getmtime\n\n def getmtime(path):\n if not path.startswith('/instance_path'):\n return orig_getmtime(path)\n\n return 1000000\n\n self.stub_out('os.path.getmtime', lambda x: getmtime(x))\n\n # Make sure we don't accidentally remove a real file\n orig_remove = os.remove\n\n def remove(path):\n if not path.startswith('/instance_path'):\n return orig_remove(path)\n\n # Don't try to remove fake files\n return\n\n self.stub_out('os.remove', lambda x: remove(x))\n\n self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList,\n 'get_by_instance_uuid')\n\n ctxt = context.get_admin_context()\n objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(\n ctxt, '123').AndReturn(None)\n objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(\n ctxt, '456').AndReturn(None)\n\n self.mox.ReplayAll()\n # And finally we can make the call we're actually testing...\n # The argument here should be a context, but it is mocked out\n image_cache_manager.update(ctxt, all_instances)\n\n # Verify\n active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1),\n fq_path(hashed_21), fq_path(hashed_22)]\n for act in active:\n self.assertIn(act, image_cache_manager.active_base_files)\n self.assertEqual(len(image_cache_manager.active_base_files),\n len(active))\n\n for rem in 
[fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),\n fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'),\n fq_path(hashed_42),\n fq_path('%s_10737418240' % hashed_1)]:\n self.assertIn(rem, image_cache_manager.removable_base_files)\n\n # Ensure there are no \"corrupt\" images as well\n self.assertEqual(len(image_cache_manager.corrupt_base_files), 0)\n\n def test_verify_base_images_no_base(self):\n self.flags(instances_path='/tmp/no/such/dir/name/please')\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.update(None, [])\n\n def test_is_valid_info_file(self):\n hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'\n\n self.flags(instances_path='/tmp/no/such/dir/name/please')\n self.flags(image_info_filename_pattern=('$instances_path/_base/'\n '%(image)s.info'),\n group='libvirt')\n base_filename = os.path.join(CONF.instances_path, '_base', hashed)\n\n is_valid_info_file = imagecache.is_valid_info_file\n self.assertFalse(is_valid_info_file('banana'))\n self.assertFalse(is_valid_info_file(\n os.path.join(CONF.instances_path, '_base', '00000001')))\n self.assertFalse(is_valid_info_file(base_filename))\n self.assertFalse(is_valid_info_file(base_filename + '.sha1'))\n self.assertTrue(is_valid_info_file(base_filename + '.info'))\n\n def test_configured_checksum_path(self):\n with utils.tempdir() as tmpdir:\n self.flags(instances_path=tmpdir)\n self.flags(image_info_filename_pattern=('$instances_path/'\n '%(image)s.info'),\n group='libvirt')\n\n # Ensure there is a base directory\n os.mkdir(os.path.join(tmpdir, '_base'))\n\n # Fake the database call which lists running instances\n instances = [{'image_ref': '1',\n 'host': CONF.host,\n 'name': 'instance-1',\n 'uuid': '123',\n 'vm_state': '',\n 'task_state': ''},\n {'image_ref': '1',\n 'host': CONF.host,\n 'name': 'instance-2',\n 'uuid': '456',\n 'vm_state': '',\n 'task_state': ''}]\n\n all_instances = []\n for instance in instances:\n all_instances.append(fake_instance.fake_instance_obj(\n 
None, **instance))\n\n def touch(filename):\n f = open(filename, 'w')\n f.write('Touched')\n f.close()\n\n old = time.time() - (25 * 3600)\n hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'\n base_filename = os.path.join(tmpdir, hashed)\n touch(base_filename)\n touch(base_filename + '.info')\n os.utime(base_filename + '.info', (old, old))\n touch(base_filename + '.info')\n os.utime(base_filename + '.info', (old, old))\n\n self.mox.StubOutWithMock(\n objects.block_device.BlockDeviceMappingList,\n 'get_by_instance_uuid')\n\n ctxt = context.get_admin_context()\n objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(\n ctxt, '123').AndReturn(None)\n objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(\n ctxt, '456').AndReturn(None)\n\n self.mox.ReplayAll()\n\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager.update(ctxt,\n all_instances)\n\n self.assertTrue(os.path.exists(base_filename))\n self.assertTrue(os.path.exists(base_filename + '.info'))\n\n def test_run_image_cache_manager_pass(self):\n was = {'called': False}\n\n def fake_get_all_by_filters(context, *args, **kwargs):\n was['called'] = True\n instances = []\n for x in range(2):\n instances.append(fake_instance.fake_db_instance(\n image_ref='1',\n uuid=x,\n name=x,\n vm_state='',\n task_state=''))\n return instances\n\n with utils.tempdir() as tmpdir:\n self.flags(instances_path=tmpdir)\n\n self.stub_out('nova.db.instance_get_all_by_filters',\n fake_get_all_by_filters)\n compute = importutils.import_object(CONF.compute_manager)\n self.flags(use_local=True, group='conductor')\n compute.conductor_api = conductor.API()\n ctxt = context.get_admin_context()\n compute._run_image_cache_manager_pass(ctxt)\n self.assertTrue(was['called'])\n\n def test_store_swap_image(self):\n image_cache_manager = imagecache.ImageCacheManager()\n image_cache_manager._store_swap_image('swap_')\n image_cache_manager._store_swap_image('swap_123')\n 
image_cache_manager._store_swap_image('swap_456')\n image_cache_manager._store_swap_image('swap_abc')\n image_cache_manager._store_swap_image('123_swap')\n image_cache_manager._store_swap_image('swap_129_')\n\n self.assertEqual(len(image_cache_manager.back_swap_images), 2)\n expect_set = set(['swap_123', 'swap_456'])\n self.assertEqual(image_cache_manager.back_swap_images, expect_set)\n\n @mock.patch.object(lockutils, 'external_lock')\n @mock.patch.object(libvirt_utils, 'update_mtime')\n @mock.patch('os.path.exists', return_value=True)\n @mock.patch('os.path.getmtime')\n @mock.patch('os.remove')\n def test_age_and_verify_swap_images(self, mock_remove, mock_getmtime,\n mock_exist, mock_mtime, mock_lock):\n image_cache_manager = imagecache.ImageCacheManager()\n expected_remove = set()\n expected_exist = set(['swap_128', 'swap_256'])\n\n image_cache_manager.back_swap_images.add('swap_128')\n image_cache_manager.back_swap_images.add('swap_256')\n\n image_cache_manager.used_swap_images.add('swap_128')\n\n def getmtime(path):\n return time.time() - 1000000\n\n mock_getmtime.side_effect = getmtime\n\n def removefile(path):\n if not path.startswith('/tmp_age_test'):\n return os.remove(path)\n\n fn = os.path.split(path)[-1]\n expected_remove.add(fn)\n expected_exist.remove(fn)\n\n mock_remove.side_effect = removefile\n\n image_cache_manager._age_and_verify_swap_images(None, '/tmp_age_test')\n self.assertEqual(1, len(expected_exist))\n self.assertEqual(1, len(expected_remove))\n self.assertIn('swap_128', expected_exist)\n self.assertIn('swap_256', expected_remove)\n\n @mock.patch.object(utils, 'synchronized')\n @mock.patch.object(imagecache.ImageCacheManager, '_get_age_of_file',\n return_value=(True, 100))\n def test_lock_acquired_on_removing_old_enough_files(self, mock_get_age,\n mock_synchronized):\n base_file = '/tmp_age_test'\n lock_path = os.path.join(CONF.instances_path, 'locks')\n lock_file = os.path.split(base_file)[-1]\n image_cache_manager = 
imagecache.ImageCacheManager()\n image_cache_manager._remove_old_enough_file(\n base_file, 60, remove_sig=False, remove_lock=False)\n mock_synchronized.assert_called_once_with(lock_file, external=True,\n lock_path=lock_path)\n\n\nclass VerifyChecksumTestCase(test.NoDBTestCase):\n\n def setUp(self):\n super(VerifyChecksumTestCase, self).setUp()\n self.img = {'container_format': 'ami', 'id': '42'}\n self.flags(checksum_base_images=True, group='libvirt')\n\n def _make_checksum(self, tmpdir):\n testdata = ('OpenStack Software delivers a massively scalable cloud '\n 'operating system.')\n\n fname = os.path.join(tmpdir, 'aaa')\n info_fname = imagecache.get_info_filename(fname)\n\n with open(fname, 'w') as f:\n f.write(testdata)\n\n return fname, info_fname, testdata\n\n def _write_file(self, info_fname, info_attr, testdata):\n f = open(info_fname, 'w')\n if info_attr == \"csum valid\":\n csum = hashlib.sha1()\n csum.update(testdata)\n f.write('{\"sha1\": \"%s\"}\\n' % csum.hexdigest())\n elif info_attr == \"csum invalid, not json\":\n f.write('banana')\n else:\n f.write('{\"sha1\": \"banana\"}')\n f.close()\n\n def _check_body(self, tmpdir, info_attr):\n self.flags(instances_path=tmpdir)\n self.flags(image_info_filename_pattern=('$instances_path/'\n '%(image)s.info'),\n group='libvirt')\n fname, info_fname, testdata = self._make_checksum(tmpdir)\n self._write_file(info_fname, info_attr, testdata)\n image_cache_manager = imagecache.ImageCacheManager()\n return image_cache_manager, fname\n\n def test_verify_checksum(self):\n with utils.tempdir() as tmpdir:\n image_cache_manager, fname = self._check_body(tmpdir, \"csum valid\")\n res = image_cache_manager._verify_checksum(self.img, fname)\n self.assertTrue(res)\n\n def test_verify_checksum_disabled(self):\n self.flags(checksum_base_images=False, group='libvirt')\n with utils.tempdir() as tmpdir:\n image_cache_manager, fname = self._check_body(tmpdir, \"csum valid\")\n res = image_cache_manager._verify_checksum(self.img, 
fname)\n self.assertIsNone(res)\n\n def test_verify_checksum_invalid_json(self):\n with intercept_log_messages() as stream:\n with utils.tempdir() as tmpdir:\n image_cache_manager, fname = (\n self._check_body(tmpdir, \"csum invalid, not json\"))\n res = image_cache_manager._verify_checksum(\n self.img, fname, create_if_missing=False)\n self.assertFalse(res)\n log = stream.getvalue()\n\n # NOTE(mikal): this is a skip not a fail because the file is\n # present, but is not in valid JSON format and therefore is\n # skipped.\n self.assertNotEqual(log.find('image verification skipped'), -1)\n\n def test_verify_checksum_invalid_repaired(self):\n with utils.tempdir() as tmpdir:\n image_cache_manager, fname = (\n self._check_body(tmpdir, \"csum invalid, not json\"))\n res = image_cache_manager._verify_checksum(\n self.img, fname, create_if_missing=True)\n self.assertIsNone(res)\n\n def test_verify_checksum_invalid(self):\n with intercept_log_messages() as stream:\n with utils.tempdir() as tmpdir:\n image_cache_manager, fname = (\n self._check_body(tmpdir, \"csum invalid, valid json\"))\n res = image_cache_manager._verify_checksum(self.img, fname)\n self.assertFalse(res)\n log = stream.getvalue()\n self.assertNotEqual(log.find('image verification failed'), -1)\n\n def test_verify_checksum_file_missing(self):\n with utils.tempdir() as tmpdir:\n self.flags(instances_path=tmpdir)\n self.flags(image_info_filename_pattern=('$instances_path/'\n '%(image)s.info'),\n group='libvirt')\n fname, info_fname, testdata = self._make_checksum(tmpdir)\n\n image_cache_manager = imagecache.ImageCacheManager()\n res = image_cache_manager._verify_checksum('aaa', fname)\n self.assertIsNone(res)\n\n # Checksum requests for a file with no checksum now have the\n # side effect of creating the checksum\n 
self.assertTrue(os.path.exists(info_fname))\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476122,"cells":{"repo_name":{"kind":"string","value":"2014c2g23/2015cda"},"path":{"kind":"string","value":"static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_assertions.py"},"copies":{"kind":"string","value":"738"},"size":{"kind":"string","value":"15398"},"content":{"kind":"string","value":"import datetime\nimport warnings\nimport unittest\nfrom itertools import product\n\n\nclass Test_Assertions(unittest.TestCase):\n def test_AlmostEqual(self):\n self.assertAlmostEqual(1.00000001, 1.0)\n self.assertNotAlmostEqual(1.0000001, 1.0)\n self.assertRaises(self.failureException,\n self.assertAlmostEqual, 1.0000001, 1.0)\n self.assertRaises(self.failureException,\n self.assertNotAlmostEqual, 1.00000001, 1.0)\n\n self.assertAlmostEqual(1.1, 1.0, places=0)\n self.assertRaises(self.failureException,\n self.assertAlmostEqual, 1.1, 1.0, places=1)\n\n self.assertAlmostEqual(0, .1+.1j, places=0)\n self.assertNotAlmostEqual(0, .1+.1j, places=1)\n self.assertRaises(self.failureException,\n self.assertAlmostEqual, 0, .1+.1j, places=1)\n self.assertRaises(self.failureException,\n self.assertNotAlmostEqual, 0, .1+.1j, places=0)\n\n self.assertAlmostEqual(float('inf'), float('inf'))\n self.assertRaises(self.failureException, self.assertNotAlmostEqual,\n float('inf'), float('inf'))\n\n def test_AmostEqualWithDelta(self):\n self.assertAlmostEqual(1.1, 1.0, delta=0.5)\n self.assertAlmostEqual(1.0, 1.1, delta=0.5)\n self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)\n self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)\n\n self.assertRaises(self.failureException, self.assertAlmostEqual,\n 1.1, 1.0, delta=0.05)\n self.assertRaises(self.failureException, self.assertNotAlmostEqual,\n 1.1, 1.0, delta=0.5)\n\n self.assertRaises(TypeError, self.assertAlmostEqual,\n 1.1, 1.0, places=2, delta=2)\n self.assertRaises(TypeError, self.assertNotAlmostEqual,\n 1.1, 1.0, places=2, delta=2)\n\n 
first = datetime.datetime.now()\n second = first + datetime.timedelta(seconds=10)\n self.assertAlmostEqual(first, second,\n delta=datetime.timedelta(seconds=20))\n self.assertNotAlmostEqual(first, second,\n delta=datetime.timedelta(seconds=5))\n\n def test_assertRaises(self):\n def _raise(e):\n raise e\n self.assertRaises(KeyError, _raise, KeyError)\n self.assertRaises(KeyError, _raise, KeyError(\"key\"))\n try:\n self.assertRaises(KeyError, lambda: None)\n except self.failureException as e:\n self.assertIn(\"KeyError not raised\", str(e))\n else:\n self.fail(\"assertRaises() didn't fail\")\n try:\n self.assertRaises(KeyError, _raise, ValueError)\n except ValueError:\n pass\n else:\n self.fail(\"assertRaises() didn't let exception pass through\")\n with self.assertRaises(KeyError) as cm:\n try:\n raise KeyError\n except Exception as e:\n exc = e\n raise\n self.assertIs(cm.exception, exc)\n\n with self.assertRaises(KeyError):\n raise KeyError(\"key\")\n try:\n with self.assertRaises(KeyError):\n pass\n except self.failureException as e:\n self.assertIn(\"KeyError not raised\", str(e))\n else:\n self.fail(\"assertRaises() didn't fail\")\n try:\n with self.assertRaises(KeyError):\n raise ValueError\n except ValueError:\n pass\n else:\n self.fail(\"assertRaises() didn't let exception pass through\")\n\n def testAssertNotRegex(self):\n self.assertNotRegex('Ala ma kota', r'r+')\n try:\n self.assertNotRegex('Ala ma kota', r'k.t', 'Message')\n except self.failureException as e:\n self.assertIn(\"'kot'\", e.args[0])\n self.assertIn('Message', e.args[0])\n else:\n self.fail('assertNotRegex should have failed.')\n\n\nclass TestLongMessage(unittest.TestCase):\n \"\"\"Test that the individual asserts honour longMessage.\n This actually tests all the message behaviour for\n asserts that use longMessage.\"\"\"\n\n def setUp(self):\n class TestableTestFalse(unittest.TestCase):\n longMessage = False\n failureException = self.failureException\n\n def testTest(self):\n pass\n\n class 
TestableTestTrue(unittest.TestCase):\n longMessage = True\n failureException = self.failureException\n\n def testTest(self):\n pass\n\n self.testableTrue = TestableTestTrue('testTest')\n self.testableFalse = TestableTestFalse('testTest')\n\n def testDefault(self):\n self.assertTrue(unittest.TestCase.longMessage)\n\n def test_formatMsg(self):\n self.assertEqual(self.testableFalse._formatMessage(None, \"foo\"), \"foo\")\n self.assertEqual(self.testableFalse._formatMessage(\"foo\", \"bar\"), \"foo\")\n\n self.assertEqual(self.testableTrue._formatMessage(None, \"foo\"), \"foo\")\n self.assertEqual(self.testableTrue._formatMessage(\"foo\", \"bar\"), \"bar : foo\")\n\n # This blows up if _formatMessage uses string concatenation\n self.testableTrue._formatMessage(object(), 'foo')\n\n def test_formatMessage_unicode_error(self):\n one = ''.join(chr(i) for i in range(255))\n # this used to cause a UnicodeDecodeError constructing msg\n self.testableTrue._formatMessage(one, '\\uFFFD')\n\n def assertMessages(self, methodName, args, errors):\n \"\"\"\n Check that methodName(*args) raises the correct error messages.\n errors should be a list of 4 regex that match the error when:\n 1) longMessage = False and no msg passed;\n 2) longMessage = False and msg passed;\n 3) longMessage = True and no msg passed;\n 4) longMessage = True and msg passed;\n \"\"\"\n def getMethod(i):\n useTestableFalse = i < 2\n if useTestableFalse:\n test = self.testableFalse\n else:\n test = self.testableTrue\n return getattr(test, methodName)\n\n for i, expected_regex in enumerate(errors):\n testMethod = getMethod(i)\n kwargs = {}\n withMsg = i % 2\n if withMsg:\n kwargs = {\"msg\": \"oops\"}\n\n with self.assertRaisesRegex(self.failureException,\n expected_regex=expected_regex):\n testMethod(*args, **kwargs)\n\n def testAssertTrue(self):\n self.assertMessages('assertTrue', (False,),\n [\"^False is not true$\", \"^oops$\", \"^False is not true$\",\n \"^False is not true : oops$\"])\n\n def 
testAssertFalse(self):\n self.assertMessages('assertFalse', (True,),\n [\"^True is not false$\", \"^oops$\", \"^True is not false$\",\n \"^True is not false : oops$\"])\n\n def testNotEqual(self):\n self.assertMessages('assertNotEqual', (1, 1),\n [\"^1 == 1$\", \"^oops$\", \"^1 == 1$\",\n \"^1 == 1 : oops$\"])\n\n def testAlmostEqual(self):\n self.assertMessages('assertAlmostEqual', (1, 2),\n [\"^1 != 2 within 7 places$\", \"^oops$\",\n \"^1 != 2 within 7 places$\", \"^1 != 2 within 7 places : oops$\"])\n\n def testNotAlmostEqual(self):\n self.assertMessages('assertNotAlmostEqual', (1, 1),\n [\"^1 == 1 within 7 places$\", \"^oops$\",\n \"^1 == 1 within 7 places$\", \"^1 == 1 within 7 places : oops$\"])\n\n def test_baseAssertEqual(self):\n self.assertMessages('_baseAssertEqual', (1, 2),\n [\"^1 != 2$\", \"^oops$\", \"^1 != 2$\", \"^1 != 2 : oops$\"])\n\n def testAssertSequenceEqual(self):\n # Error messages are multiline so not testing on full message\n # assertTupleEqual and assertListEqual delegate to this method\n self.assertMessages('assertSequenceEqual', ([], [None]),\n [\"\\+ \\[None\\]$\", \"^oops$\", r\"\\+ \\[None\\]$\",\n r\"\\+ \\[None\\] : oops$\"])\n\n def testAssertSetEqual(self):\n self.assertMessages('assertSetEqual', (set(), set([None])),\n [\"None$\", \"^oops$\", \"None$\",\n \"None : oops$\"])\n\n def testAssertIn(self):\n self.assertMessages('assertIn', (None, []),\n ['^None not found in \\[\\]$', \"^oops$\",\n '^None not found in \\[\\]$',\n '^None not found in \\[\\] : oops$'])\n\n def testAssertNotIn(self):\n self.assertMessages('assertNotIn', (None, [None]),\n ['^None unexpectedly found in \\[None\\]$', \"^oops$\",\n '^None unexpectedly found in \\[None\\]$',\n '^None unexpectedly found in \\[None\\] : oops$'])\n\n def testAssertDictEqual(self):\n self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),\n [r\"\\+ \\{'key': 'value'\\}$\", \"^oops$\",\n \"\\+ \\{'key': 'value'\\}$\",\n \"\\+ \\{'key': 'value'\\} : oops$\"])\n\n def 
testAssertDictContainsSubset(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n\n self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),\n [\"^Missing: 'key'$\", \"^oops$\",\n \"^Missing: 'key'$\",\n \"^Missing: 'key' : oops$\"])\n\n def testAssertMultiLineEqual(self):\n self.assertMessages('assertMultiLineEqual', (\"\", \"foo\"),\n [r\"\\+ foo$\", \"^oops$\",\n r\"\\+ foo$\",\n r\"\\+ foo : oops$\"])\n\n def testAssertLess(self):\n self.assertMessages('assertLess', (2, 1),\n [\"^2 not less than 1$\", \"^oops$\",\n \"^2 not less than 1$\", \"^2 not less than 1 : oops$\"])\n\n def testAssertLessEqual(self):\n self.assertMessages('assertLessEqual', (2, 1),\n [\"^2 not less than or equal to 1$\", \"^oops$\",\n \"^2 not less than or equal to 1$\",\n \"^2 not less than or equal to 1 : oops$\"])\n\n def testAssertGreater(self):\n self.assertMessages('assertGreater', (1, 2),\n [\"^1 not greater than 2$\", \"^oops$\",\n \"^1 not greater than 2$\",\n \"^1 not greater than 2 : oops$\"])\n\n def testAssertGreaterEqual(self):\n self.assertMessages('assertGreaterEqual', (1, 2),\n [\"^1 not greater than or equal to 2$\", \"^oops$\",\n \"^1 not greater than or equal to 2$\",\n \"^1 not greater than or equal to 2 : oops$\"])\n\n def testAssertIsNone(self):\n self.assertMessages('assertIsNone', ('not None',),\n [\"^'not None' is not None$\", \"^oops$\",\n \"^'not None' is not None$\",\n \"^'not None' is not None : oops$\"])\n\n def testAssertIsNotNone(self):\n self.assertMessages('assertIsNotNone', (None,),\n [\"^unexpectedly None$\", \"^oops$\",\n \"^unexpectedly None$\",\n \"^unexpectedly None : oops$\"])\n\n def testAssertIs(self):\n self.assertMessages('assertIs', (None, 'foo'),\n [\"^None is not 'foo'$\", \"^oops$\",\n \"^None is not 'foo'$\",\n \"^None is not 'foo' : oops$\"])\n\n def testAssertIsNot(self):\n self.assertMessages('assertIsNot', (None, None),\n [\"^unexpectedly identical: None$\", \"^oops$\",\n 
\"^unexpectedly identical: None$\",\n \"^unexpectedly identical: None : oops$\"])\n\n\n def assertMessagesCM(self, methodName, args, func, errors):\n \"\"\"\n Check that the correct error messages are raised while executing:\n with method(*args):\n func()\n *errors* should be a list of 4 regex that match the error when:\n 1) longMessage = False and no msg passed;\n 2) longMessage = False and msg passed;\n 3) longMessage = True and no msg passed;\n 4) longMessage = True and msg passed;\n \"\"\"\n p = product((self.testableFalse, self.testableTrue),\n ({}, {\"msg\": \"oops\"}))\n for (cls, kwargs), err in zip(p, errors):\n method = getattr(cls, methodName)\n with self.assertRaisesRegex(cls.failureException, err):\n with method(*args, **kwargs) as cm:\n func()\n\n def testAssertRaises(self):\n self.assertMessagesCM('assertRaises', (TypeError,), lambda: None,\n ['^TypeError not raised$', '^oops$',\n '^TypeError not raised$',\n '^TypeError not raised : oops$'])\n\n def testAssertRaisesRegex(self):\n # test error not raised\n self.assertMessagesCM('assertRaisesRegex', (TypeError, 'unused regex'),\n lambda: None,\n ['^TypeError not raised$', '^oops$',\n '^TypeError not raised$',\n '^TypeError not raised : oops$'])\n # test error raised but with wrong message\n def raise_wrong_message():\n raise TypeError('foo')\n self.assertMessagesCM('assertRaisesRegex', (TypeError, 'regex'),\n raise_wrong_message,\n ['^\"regex\" does not match \"foo\"$', '^oops$',\n '^\"regex\" does not match \"foo\"$',\n '^\"regex\" does not match \"foo\" : oops$'])\n\n def testAssertWarns(self):\n self.assertMessagesCM('assertWarns', (UserWarning,), lambda: None,\n ['^UserWarning not triggered$', '^oops$',\n '^UserWarning not triggered$',\n '^UserWarning not triggered : oops$'])\n\n def testAssertWarnsRegex(self):\n # test error not raised\n self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'unused regex'),\n lambda: None,\n ['^UserWarning not triggered$', '^oops$',\n '^UserWarning not 
triggered$',\n '^UserWarning not triggered : oops$'])\n # test warning raised but with wrong message\n def raise_wrong_message():\n warnings.warn('foo')\n self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'regex'),\n raise_wrong_message,\n ['^\"regex\" does not match \"foo\"$', '^oops$',\n '^\"regex\" does not match \"foo\"$',\n '^\"regex\" does not match \"foo\" : oops$'])\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476123,"cells":{"repo_name":{"kind":"string","value":"jamesblunt/sympy"},"path":{"kind":"string","value":"sympy/printing/tests/test_mathematica.py"},"copies":{"kind":"string","value":"93"},"size":{"kind":"string","value":"2612"},"content":{"kind":"string","value":"from sympy.core import (S, pi, oo, symbols, Function,\n Rational, Integer, Tuple)\nfrom sympy.integrals import Integral\nfrom sympy.concrete import Sum\nfrom sympy.functions import exp, sin, cos\n\nfrom sympy import mathematica_code as mcode\n\nx, y, z = symbols('x,y,z')\nf = Function('f')\n\n\ndef test_Integer():\n assert mcode(Integer(67)) == \"67\"\n assert mcode(Integer(-1)) == \"-1\"\n\n\ndef test_Rational():\n assert mcode(Rational(3, 7)) == \"3/7\"\n assert mcode(Rational(18, 9)) == \"2\"\n assert mcode(Rational(3, -7)) == \"-3/7\"\n assert mcode(Rational(-3, -7)) == \"3/7\"\n assert mcode(x + Rational(3, 7)) == \"x + 3/7\"\n assert mcode(Rational(3, 7)*x) == \"(3/7)*x\"\n\n\ndef test_Function():\n assert mcode(f(x, y, z)) == \"f[x, y, z]\"\n assert mcode(sin(x) ** cos(x)) == \"Sin[x]^Cos[x]\"\n\n\ndef test_Pow():\n assert mcode(x**3) == \"x^3\"\n assert mcode(x**(y**3)) == \"x^(y^3)\"\n assert mcode(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \\\n \"(3.5*f[x])^(-x + y^x)/(x^2 + y)\"\n assert mcode(x**-1.0) == 'x^(-1.0)'\n assert mcode(x**Rational(2, 3)) == 'x^(2/3)'\n\n\ndef test_Mul():\n A, B, C, D = symbols('A B C D', commutative=False)\n assert mcode(x*y*z) == \"x*y*z\"\n assert mcode(x*y*A) == \"x*y*A\"\n assert mcode(x*y*A*B) == \"x*y*A**B\"\n assert 
mcode(x*y*A*B*C) == \"x*y*A**B**C\"\n assert mcode(x*A*B*(C + D)*A*y) == \"x*y*A**B**(C + D)**A\"\n\n\ndef test_constants():\n assert mcode(pi) == \"Pi\"\n assert mcode(oo) == \"Infinity\"\n assert mcode(S.NegativeInfinity) == \"-Infinity\"\n assert mcode(S.EulerGamma) == \"EulerGamma\"\n assert mcode(S.Catalan) == \"Catalan\"\n assert mcode(S.Exp1) == \"E\"\n\n\ndef test_containers():\n assert mcode([1, 2, 3, [4, 5, [6, 7]], 8, [9, 10], 11]) == \\\n \"{1, 2, 3, {4, 5, {6, 7}}, 8, {9, 10}, 11}\"\n assert mcode((1, 2, (3, 4))) == \"{1, 2, {3, 4}}\"\n assert mcode([1]) == \"{1}\"\n assert mcode((1,)) == \"{1}\"\n assert mcode(Tuple(*[1, 2, 3])) == \"{1, 2, 3}\"\n\n\ndef test_Integral():\n assert mcode(Integral(sin(sin(x)), x)) == \"Hold[Integrate[Sin[Sin[x]], x]]\"\n assert mcode(Integral(exp(-x**2 - y**2),\n (x, -oo, oo),\n (y, -oo, oo))) == \\\n \"Hold[Integrate[Exp[-x^2 - y^2], {x, -Infinity, Infinity}, \" \\\n \"{y, -Infinity, Infinity}]]\"\n\n\ndef test_Sum():\n assert mcode(Sum(sin(x), (x, 0, 10))) == \"Hold[Sum[Sin[x], {x, 0, 10}]]\"\n assert mcode(Sum(exp(-x**2 - y**2),\n (x, -oo, oo),\n (y, -oo, oo))) == \\\n \"Hold[Sum[Exp[-x^2 - y^2], {x, -Infinity, Infinity}, \" \\\n \"{y, -Infinity, Infinity}]]\"\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476124,"cells":{"repo_name":{"kind":"string","value":"40223151/2015cd_midterm"},"path":{"kind":"string","value":"static/Brython3.1.1-20150328-091302/Lib/unittest/case.py"},"copies":{"kind":"string","value":"743"},"size":{"kind":"string","value":"48873"},"content":{"kind":"string","value":"\"\"\"Test case implementation\"\"\"\n\nimport sys\nimport functools\nimport difflib\nimport pprint\nimport re\nimport warnings\nimport collections\n\nfrom . import result\nfrom .util import (strclass, safe_repr, _count_diff_all_purpose,\n _count_diff_hashable)\n\n__unittest = True\n\n\nDIFF_OMITTED = ('\\nDiff is %s characters long. 
'\n 'Set self.maxDiff to None to see it.')\n\nclass SkipTest(Exception):\n \"\"\"\n Raise this exception in a test to skip it.\n\n Usually you can use TestCase.skipTest() or one of the skipping decorators\n instead of raising this directly.\n \"\"\"\n\nclass _ExpectedFailure(Exception):\n \"\"\"\n Raise this when a test is expected to fail.\n\n This is an implementation detail.\n \"\"\"\n\n def __init__(self, exc_info):\n super(_ExpectedFailure, self).__init__()\n self.exc_info = exc_info\n\nclass _UnexpectedSuccess(Exception):\n \"\"\"\n The test was supposed to fail, but it didn't!\n \"\"\"\n\n\nclass _Outcome(object):\n def __init__(self):\n self.success = True\n self.skipped = None\n self.unexpectedSuccess = None\n self.expectedFailure = None\n self.errors = []\n self.failures = []\n\n\ndef _id(obj):\n return obj\n\ndef skip(reason):\n \"\"\"\n Unconditionally skip a test.\n \"\"\"\n def decorator(test_item):\n if not isinstance(test_item, type):\n @functools.wraps(test_item)\n def skip_wrapper(*args, **kwargs):\n raise SkipTest(reason)\n test_item = skip_wrapper\n\n test_item.__unittest_skip__ = True\n test_item.__unittest_skip_why__ = reason\n return test_item\n return decorator\n\ndef skipIf(condition, reason):\n \"\"\"\n Skip a test if the condition is true.\n \"\"\"\n if condition:\n return skip(reason)\n return _id\n\ndef skipUnless(condition, reason):\n \"\"\"\n Skip a test unless the condition is true.\n \"\"\"\n if not condition:\n return skip(reason)\n return _id\n\n\ndef expectedFailure(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n func(*args, **kwargs)\n except Exception:\n raise _ExpectedFailure(sys.exc_info())\n raise _UnexpectedSuccess\n return wrapper\n\n\nclass _AssertRaisesBaseContext(object):\n\n def __init__(self, expected, test_case, callable_obj=None,\n expected_regex=None):\n self.expected = expected\n self.test_case = test_case\n if callable_obj is not None:\n try:\n self.obj_name = callable_obj.__name__\n 
except AttributeError:\n self.obj_name = str(callable_obj)\n else:\n self.obj_name = None\n if isinstance(expected_regex, (bytes, str)):\n expected_regex = re.compile(expected_regex)\n self.expected_regex = expected_regex\n self.msg = None\n\n def _raiseFailure(self, standardMsg):\n msg = self.test_case._formatMessage(self.msg, standardMsg)\n raise self.test_case.failureException(msg)\n\n def handle(self, name, callable_obj, args, kwargs):\n \"\"\"\n If callable_obj is None, assertRaises/Warns is being used as a\n context manager, so check for a 'msg' kwarg and return self.\n If callable_obj is not None, call it passing args and kwargs.\n \"\"\"\n if callable_obj is None:\n self.msg = kwargs.pop('msg', None)\n return self\n with self:\n callable_obj(*args, **kwargs)\n\n\n\nclass _AssertRaisesContext(_AssertRaisesBaseContext):\n \"\"\"A context manager used to implement TestCase.assertRaises* methods.\"\"\"\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n if exc_type is None:\n try:\n exc_name = self.expected.__name__\n except AttributeError:\n exc_name = str(self.expected)\n if self.obj_name:\n self._raiseFailure(\"{} not raised by {}\".format(exc_name,\n self.obj_name))\n else:\n self._raiseFailure(\"{} not raised\".format(exc_name))\n if not issubclass(exc_type, self.expected):\n # let unexpected exceptions pass through\n return False\n # store exception, without traceback, for later retrieval\n self.exception = exc_value.with_traceback(None)\n if self.expected_regex is None:\n return True\n\n expected_regex = self.expected_regex\n if not expected_regex.search(str(exc_value)):\n self._raiseFailure('\"{}\" does not match \"{}\"'.format(\n expected_regex.pattern, str(exc_value)))\n return True\n\n\nclass _AssertWarnsContext(_AssertRaisesBaseContext):\n \"\"\"A context manager used to implement TestCase.assertWarns* methods.\"\"\"\n\n def __enter__(self):\n # The __warningregistry__'s need to be in a pristine state for 
tests\n # to work properly.\n for v in sys.modules.values():\n if getattr(v, '__warningregistry__', None):\n v.__warningregistry__ = {}\n self.warnings_manager = warnings.catch_warnings(record=True)\n self.warnings = self.warnings_manager.__enter__()\n warnings.simplefilter(\"always\", self.expected)\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.warnings_manager.__exit__(exc_type, exc_value, tb)\n if exc_type is not None:\n # let unexpected exceptions pass through\n return\n try:\n exc_name = self.expected.__name__\n except AttributeError:\n exc_name = str(self.expected)\n first_matching = None\n for m in self.warnings:\n w = m.message\n if not isinstance(w, self.expected):\n continue\n if first_matching is None:\n first_matching = w\n if (self.expected_regex is not None and\n not self.expected_regex.search(str(w))):\n continue\n # store warning for later retrieval\n self.warning = w\n self.filename = m.filename\n self.lineno = m.lineno\n return\n # Now we simply try to choose a helpful failure message\n if first_matching is not None:\n self._raiseFailure('\"{}\" does not match \"{}\"'.format(\n self.expected_regex.pattern, str(first_matching)))\n if self.obj_name:\n self._raiseFailure(\"{} not triggered by {}\".format(exc_name,\n self.obj_name))\n else:\n self._raiseFailure(\"{} not triggered\".format(exc_name))\n\n\nclass TestCase(object):\n \"\"\"A class whose instances are single test cases.\n\n By default, the test code itself should be placed in a method named\n 'runTest'.\n\n If the fixture may be used for many test cases, create as\n many test methods as are needed. When instantiating such a TestCase\n subclass, specify in the constructor arguments the name of the test method\n that the instance is to execute.\n\n Test authors should subclass TestCase for their own tests. 
Construction\n and deconstruction of the test's environment ('fixture') can be\n implemented by overriding the 'setUp' and 'tearDown' methods respectively.\n\n If it is necessary to override the __init__ method, the base class\n __init__ method must always be called. It is important that subclasses\n should not change the signature of their __init__ method, since instances\n of the classes are instantiated automatically by parts of the framework\n in order to be run.\n\n When subclassing TestCase, you can set these attributes:\n * failureException: determines which exception will be raised when\n the instance's assertion methods fail; test methods raising this\n exception will be deemed to have 'failed' rather than 'errored'.\n * longMessage: determines whether long messages (including repr of\n objects used in assert methods) will be printed on failure in *addition*\n to any explicit message passed.\n * maxDiff: sets the maximum length of a diff in failure messages\n by assert methods using difflib. It is looked up as an instance\n attribute so can be configured by individual tests if required.\n \"\"\"\n\n failureException = AssertionError\n\n longMessage = True\n\n maxDiff = 80*8\n\n # If a string is longer than _diffThreshold, use normal comparison instead\n # of difflib. See #11763.\n _diffThreshold = 2**16\n\n # Attribute used by TestSuite for classSetUp\n\n _classSetupFailed = False\n\n def __init__(self, methodName='runTest'):\n \"\"\"Create an instance of the class that will use the named test\n method when executed. 
Raises a ValueError if the instance does\n not have a method with the specified name.\n \"\"\"\n self._testMethodName = methodName\n self._outcomeForDoCleanups = None\n self._testMethodDoc = 'No test'\n try:\n testMethod = getattr(self, methodName)\n except AttributeError:\n if methodName != 'runTest':\n # we allow instantiation with no explicit method name\n # but not an *incorrect* or missing method name\n raise ValueError(\"no such test method in %s: %s\" %\n (self.__class__, methodName))\n else:\n self._testMethodDoc = testMethod.__doc__\n self._cleanups = []\n\n # Map types to custom assertEqual functions that will compare\n # instances of said type in more detail to generate a more useful\n # error message.\n self._type_equality_funcs = {}\n self.addTypeEqualityFunc(dict, 'assertDictEqual')\n self.addTypeEqualityFunc(list, 'assertListEqual')\n self.addTypeEqualityFunc(tuple, 'assertTupleEqual')\n self.addTypeEqualityFunc(set, 'assertSetEqual')\n self.addTypeEqualityFunc(frozenset, 'assertSetEqual')\n self.addTypeEqualityFunc(str, 'assertMultiLineEqual')\n\n def addTypeEqualityFunc(self, typeobj, function):\n \"\"\"Add a type specific assertEqual style function to compare a type.\n\n This method is for use by TestCase subclasses that need to register\n their own type equality functions to provide nicer error messages.\n\n Args:\n typeobj: The data type to call this function on when both values\n are of the same type in assertEqual().\n function: The callable taking two arguments and an optional\n msg= argument that raises self.failureException with a\n useful error message when the two arguments are not equal.\n \"\"\"\n self._type_equality_funcs[typeobj] = function\n\n def addCleanup(self, function, *args, **kwargs):\n \"\"\"Add a function, with arguments, to be called when the test is\n completed. 
Functions added are called on a LIFO basis and are\n called after tearDown on test failure or success.\n\n Cleanup items are called even if setUp fails (unlike tearDown).\"\"\"\n self._cleanups.append((function, args, kwargs))\n\n def setUp(self):\n \"Hook method for setting up the test fixture before exercising it.\"\n pass\n\n def tearDown(self):\n \"Hook method for deconstructing the test fixture after testing it.\"\n pass\n\n @classmethod\n def setUpClass(cls):\n \"Hook method for setting up class fixture before running tests in the class.\"\n\n @classmethod\n def tearDownClass(cls):\n \"Hook method for deconstructing the class fixture after running all tests in the class.\"\n\n def countTestCases(self):\n return 1\n\n def defaultTestResult(self):\n return result.TestResult()\n\n def shortDescription(self):\n \"\"\"Returns a one-line description of the test, or None if no\n description has been provided.\n\n The default implementation of this method returns the first line of\n the specified test method's docstring.\n \"\"\"\n doc = self._testMethodDoc\n return doc and doc.split(\"\\n\")[0].strip() or None\n\n\n def id(self):\n return \"%s.%s\" % (strclass(self.__class__), self._testMethodName)\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return NotImplemented\n\n return self._testMethodName == other._testMethodName\n\n def __hash__(self):\n return hash((type(self), self._testMethodName))\n\n def __str__(self):\n return \"%s (%s)\" % (self._testMethodName, strclass(self.__class__))\n\n def __repr__(self):\n return \"<%s testMethod=%s>\" % \\\n (strclass(self.__class__), self._testMethodName)\n\n def _addSkip(self, result, reason):\n addSkip = getattr(result, 'addSkip', None)\n if addSkip is not None:\n addSkip(self, reason)\n else:\n warnings.warn(\"TestResult has no addSkip method, skips not reported\",\n RuntimeWarning, 2)\n result.addSuccess(self)\n\n def _executeTestPart(self, function, outcome, isTest=False):\n try:\n function()\n 
except KeyboardInterrupt:\n raise\n except SkipTest as e:\n outcome.success = False\n outcome.skipped = str(e)\n except _UnexpectedSuccess:\n exc_info = sys.exc_info()\n outcome.success = False\n if isTest:\n outcome.unexpectedSuccess = exc_info\n else:\n outcome.errors.append(exc_info)\n except _ExpectedFailure:\n outcome.success = False\n exc_info = sys.exc_info()\n if isTest:\n outcome.expectedFailure = exc_info\n else:\n outcome.errors.append(exc_info)\n except self.failureException:\n outcome.success = False\n outcome.failures.append(sys.exc_info())\n exc_info = sys.exc_info()\n except:\n outcome.success = False\n outcome.errors.append(sys.exc_info())\n\n def run(self, result=None):\n orig_result = result\n if result is None:\n result = self.defaultTestResult()\n startTestRun = getattr(result, 'startTestRun', None)\n if startTestRun is not None:\n startTestRun()\n\n result.startTest(self)\n\n testMethod = getattr(self, self._testMethodName)\n if (getattr(self.__class__, \"__unittest_skip__\", False) or\n getattr(testMethod, \"__unittest_skip__\", False)):\n # If the class or method was skipped.\n try:\n skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')\n or getattr(testMethod, '__unittest_skip_why__', ''))\n self._addSkip(result, skip_why)\n finally:\n result.stopTest(self)\n return\n try:\n outcome = _Outcome()\n self._outcomeForDoCleanups = outcome\n\n self._executeTestPart(self.setUp, outcome)\n if outcome.success:\n self._executeTestPart(testMethod, outcome, isTest=True)\n self._executeTestPart(self.tearDown, outcome)\n\n self.doCleanups()\n if outcome.success:\n result.addSuccess(self)\n else:\n if outcome.skipped is not None:\n self._addSkip(result, outcome.skipped)\n for exc_info in outcome.errors:\n result.addError(self, exc_info)\n for exc_info in outcome.failures:\n result.addFailure(self, exc_info)\n if outcome.unexpectedSuccess is not None:\n addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)\n if 
addUnexpectedSuccess is not None:\n addUnexpectedSuccess(self)\n else:\n warnings.warn(\"TestResult has no addUnexpectedSuccess method, reporting as failures\",\n RuntimeWarning)\n result.addFailure(self, outcome.unexpectedSuccess)\n\n if outcome.expectedFailure is not None:\n addExpectedFailure = getattr(result, 'addExpectedFailure', None)\n if addExpectedFailure is not None:\n addExpectedFailure(self, outcome.expectedFailure)\n else:\n warnings.warn(\"TestResult has no addExpectedFailure method, reporting as passes\",\n RuntimeWarning)\n result.addSuccess(self)\n return result\n finally:\n result.stopTest(self)\n if orig_result is None:\n stopTestRun = getattr(result, 'stopTestRun', None)\n if stopTestRun is not None:\n stopTestRun()\n\n def doCleanups(self):\n \"\"\"Execute all cleanup functions. Normally called for you after\n tearDown.\"\"\"\n outcome = self._outcomeForDoCleanups or _Outcome()\n while self._cleanups:\n function, args, kwargs = self._cleanups.pop()\n part = lambda: function(*args, **kwargs)\n self._executeTestPart(part, outcome)\n\n # return this for backwards compatibility\n # even though we no longer us it internally\n return outcome.success\n\n def __call__(self, *args, **kwds):\n return self.run(*args, **kwds)\n\n def debug(self):\n \"\"\"Run the test without collecting errors in a TestResult\"\"\"\n self.setUp()\n getattr(self, self._testMethodName)()\n self.tearDown()\n while self._cleanups:\n function, args, kwargs = self._cleanups.pop(-1)\n function(*args, **kwargs)\n\n def skipTest(self, reason):\n \"\"\"Skip this test.\"\"\"\n raise SkipTest(reason)\n\n def fail(self, msg=None):\n \"\"\"Fail immediately, with the given message.\"\"\"\n raise self.failureException(msg)\n\n def assertFalse(self, expr, msg=None):\n \"\"\"Check that the expression is false.\"\"\"\n if expr:\n msg = self._formatMessage(msg, \"%s is not false\" % safe_repr(expr))\n raise self.failureException(msg)\n\n def assertTrue(self, expr, msg=None):\n \"\"\"Check that 
the expression is true.\"\"\"\n if not expr:\n msg = self._formatMessage(msg, \"%s is not true\" % safe_repr(expr))\n raise self.failureException(msg)\n\n def _formatMessage(self, msg, standardMsg):\n \"\"\"Honour the longMessage attribute when generating failure messages.\n If longMessage is False this means:\n * Use only an explicit message if it is provided\n * Otherwise use the standard message for the assert\n\n If longMessage is True:\n * Use the standard message\n * If an explicit message is provided, plus ' : ' and the explicit message\n \"\"\"\n if not self.longMessage:\n return msg or standardMsg\n if msg is None:\n return standardMsg\n try:\n # don't switch to '{}' formatting in Python 2.X\n # it changes the way unicode input is handled\n return '%s : %s' % (standardMsg, msg)\n except UnicodeDecodeError:\n return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))\n\n def assertRaises(self, excClass, callableObj=None, *args, **kwargs):\n \"\"\"Fail unless an exception of class excClass is raised\n by callableObj when invoked with arguments args and keyword\n arguments kwargs. If a different type of exception is\n raised, it will not be caught, and the test case will be\n deemed to have suffered an error, exactly as for an\n unexpected exception.\n\n If called with callableObj omitted or None, will return a\n context object used like this::\n\n with self.assertRaises(SomeException):\n do_something()\n\n An optional keyword argument 'msg' can be provided when assertRaises\n is used as a context object.\n\n The context manager keeps a reference to the exception as\n the 'exception' attribute. 
This allows you to inspect the\n exception after the assertion::\n\n with self.assertRaises(SomeException) as cm:\n do_something()\n the_exception = cm.exception\n self.assertEqual(the_exception.error_code, 3)\n \"\"\"\n context = _AssertRaisesContext(excClass, self, callableObj)\n return context.handle('assertRaises', callableObj, args, kwargs)\n\n def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):\n \"\"\"Fail unless a warning of class warnClass is triggered\n by callable_obj when invoked with arguments args and keyword\n arguments kwargs. If a different type of warning is\n triggered, it will not be handled: depending on the other\n warning filtering rules in effect, it might be silenced, printed\n out, or raised as an exception.\n\n If called with callable_obj omitted or None, will return a\n context object used like this::\n\n with self.assertWarns(SomeWarning):\n do_something()\n\n An optional keyword argument 'msg' can be provided when assertWarns\n is used as a context object.\n\n The context manager keeps a reference to the first matching\n warning as the 'warning' attribute; similarly, the 'filename'\n and 'lineno' attributes give you information about the line\n of Python code from which the warning was triggered.\n This allows you to inspect the warning after the assertion::\n\n with self.assertWarns(SomeWarning) as cm:\n do_something()\n the_warning = cm.warning\n self.assertEqual(the_warning.some_attribute, 147)\n \"\"\"\n context = _AssertWarnsContext(expected_warning, self, callable_obj)\n return context.handle('assertWarns', callable_obj, args, kwargs)\n\n def _getAssertEqualityFunc(self, first, second):\n \"\"\"Get a detailed comparison function for the types of the two args.\n\n Returns: A callable accepting (first, second, msg=None) that will\n raise a failure exception if first != second with a useful human\n readable error message for those types.\n \"\"\"\n #\n # NOTE(gregory.p.smith): I considered isinstance(first, 
type(second))\n # and vice versa. I opted for the conservative approach in case\n # subclasses are not intended to be compared in detail to their super\n # class instances using a type equality func. This means testing\n # subtypes won't automagically use the detailed comparison. Callers\n # should use their type specific assertSpamEqual method to compare\n # subclasses if the detailed comparison is desired and appropriate.\n # See the discussion in http://bugs.python.org/issue2578.\n #\n if type(first) is type(second):\n asserter = self._type_equality_funcs.get(type(first))\n if asserter is not None:\n if isinstance(asserter, str):\n asserter = getattr(self, asserter)\n return asserter\n\n return self._baseAssertEqual\n\n def _baseAssertEqual(self, first, second, msg=None):\n \"\"\"The default assertEqual implementation, not type specific.\"\"\"\n if not first == second:\n standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))\n msg = self._formatMessage(msg, standardMsg)\n raise self.failureException(msg)\n\n def assertEqual(self, first, second, msg=None):\n \"\"\"Fail if the two objects are unequal as determined by the '=='\n operator.\n \"\"\"\n assertion_func = self._getAssertEqualityFunc(first, second)\n assertion_func(first, second, msg=msg)\n\n def assertNotEqual(self, first, second, msg=None):\n \"\"\"Fail if the two objects are equal as determined by the '!='\n operator.\n \"\"\"\n if not first != second:\n msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),\n safe_repr(second)))\n raise self.failureException(msg)\n\n def assertAlmostEqual(self, first, second, places=None, msg=None,\n delta=None):\n \"\"\"Fail if the two objects are unequal as determined by their\n difference rounded to the given number of decimal places\n (default 7) and comparing to zero, or by comparing that the\n between the two objects is more than the given delta.\n\n Note that decimal places (from zero) are usually not the same\n as significant digits (measured 
from the most signficant digit).\n\n If the two objects compare equal then they will automatically\n compare almost equal.\n \"\"\"\n if first == second:\n # shortcut\n return\n if delta is not None and places is not None:\n raise TypeError(\"specify delta or places not both\")\n\n if delta is not None:\n if abs(first - second) <= delta:\n return\n\n standardMsg = '%s != %s within %s delta' % (safe_repr(first),\n safe_repr(second),\n safe_repr(delta))\n else:\n if places is None:\n places = 7\n\n if round(abs(second-first), places) == 0:\n return\n\n standardMsg = '%s != %s within %r places' % (safe_repr(first),\n safe_repr(second),\n places)\n msg = self._formatMessage(msg, standardMsg)\n raise self.failureException(msg)\n\n def assertNotAlmostEqual(self, first, second, places=None, msg=None,\n delta=None):\n \"\"\"Fail if the two objects are equal as determined by their\n difference rounded to the given number of decimal places\n (default 7) and comparing to zero, or by comparing that the\n between the two objects is less than the given delta.\n\n Note that decimal places (from zero) are usually not the same\n as significant digits (measured from the most signficant digit).\n\n Objects that are equal automatically fail.\n \"\"\"\n if delta is not None and places is not None:\n raise TypeError(\"specify delta or places not both\")\n if delta is not None:\n if not (first == second) and abs(first - second) > delta:\n return\n standardMsg = '%s == %s within %s delta' % (safe_repr(first),\n safe_repr(second),\n safe_repr(delta))\n else:\n if places is None:\n places = 7\n if not (first == second) and round(abs(second-first), places) != 0:\n return\n standardMsg = '%s == %s within %r places' % (safe_repr(first),\n safe_repr(second),\n places)\n\n msg = self._formatMessage(msg, standardMsg)\n raise self.failureException(msg)\n\n\n def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):\n \"\"\"An equality assertion for ordered sequences (like lists and 
tuples).\n\n For the purposes of this function, a valid ordered sequence type is one\n which can be indexed, has a length, and has an equality operator.\n\n Args:\n seq1: The first sequence to compare.\n seq2: The second sequence to compare.\n seq_type: The expected datatype of the sequences, or None if no\n datatype should be enforced.\n msg: Optional message to use on failure instead of a list of\n differences.\n \"\"\"\n if seq_type is not None:\n seq_type_name = seq_type.__name__\n if not isinstance(seq1, seq_type):\n raise self.failureException('First sequence is not a %s: %s'\n % (seq_type_name, safe_repr(seq1)))\n if not isinstance(seq2, seq_type):\n raise self.failureException('Second sequence is not a %s: %s'\n % (seq_type_name, safe_repr(seq2)))\n else:\n seq_type_name = \"sequence\"\n\n differing = None\n try:\n len1 = len(seq1)\n except (TypeError, NotImplementedError):\n differing = 'First %s has no length. Non-sequence?' % (\n seq_type_name)\n\n if differing is None:\n try:\n len2 = len(seq2)\n except (TypeError, NotImplementedError):\n differing = 'Second %s has no length. Non-sequence?' 
% (\n seq_type_name)\n\n if differing is None:\n if seq1 == seq2:\n return\n\n seq1_repr = safe_repr(seq1)\n seq2_repr = safe_repr(seq2)\n if len(seq1_repr) > 30:\n seq1_repr = seq1_repr[:30] + '...'\n if len(seq2_repr) > 30:\n seq2_repr = seq2_repr[:30] + '...'\n elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)\n differing = '%ss differ: %s != %s\\n' % elements\n\n for i in range(min(len1, len2)):\n try:\n item1 = seq1[i]\n except (TypeError, IndexError, NotImplementedError):\n differing += ('\\nUnable to index element %d of first %s\\n' %\n (i, seq_type_name))\n break\n\n try:\n item2 = seq2[i]\n except (TypeError, IndexError, NotImplementedError):\n differing += ('\\nUnable to index element %d of second %s\\n' %\n (i, seq_type_name))\n break\n\n if item1 != item2:\n differing += ('\\nFirst differing element %d:\\n%s\\n%s\\n' %\n (i, item1, item2))\n break\n else:\n if (len1 == len2 and seq_type is None and\n type(seq1) != type(seq2)):\n # The sequences are the same, but have differing types.\n return\n\n if len1 > len2:\n differing += ('\\nFirst %s contains %d additional '\n 'elements.\\n' % (seq_type_name, len1 - len2))\n try:\n differing += ('First extra element %d:\\n%s\\n' %\n (len2, seq1[len2]))\n except (TypeError, IndexError, NotImplementedError):\n differing += ('Unable to index element %d '\n 'of first %s\\n' % (len2, seq_type_name))\n elif len1 < len2:\n differing += ('\\nSecond %s contains %d additional '\n 'elements.\\n' % (seq_type_name, len2 - len1))\n try:\n differing += ('First extra element %d:\\n%s\\n' %\n (len1, seq2[len1]))\n except (TypeError, IndexError, NotImplementedError):\n differing += ('Unable to index element %d '\n 'of second %s\\n' % (len1, seq_type_name))\n standardMsg = differing\n diffMsg = '\\n' + '\\n'.join(\n difflib.ndiff(pprint.pformat(seq1).splitlines(),\n pprint.pformat(seq2).splitlines()))\n\n standardMsg = self._truncateMessage(standardMsg, diffMsg)\n msg = self._formatMessage(msg, standardMsg)\n 
self.fail(msg)\n\n def _truncateMessage(self, message, diff):\n max_diff = self.maxDiff\n if max_diff is None or len(diff) <= max_diff:\n return message + diff\n return message + (DIFF_OMITTED % len(diff))\n\n def assertListEqual(self, list1, list2, msg=None):\n \"\"\"A list-specific equality assertion.\n\n Args:\n list1: The first list to compare.\n list2: The second list to compare.\n msg: Optional message to use on failure instead of a list of\n differences.\n\n \"\"\"\n self.assertSequenceEqual(list1, list2, msg, seq_type=list)\n\n def assertTupleEqual(self, tuple1, tuple2, msg=None):\n \"\"\"A tuple-specific equality assertion.\n\n Args:\n tuple1: The first tuple to compare.\n tuple2: The second tuple to compare.\n msg: Optional message to use on failure instead of a list of\n differences.\n \"\"\"\n self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)\n\n def assertSetEqual(self, set1, set2, msg=None):\n \"\"\"A set-specific equality assertion.\n\n Args:\n set1: The first set to compare.\n set2: The second set to compare.\n msg: Optional message to use on failure instead of a list of\n differences.\n\n assertSetEqual uses ducktyping to support different types of sets, and\n is optimized for sets specifically (parameters must support a\n difference method).\n \"\"\"\n try:\n difference1 = set1.difference(set2)\n except TypeError as e:\n self.fail('invalid type when attempting set difference: %s' % e)\n except AttributeError as e:\n self.fail('first argument does not support set difference: %s' % e)\n\n try:\n difference2 = set2.difference(set1)\n except TypeError as e:\n self.fail('invalid type when attempting set difference: %s' % e)\n except AttributeError as e:\n self.fail('second argument does not support set difference: %s' % e)\n\n if not (difference1 or difference2):\n return\n\n lines = []\n if difference1:\n lines.append('Items in the first set but not the second:')\n for item in difference1:\n lines.append(repr(item))\n if difference2:\n 
            lines.append('Items in the second set but not the first:')
            for item in difference2:
                lines.append(repr(item))

        standardMsg = '\n'.join(lines)
        self.fail(self._formatMessage(msg, standardMsg))

    def assertIn(self, member, container, msg=None):
        """Just like self.assertTrue(a in b), but with a nicer default message."""
        if member not in container:
            standardMsg = '%s not found in %s' % (safe_repr(member),
                                                  safe_repr(container))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertNotIn(self, member, container, msg=None):
        """Just like self.assertTrue(a not in b), but with a nicer default message."""
        if member in container:
            standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
                                                           safe_repr(container))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertIs(self, expr1, expr2, msg=None):
        """Just like self.assertTrue(a is b), but with a nicer default message."""
        if expr1 is not expr2:
            standardMsg = '%s is not %s' % (safe_repr(expr1),
                                            safe_repr(expr2))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertIsNot(self, expr1, expr2, msg=None):
        """Just like self.assertTrue(a is not b), but with a nicer default message."""
        if expr1 is expr2:
            standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
            self.fail(self._formatMessage(msg, standardMsg))

    # Dict equality with a readable line-by-line ndiff of the pretty-printed dicts.
    def assertDictEqual(self, d1, d2, msg=None):
        self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
        self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')

        if d1 != d2:
            standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
            diff = ('\n' + '\n'.join(difflib.ndiff(
                           pprint.pformat(d1).splitlines(),
                           pprint.pformat(d2).splitlines())))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertDictContainsSubset(self, subset, dictionary, msg=None):
        """Checks whether dictionary is a superset of subset."""
        # Deprecated: emits a DeprecationWarning on every call but still works.
        warnings.warn('assertDictContainsSubset is deprecated',
                      DeprecationWarning)
        missing = []
        mismatched = []
        for key, value in subset.items():
            if key not in dictionary:
                missing.append(key)
            elif value != dictionary[key]:
                mismatched.append('%s, expected: %s, actual: %s' %
                                  (safe_repr(key), safe_repr(value),
                                   safe_repr(dictionary[key])))

        if not (missing or mismatched):
            return

        standardMsg = ''
        if missing:
            standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
                                                   missing)
        if mismatched:
            if standardMsg:
                standardMsg += '; '
            standardMsg += 'Mismatched values: %s' % ','.join(mismatched)

        self.fail(self._formatMessage(msg, standardMsg))


    def assertCountEqual(self, first, second, msg=None):
        """An unordered sequence comparison asserting that the same elements,
        regardless of order.  If the same element occurs more than once,
        it verifies that the elements occur the same number of times.

            self.assertEqual(Counter(list(first)),
                             Counter(list(second)))

        Example:
            - [0, 1, 1] and [1, 0, 1] compare equal.
            - [0, 0, 1] and [0, 1] compare unequal.

        """
        first_seq, second_seq = list(first), list(second)
        try:
            first = collections.Counter(first_seq)
            second = collections.Counter(second_seq)
        except TypeError:
            # Handle case with unhashable elements
            differences = _count_diff_all_purpose(first_seq, second_seq)
        else:
            if first == second:
                return
            differences = _count_diff_hashable(first_seq, second_seq)

        if differences:
            standardMsg = 'Element counts were not equal:\n'
            lines = ['First has %d, Second has %d:  %r' % diff for diff in differences]
            diffMsg = '\n'.join(lines)
            standardMsg = self._truncateMessage(standardMsg, diffMsg)
            msg = self._formatMessage(msg, standardMsg)
            self.fail(msg)

    def assertMultiLineEqual(self, first, second, msg=None):
        """Assert that two multi-line strings are equal."""
        self.assertIsInstance(first, str, 'First argument is not a string')
        self.assertIsInstance(second, str, 'Second argument is not a string')

        if first != second:
            # don't use difflib if the strings are too long
            if (len(first) > self._diffThreshold or
                len(second) > self._diffThreshold):
                self._baseAssertEqual(first, second, msg)
            firstlines = first.splitlines(keepends=True)
            secondlines = second.splitlines(keepends=True)
            # Append a trailing newline to single-line inputs so ndiff output lines up.
            if len(firstlines) == 1 and first.strip('\r\n') == first:
                firstlines = [first + '\n']
                secondlines = [second + '\n']
            standardMsg = '%s != %s' % (safe_repr(first, True),
                                        safe_repr(second, True))
            diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertLess(self, a, b, msg=None):
        """Just like self.assertTrue(a < b), but with a nicer default message."""
        if not a < b:
            standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertLessEqual(self, a, b, msg=None):
        """Just like self.assertTrue(a <= b), but with a nicer default message."""
        if not a <= b:
            standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertGreater(self, a, b, msg=None):
        """Just like self.assertTrue(a > b), but with a nicer default message."""
        if not a > b:
            standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertGreaterEqual(self, a, b, msg=None):
        """Just like self.assertTrue(a >= b), but with a nicer default message."""
        if not a >= b:
            standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertIsNone(self, obj, msg=None):
        """Same as self.assertTrue(obj is None), with a nicer default message."""
        if obj is not None:
            standardMsg = '%s is not None' % (safe_repr(obj),)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertIsNotNone(self, obj, msg=None):
        """Included for symmetry with assertIsNone."""
        if obj is None:
            standardMsg = 'unexpectedly None'
            self.fail(self._formatMessage(msg, standardMsg))

    def assertIsInstance(self, obj, cls, msg=None):
        """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
        default message."""
        if not isinstance(obj, cls):
            standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertNotIsInstance(self, obj, cls, msg=None):
        """Included for symmetry with assertIsInstance."""
        if isinstance(obj, cls):
            standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertRaisesRegex(self, expected_exception, expected_regex,
                          callable_obj=None, *args, **kwargs):
        """Asserts that the message in a raised exception matches a regex.

        Args:
            expected_exception: Exception class expected to be raised.
            expected_regex: Regex (re pattern object or string) expected
                    to be found in error message.
            callable_obj: Function to be called.
            msg: Optional message used in case of failure. Can only be used
                    when assertRaisesRegex is used as a context manager.
            args: Extra args.
            kwargs: Extra kwargs.
        """
        context = _AssertRaisesContext(expected_exception, self, callable_obj,
                                       expected_regex)

        return context.handle('assertRaisesRegex', callable_obj, args, kwargs)

    def assertWarnsRegex(self, expected_warning, expected_regex,
                         callable_obj=None, *args, **kwargs):
        """Asserts that the message in a triggered warning matches a regexp.
        Basic functioning is similar to assertWarns() with the addition
        that only warnings whose messages also match the regular expression
        are considered successful matches.

        Args:
            expected_warning: Warning class expected to be triggered.
            expected_regex: Regex (re pattern object or string) expected
                    to be found in error message.
            callable_obj: Function to be called.
            msg: Optional message used in case of failure. Can only be used
                    when assertWarnsRegex is used as a context manager.
            args: Extra args.
            kwargs: Extra kwargs.
        """
        context = _AssertWarnsContext(expected_warning, self, callable_obj,
                                      expected_regex)
        return context.handle('assertWarnsRegex', callable_obj, args, kwargs)

    def assertRegex(self, text, expected_regex, msg=None):
        """Fail the test unless the text matches the regular expression."""
        if isinstance(expected_regex, (str, bytes)):
            assert expected_regex, "expected_regex must not be empty."
            expected_regex = re.compile(expected_regex)
        if not expected_regex.search(text):
            msg = msg or "Regex didn't match"
            msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
            raise self.failureException(msg)

    def assertNotRegex(self, text, unexpected_regex, msg=None):
        """Fail the test if the text matches the regular expression."""
        if isinstance(unexpected_regex, (str, bytes)):
            unexpected_regex = re.compile(unexpected_regex)
        match = unexpected_regex.search(text)
        if match:
            msg = msg or "Regex matched"
            msg = '%s: %r matches %r in %r' % (msg,
                                               text[match.start():match.end()],
                                               unexpected_regex.pattern,
                                               text)
            raise self.failureException(msg)


    # Decorator used only at class-definition time to build the deprecated
    # alias table below; each alias warns, then delegates to the new name.
    def _deprecate(original_func):
        def deprecated_func(*args, **kwargs):
            warnings.warn(
                'Please use {0} instead.'.format(original_func.__name__),
                DeprecationWarning, 2)
            return original_func(*args, **kwargs)
        return deprecated_func

    # see #9424 -- deprecated camelCase / fail* aliases kept for backward compatibility
    failUnlessEqual = assertEquals = _deprecate(assertEqual)
    failIfEqual = assertNotEquals = _deprecate(assertNotEqual)
    failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual)
    failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual)
    failUnless = assert_ = _deprecate(assertTrue)
    failUnlessRaises = _deprecate(assertRaises)
    failIf = _deprecate(assertFalse)
    assertRaisesRegexp = _deprecate(assertRaisesRegex)
    assertRegexpMatches = _deprecate(assertRegex)



class FunctionTestCase(TestCase):
    """A test case that wraps a test function.

    This is useful for slipping pre-existing test functions into the
    unittest framework. Optionally, set-up and tidy-up functions can be
    supplied. As with TestCase, the tidy-up ('tearDown') function will
    always be called if the set-up ('setUp') function ran successfully.
    """

    def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
        super(FunctionTestCase, self).__init__()
        self._setUpFunc = setUp
        self._tearDownFunc = tearDown
        self._testFunc = testFunc
        self._description = description

    def setUp(self):
        if self._setUpFunc is not None:
            self._setUpFunc()

    def tearDown(self):
        if self._tearDownFunc is not None:
            self._tearDownFunc()

    def runTest(self):
        self._testFunc()

    def id(self):
        # NOTE(review): returns only the bare function name, unlike the upstream
        # implementation which qualifies it with the class -- confirm intentional.
        return self._testFunc.__name__

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented

        return self._setUpFunc == other._setUpFunc and \
               self._tearDownFunc == other._tearDownFunc and \
               self._testFunc == other._testFunc and \
               self._description == other._description

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((type(self), self._setUpFunc, self._tearDownFunc,
                     self._testFunc, self._description))

    def __str__(self):
        return "%s (%s)" % (strclass(self.__class__),
                            self._testFunc.__name__)

    def __repr__(self):
        return "<%s tec=%s>" % (strclass(self.__class__),
                                self._testFunc)

    def shortDescription(self):
        if self._description is not None:
            return self._description
        doc = self._testFunc.__doc__
        return doc and doc.split("\n")[0].strip() or None
...fixtures.course import CourseFixture, XBlockFixtureDesc\nfrom ...pages.lms.auto_auth import AutoAuthPage\nfrom ...pages.lms.course_nav import CourseNavPage\nfrom ...pages.lms.courseware import CoursewarePage\nfrom ...pages.lms.edxnotes import EdxNotesUnitPage, EdxNotesPage, EdxNotesPageNoContent\nfrom ...fixtures.edxnotes import EdxNotesFixture, Note, Range\nfrom ..helpers import EventsTestMixin\n\n\nclass EdxNotesTestMixin(UniqueCourseTest):\n \"\"\"\n Creates a course with initial data and contains useful helper methods.\n \"\"\"\n def setUp(self):\n \"\"\"\n Initialize pages and install a course fixture.\n \"\"\"\n super(EdxNotesTestMixin, self).setUp()\n self.courseware_page = CoursewarePage(self.browser, self.course_id)\n self.course_nav = CourseNavPage(self.browser)\n self.note_unit_page = EdxNotesUnitPage(self.browser, self.course_id)\n self.notes_page = EdxNotesPage(self.browser, self.course_id)\n\n self.username = str(uuid4().hex)[:5]\n self.email = \"{}@email.com\".format(self.username)\n\n self.selector = \"annotate-id\"\n self.edxnotes_fixture = EdxNotesFixture()\n self.course_fixture = CourseFixture(\n self.course_info[\"org\"], self.course_info[\"number\"],\n self.course_info[\"run\"], self.course_info[\"display_name\"]\n )\n\n self.course_fixture.add_advanced_settings({\n u\"edxnotes\": {u\"value\": True}\n })\n\n self.course_fixture.add_children(\n XBlockFixtureDesc(\"chapter\", \"Test Section 1\").add_children(\n XBlockFixtureDesc(\"sequential\", \"Test Subsection 1\").add_children(\n XBlockFixtureDesc(\"vertical\", \"Test Unit 1\").add_children(\n XBlockFixtureDesc(\n \"html\",\n \"Test HTML 1\",\n data=\"\"\"\n

Annotate this text!

\n

Annotate this text

\n \"\"\".format(self.selector)\n ),\n XBlockFixtureDesc(\n \"html\",\n \"Test HTML 2\",\n data=\"\"\"

Annotate this text!

\"\"\".format(self.selector)\n ),\n ),\n XBlockFixtureDesc(\"vertical\", \"Test Unit 2\").add_children(\n XBlockFixtureDesc(\n \"html\",\n \"Test HTML 3\",\n data=\"\"\"

Annotate this text!

\"\"\".format(self.selector)\n ),\n ),\n ),\n XBlockFixtureDesc(\"sequential\", \"Test Subsection 2\").add_children(\n XBlockFixtureDesc(\"vertical\", \"Test Unit 3\").add_children(\n XBlockFixtureDesc(\n \"html\",\n \"Test HTML 4\",\n data=\"\"\"\n

Annotate this text!

\n \"\"\".format(self.selector)\n ),\n ),\n ),\n ),\n XBlockFixtureDesc(\"chapter\", \"Test Section 2\").add_children(\n XBlockFixtureDesc(\"sequential\", \"Test Subsection 3\").add_children(\n XBlockFixtureDesc(\"vertical\", \"Test Unit 4\").add_children(\n XBlockFixtureDesc(\n \"html\",\n \"Test HTML 5\",\n data=\"\"\"\n

Annotate this text!

\n \"\"\".format(self.selector)\n ),\n XBlockFixtureDesc(\n \"html\",\n \"Test HTML 6\",\n data=\"\"\"

Annotate this text!

                            """.format(self.selector)
                        ),
                    ),
                ),
            )).install()

        self.addCleanup(self.edxnotes_fixture.cleanup)

        AutoAuthPage(self.browser, username=self.username, email=self.email, course_id=self.course_id).visit()

    def _add_notes(self):
        # Create one note per HTML component in the fixture; the highlighted
        # range is shifted by the component index so each note differs.
        xblocks = self.course_fixture.get_nested_xblocks(category="html")
        notes_list = []
        for index, xblock in enumerate(xblocks):
            notes_list.append(
                Note(
                    user=self.username,
                    usage_id=xblock.locator,
                    course_id=self.course_fixture._course_key,
                    ranges=[Range(startOffset=index, endOffset=index + 5)]
                )
            )

        self.edxnotes_fixture.create_notes(notes_list)
        self.edxnotes_fixture.install()


@attr('shard_4')
class EdxNotesDefaultInteractionsTest(EdxNotesTestMixin):
    """
    Tests for creation, editing, deleting annotations inside annotatable components in LMS.
    """
    def create_notes(self, components, offset=0):
        """Create a note on each component, numbering the note text from `offset`."""
        self.assertGreater(len(components), 0)
        index = offset
        for component in components:
            for note in component.create_note(".{}".format(self.selector)):
                note.text = "TEST TEXT {}".format(index)
                index += 1

    def edit_notes(self, components, offset=0):
        """Rewrite the text of every existing note, numbering from `offset`."""
        self.assertGreater(len(components), 0)
        index = offset
        for component in components:
            self.assertGreater(len(component.notes), 0)
            for note in component.edit_note():
                note.text = "TEST TEXT {}".format(index)
                index += 1

    def edit_tags_in_notes(self, components, tags):
        """Set tags on one note per component; `tags` must supply one list per note edited."""
        self.assertGreater(len(components), 0)
        index = 0
        for component in components:
            self.assertGreater(len(component.notes), 0)
            for note in component.edit_note():
                note.tags = tags[index]
                index += 1
        self.assertEqual(index, len(tags), "Number of supplied tags did not match components")

    def remove_notes(self, components):
        """Delete the notes on every given component."""
        self.assertGreater(len(components), 0)
        for component in components:
            self.assertGreater(len(component.notes), 0)
            component.remove_note()

    def assert_notes_are_removed(self, components):
        """Verify no component still shows any notes."""
        for component in components:
            self.assertEqual(0, len(component.notes))

    def assert_text_in_notes(self, notes):
        # Note texts are expected to be "TEST TEXT 0", "TEST TEXT 1", ... in order.
        # xrange: this file targets Python 2.
        actual = [note.text for note in notes]
        expected = ["TEST TEXT {}".format(i) for i in xrange(len(notes))]
        self.assertEqual(expected, actual)

    def assert_tags_in_notes(self, notes, expected_tags):
        actual = [note.tags for note in notes]
        expected = [expected_tags[i] for i in xrange(len(notes))]
        self.assertEqual(expected, actual)

    def test_can_create_notes(self):
        """
        Scenario: User can create notes.
        Given I have a course with 3 annotatable components
        And I open the unit with 2 annotatable components
        When I add 2 notes for the first component and 1 note for the second
        Then I see that notes were correctly created
        When I change sequential position to "2"
        And I add note for the annotatable component on the page
        Then I see that note was correctly created
        When I refresh the page
        Then I see that note was correctly stored
        When I change sequential position to "1"
        Then I see that notes were correctly stored on the page
        """
        self.note_unit_page.visit()

        components = self.note_unit_page.components
        self.create_notes(components)
        self.assert_text_in_notes(self.note_unit_page.notes)

        self.course_nav.go_to_sequential_position(2)
        components = self.note_unit_page.components
        self.create_notes(components)

        components = self.note_unit_page.refresh()
        self.assert_text_in_notes(self.note_unit_page.notes)

        self.course_nav.go_to_sequential_position(1)
        components = self.note_unit_page.components
        self.assert_text_in_notes(self.note_unit_page.notes)

    def test_can_edit_notes(self):
        """
        Scenario: User can edit notes.
        Given I have a course with 3 components with notes
        And I open the unit with 2 annotatable components
        When I change text in the notes
        Then I see that notes were correctly changed
        When I change sequential position to "2"
        And I change the note on the page
        Then I see that note was correctly changed
        When I refresh the page
        Then I see that edited note was correctly stored
        When I change sequential position to "1"
        Then I see that edited notes were correctly stored on the page
        """
        self._add_notes()
        self.note_unit_page.visit()

        components = self.note_unit_page.components
        self.edit_notes(components)
        self.assert_text_in_notes(self.note_unit_page.notes)

        self.course_nav.go_to_sequential_position(2)
        components = self.note_unit_page.components
        self.edit_notes(components)
        self.assert_text_in_notes(self.note_unit_page.notes)

        components = self.note_unit_page.refresh()
        self.assert_text_in_notes(self.note_unit_page.notes)

        self.course_nav.go_to_sequential_position(1)
        components = self.note_unit_page.components
        self.assert_text_in_notes(self.note_unit_page.notes)

    def test_can_delete_notes(self):
        """
        Scenario: User can delete notes.
        Given I have a course with 3 components with notes
        And I open the unit with 2 annotatable components
        When I remove all notes on the page
        Then I do not see any notes on the page
        When I change sequential position to "2"
        And I remove all notes on the page
        Then I do not see any notes on the page
        When I refresh the page
        Then I do not see any notes on the page
        When I change sequential position to "1"
        Then I do not see any notes on the page
        """
        self._add_notes()
        self.note_unit_page.visit()

        components = self.note_unit_page.components
        self.remove_notes(components)
        self.assert_notes_are_removed(components)

        self.course_nav.go_to_sequential_position(2)
        components = self.note_unit_page.components
        self.remove_notes(components)
        self.assert_notes_are_removed(components)

        components = self.note_unit_page.refresh()
        self.assert_notes_are_removed(components)

        self.course_nav.go_to_sequential_position(1)
        components = self.note_unit_page.components
        self.assert_notes_are_removed(components)

    def test_can_create_note_with_tags(self):
        """
        Scenario: a user of notes can define one with tags
        Given I have a course with 3 annotatable components
        And I open the unit with 2 annotatable components
        When I add a note with tags for the first component
        And I refresh the page
        Then I see that note was correctly stored with its tags
        """
        self.note_unit_page.visit()

        components = self.note_unit_page.components
        for note in components[0].create_note(".{}".format(self.selector)):
            note.tags = ["fruit", "tasty"]

        self.note_unit_page.refresh()
        self.assertEqual(["fruit", "tasty"], self.note_unit_page.notes[0].tags)

    def test_can_change_tags(self):
        """
        Scenario: a user of notes can edit tags on notes
        Given I have a course with 3 components with notes
        When I open the unit with 2 annotatable components
        And I edit tags on the notes for the 2 annotatable components
        Then I see that the tags were correctly changed
        And I again edit tags on the notes for the 2 annotatable components
        And I refresh the page
        Then I see that the tags were correctly changed
        """
        self._add_notes()
        self.note_unit_page.visit()

        components = self.note_unit_page.components
        self.edit_tags_in_notes(components, [["hard"], ["apple", "pear"]])
        self.assert_tags_in_notes(self.note_unit_page.notes, [["hard"], ["apple", "pear"]])

        self.edit_tags_in_notes(components, [[], ["avocado"]])
        self.assert_tags_in_notes(self.note_unit_page.notes, [[], ["avocado"]])

        self.note_unit_page.refresh()
        self.assert_tags_in_notes(self.note_unit_page.notes, [[], ["avocado"]])

    def test_sr_labels(self):
        """
        Scenario: screen reader labels exist for text and tags fields
        Given I have a course with 3 components with notes
        When I open the unit with 2 annotatable components
        And I open the editor for each note
        Then the text and tags fields both have screen reader labels
        """
        self._add_notes()
        self.note_unit_page.visit()

        # First note is in the first annotatable component, will have field indexes 0 and 1.
        for note in self.note_unit_page.components[0].edit_note():
            self.assertTrue(note.has_sr_label(0, 0, "Note"))
            self.assertTrue(note.has_sr_label(1, 1, "Tags (space-separated)"))

        # Second note is in the second annotatable component, will have field indexes 2 and 3.
        for note in self.note_unit_page.components[1].edit_note():
            self.assertTrue(note.has_sr_label(0, 2, "Note"))
            self.assertTrue(note.has_sr_label(1, 3, "Tags (space-separated)"))


@attr('shard_4')
class EdxNotesPageTest(EventsTestMixin, EdxNotesTestMixin):
    """
    Tests for Notes page.
    """
    def _add_notes(self, notes_list):
        self.edxnotes_fixture.create_notes(notes_list)
        self.edxnotes_fixture.install()

    def _add_default_notes(self, tags=None):
        """
        Creates 5 test notes. If tags are not specified, will populate the notes with some test tag data.
        If tags are specified, they will be used for each of the 3 notes that have tags.
        """
        xblocks = self.course_fixture.get_nested_xblocks(category="html")
        # pylint: disable=attribute-defined-outside-init
        self.raw_note_list = [
            Note(
                usage_id=xblocks[4].locator,
                user=self.username,
                course_id=self.course_fixture._course_key,
                text="First note",
                quote="Annotate this text",
                updated=datetime(2011, 1, 1, 1, 1, 1, 1).isoformat(),
            ),
            Note(
                usage_id=xblocks[2].locator,
                user=self.username,
                course_id=self.course_fixture._course_key,
                text="",
                quote=u"Annotate this text",
                updated=datetime(2012, 1, 1, 1, 1, 1, 1).isoformat(),
                tags=["Review", "cool"] if tags is None else tags
            ),
            Note(
                usage_id=xblocks[0].locator,
                user=self.username,
                course_id=self.course_fixture._course_key,
                text="Third note",
                quote="Annotate this text",
                updated=datetime(2013, 1, 1, 1, 1, 1, 1).isoformat(),
                ranges=[Range(startOffset=0, endOffset=18)],
                tags=["Cool", "TODO"] if tags is None else tags
            ),
            Note(
                usage_id=xblocks[3].locator,
user=self.username,\n course_id=self.course_fixture._course_key,\n text=\"Fourth note\",\n quote=\"\",\n updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),\n tags=[\"review\"] if tags is None else tags\n ),\n Note(\n usage_id=xblocks[1].locator,\n user=self.username,\n course_id=self.course_fixture._course_key,\n text=\"Fifth note\",\n quote=\"Annotate this text\",\n updated=datetime(2015, 1, 1, 1, 1, 1, 1).isoformat()\n ),\n ]\n self._add_notes(self.raw_note_list)\n\n def assertNoteContent(self, item, text=None, quote=None, unit_name=None, time_updated=None, tags=None):\n \"\"\" Verifies the expected properties of the note. \"\"\"\n self.assertEqual(text, item.text)\n if item.quote is not None:\n self.assertIn(quote, item.quote)\n else:\n self.assertIsNone(quote)\n self.assertEqual(unit_name, item.unit_name)\n self.assertEqual(time_updated, item.time_updated)\n self.assertEqual(tags, item.tags)\n\n def assertChapterContent(self, item, title=None, subtitles=None):\n \"\"\"\n Verifies the expected title and subsection titles (subtitles) for the given chapter.\n \"\"\"\n self.assertEqual(item.title, title)\n self.assertEqual(item.subtitles, subtitles)\n\n def assertGroupContent(self, item, title=None, notes=None):\n \"\"\"\n Verifies the expected title and child notes for the given group.\n \"\"\"\n self.assertEqual(item.title, title)\n self.assertEqual(item.notes, notes)\n\n def assert_viewed_event(self, view=None):\n \"\"\"\n Verifies that the correct view event was captured for the Notes page.\n \"\"\"\n # There will always be an initial event for \"Recent Activity\" because that is the default view.\n # If view is something besides \"Recent Activity\", expect 2 events, with the second one being\n # the view name passed in.\n if view == 'Recent Activity':\n view = None\n actual_events = self.wait_for_events(\n event_filter={'event_type': 'edx.course.student_notes.notes_page_viewed'},\n number_of_matches=1 if view is None else 2\n )\n expected_events = 
[{'event': {'view': 'Recent Activity'}}]\n if view:\n expected_events.append({'event': {'view': view}})\n self.assert_events_match(expected_events, actual_events)\n\n def assert_unit_link_event(self, usage_id, view):\n \"\"\"\n Verifies that the correct used_unit_link event was captured for the Notes page.\n \"\"\"\n actual_events = self.wait_for_events(\n event_filter={'event_type': 'edx.course.student_notes.used_unit_link'},\n number_of_matches=1\n )\n expected_events = [\n {'event': {'component_usage_id': usage_id, 'view': view}}\n ]\n self.assert_events_match(expected_events, actual_events)\n\n def assert_search_event(self, search_string, number_of_results):\n \"\"\"\n Verifies that the correct searched event was captured for the Notes page.\n \"\"\"\n actual_events = self.wait_for_events(\n event_filter={'event_type': 'edx.course.student_notes.searched'},\n number_of_matches=1\n )\n expected_events = [\n {'event': {'search_string': search_string, 'number_of_results': number_of_results}}\n ]\n self.assert_events_match(expected_events, actual_events)\n\n def test_no_content(self):\n \"\"\"\n Scenario: User can see `No content` message.\n Given I have a course without notes\n When I open Notes page\n Then I see only \"You do not have any notes within the course.\" message\n \"\"\"\n notes_page_empty = EdxNotesPageNoContent(self.browser, self.course_id)\n notes_page_empty.visit()\n self.assertIn(\n \"You have not made any notes in this course yet. 
Other students in this course are using notes to:\",\n notes_page_empty.no_content_text)\n\n def test_recent_activity_view(self):\n \"\"\"\n Scenario: User can view all notes by recent activity.\n Given I have a course with 5 notes\n When I open Notes page\n Then I see 5 notes sorted by the updated date\n And I see correct content in the notes\n And an event has fired indicating that the Recent Activity view was selected\n \"\"\"\n self._add_default_notes()\n self.notes_page.visit()\n notes = self.notes_page.notes\n self.assertEqual(len(notes), 5)\n\n self.assertNoteContent(\n notes[0],\n quote=u\"Annotate this text\",\n text=u\"Fifth note\",\n unit_name=\"Test Unit 1\",\n time_updated=\"Jan 01, 2015 at 01:01 UTC\"\n )\n\n self.assertNoteContent(\n notes[1],\n text=u\"Fourth note\",\n unit_name=\"Test Unit 3\",\n time_updated=\"Jan 01, 2014 at 01:01 UTC\",\n tags=[\"review\"]\n )\n\n self.assertNoteContent(\n notes[2],\n quote=\"Annotate this text\",\n text=u\"Third note\",\n unit_name=\"Test Unit 1\",\n time_updated=\"Jan 01, 2013 at 01:01 UTC\",\n tags=[\"Cool\", \"TODO\"]\n )\n\n self.assertNoteContent(\n notes[3],\n quote=u\"Annotate this text\",\n unit_name=\"Test Unit 2\",\n time_updated=\"Jan 01, 2012 at 01:01 UTC\",\n tags=[\"Review\", \"cool\"]\n )\n\n self.assertNoteContent(\n notes[4],\n quote=u\"Annotate this text\",\n text=u\"First note\",\n unit_name=\"Test Unit 4\",\n time_updated=\"Jan 01, 2011 at 01:01 UTC\"\n )\n\n self.assert_viewed_event()\n\n def test_course_structure_view(self):\n \"\"\"\n Scenario: User can view all notes by location in Course.\n Given I have a course with 5 notes\n When I open Notes page\n And I switch to \"Location in Course\" view\n Then I see 2 groups, 3 sections and 5 notes\n And I see correct content in the notes and groups\n And an event has fired indicating that the Location in Course view was selected\n \"\"\"\n self._add_default_notes()\n self.notes_page.visit().switch_to_tab(\"structure\")\n\n notes = 
self.notes_page.notes\n groups = self.notes_page.chapter_groups\n sections = self.notes_page.subsection_groups\n self.assertEqual(len(notes), 5)\n self.assertEqual(len(groups), 2)\n self.assertEqual(len(sections), 3)\n\n self.assertChapterContent(\n groups[0],\n title=u\"Test Section 1\",\n subtitles=[u\"Test Subsection 1\", u\"Test Subsection 2\"]\n )\n\n self.assertGroupContent(\n sections[0],\n title=u\"Test Subsection 1\",\n notes=[u\"Fifth note\", u\"Third note\", None]\n )\n\n self.assertNoteContent(\n notes[0],\n quote=u\"Annotate this text\",\n text=u\"Fifth note\",\n unit_name=\"Test Unit 1\",\n time_updated=\"Jan 01, 2015 at 01:01 UTC\"\n )\n\n self.assertNoteContent(\n notes[1],\n quote=u\"Annotate this text\",\n text=u\"Third note\",\n unit_name=\"Test Unit 1\",\n time_updated=\"Jan 01, 2013 at 01:01 UTC\",\n tags=[\"Cool\", \"TODO\"]\n )\n\n self.assertNoteContent(\n notes[2],\n quote=u\"Annotate this text\",\n unit_name=\"Test Unit 2\",\n time_updated=\"Jan 01, 2012 at 01:01 UTC\",\n tags=[\"Review\", \"cool\"]\n )\n\n self.assertGroupContent(\n sections[1],\n title=u\"Test Subsection 2\",\n notes=[u\"Fourth note\"]\n )\n\n self.assertNoteContent(\n notes[3],\n text=u\"Fourth note\",\n unit_name=\"Test Unit 3\",\n time_updated=\"Jan 01, 2014 at 01:01 UTC\",\n tags=[\"review\"]\n )\n\n self.assertChapterContent(\n groups[1],\n title=u\"Test Section 2\",\n subtitles=[u\"Test Subsection 3\"],\n )\n\n self.assertGroupContent(\n sections[2],\n title=u\"Test Subsection 3\",\n notes=[u\"First note\"]\n )\n\n self.assertNoteContent(\n notes[4],\n quote=u\"Annotate this text\",\n text=u\"First note\",\n unit_name=\"Test Unit 4\",\n time_updated=\"Jan 01, 2011 at 01:01 UTC\"\n )\n\n self.assert_viewed_event('Location in Course')\n\n def test_tags_view(self):\n \"\"\"\n Scenario: User can view all notes by associated tags.\n Given I have a course with 5 notes and I am viewing the Notes page\n When I switch to the \"Tags\" view\n Then I see 4 tag groups\n And I 
see correct content in the notes and groups\n And an event has fired indicating that the Tags view was selected\n \"\"\"\n self._add_default_notes()\n self.notes_page.visit().switch_to_tab(\"tags\")\n\n notes = self.notes_page.notes\n groups = self.notes_page.tag_groups\n self.assertEqual(len(notes), 7)\n self.assertEqual(len(groups), 4)\n\n # Tag group \"cool\"\n self.assertGroupContent(\n groups[0],\n title=u\"cool (2)\",\n notes=[u\"Third note\", None]\n )\n\n self.assertNoteContent(\n notes[0],\n quote=u\"Annotate this text\",\n text=u\"Third note\",\n unit_name=\"Test Unit 1\",\n time_updated=\"Jan 01, 2013 at 01:01 UTC\",\n tags=[\"Cool\", \"TODO\"]\n )\n\n self.assertNoteContent(\n notes[1],\n quote=u\"Annotate this text\",\n unit_name=\"Test Unit 2\",\n time_updated=\"Jan 01, 2012 at 01:01 UTC\",\n tags=[\"Review\", \"cool\"]\n )\n\n # Tag group \"review\"\n self.assertGroupContent(\n groups[1],\n title=u\"review (2)\",\n notes=[u\"Fourth note\", None]\n )\n\n self.assertNoteContent(\n notes[2],\n text=u\"Fourth note\",\n unit_name=\"Test Unit 3\",\n time_updated=\"Jan 01, 2014 at 01:01 UTC\",\n tags=[\"review\"]\n )\n\n self.assertNoteContent(\n notes[3],\n quote=u\"Annotate this text\",\n unit_name=\"Test Unit 2\",\n time_updated=\"Jan 01, 2012 at 01:01 UTC\",\n tags=[\"Review\", \"cool\"]\n )\n\n # Tag group \"todo\"\n self.assertGroupContent(\n groups[2],\n title=u\"todo (1)\",\n notes=[\"Third note\"]\n )\n\n self.assertNoteContent(\n notes[4],\n quote=u\"Annotate this text\",\n text=u\"Third note\",\n unit_name=\"Test Unit 1\",\n time_updated=\"Jan 01, 2013 at 01:01 UTC\",\n tags=[\"Cool\", \"TODO\"]\n )\n\n # Notes with no tags\n self.assertGroupContent(\n groups[3],\n title=u\"[no tags] (2)\",\n notes=[\"Fifth note\", \"First note\"]\n )\n\n self.assertNoteContent(\n notes[5],\n quote=u\"Annotate this text\",\n text=u\"Fifth note\",\n unit_name=\"Test Unit 1\",\n time_updated=\"Jan 01, 2015 at 01:01 UTC\"\n )\n\n self.assertNoteContent(\n 
notes[6],\n quote=u\"Annotate this text\",\n text=u\"First note\",\n unit_name=\"Test Unit 4\",\n time_updated=\"Jan 01, 2011 at 01:01 UTC\"\n )\n\n self.assert_viewed_event('Tags')\n\n def test_easy_access_from_notes_page(self):\n \"\"\"\n Scenario: Ensure that the link to the Unit works correctly.\n Given I have a course with 5 notes\n When I open Notes page\n And I click on the first unit link\n Then I see correct text on the unit page and a unit link event was fired\n When go back to the Notes page\n And I switch to \"Location in Course\" view\n And I click on the second unit link\n Then I see correct text on the unit page and a unit link event was fired\n When go back to the Notes page\n And I switch to \"Tags\" view\n And I click on the first unit link\n Then I see correct text on the unit page and a unit link event was fired\n When go back to the Notes page\n And I run the search with \"Fifth\" query\n And I click on the first unit link\n Then I see correct text on the unit page and a unit link event was fired\n \"\"\"\n def assert_page(note, usage_id, view):\n \"\"\" Verify that clicking on the unit link works properly. 
\"\"\"\n quote = note.quote\n note.go_to_unit()\n self.courseware_page.wait_for_page()\n self.assertIn(quote, self.courseware_page.xblock_component_html_content())\n self.assert_unit_link_event(usage_id, view)\n self.reset_event_tracking()\n\n self._add_default_notes()\n self.notes_page.visit()\n note = self.notes_page.notes[0]\n assert_page(note, self.raw_note_list[4]['usage_id'], \"Recent Activity\")\n\n self.notes_page.visit().switch_to_tab(\"structure\")\n note = self.notes_page.notes[1]\n assert_page(note, self.raw_note_list[2]['usage_id'], \"Location in Course\")\n\n self.notes_page.visit().switch_to_tab(\"tags\")\n note = self.notes_page.notes[0]\n assert_page(note, self.raw_note_list[2]['usage_id'], \"Tags\")\n\n self.notes_page.visit().search(\"Fifth\")\n note = self.notes_page.notes[0]\n assert_page(note, self.raw_note_list[4]['usage_id'], \"Search Results\")\n\n def test_search_behaves_correctly(self):\n \"\"\"\n Scenario: Searching behaves correctly.\n Given I have a course with 5 notes\n When I open Notes page\n When I run the search with \" \" query\n Then I see the following error message \"Please enter a term in the search field.\"\n And I do not see \"Search Results\" tab\n When I run the search with \"note\" query\n Then I see that error message disappears\n And I see that \"Search Results\" tab appears with 4 notes found\n And an event has fired indicating that the Search Results view was selected\n And an event has fired recording the search that was performed\n \"\"\"\n self._add_default_notes()\n self.notes_page.visit()\n # Run the search with whitespaces only\n self.notes_page.search(\" \")\n # Displays error message\n self.assertTrue(self.notes_page.is_error_visible)\n self.assertEqual(self.notes_page.error_text, u\"Please enter a term in the search field.\")\n # Search results tab does not appear\n self.assertNotIn(u\"Search Results\", self.notes_page.tabs)\n # Run the search with correct query\n self.notes_page.search(\"note\")\n # Error 
message disappears\n self.assertFalse(self.notes_page.is_error_visible)\n self.assertIn(u\"Search Results\", self.notes_page.tabs)\n notes = self.notes_page.notes\n self.assertEqual(len(notes), 4)\n\n self.assertNoteContent(\n notes[0],\n quote=u\"Annotate this text\",\n text=u\"Fifth note\",\n unit_name=\"Test Unit 1\",\n time_updated=\"Jan 01, 2015 at 01:01 UTC\"\n )\n\n self.assertNoteContent(\n notes[1],\n text=u\"Fourth note\",\n unit_name=\"Test Unit 3\",\n time_updated=\"Jan 01, 2014 at 01:01 UTC\",\n tags=[\"review\"]\n )\n\n self.assertNoteContent(\n notes[2],\n quote=\"Annotate this text\",\n text=u\"Third note\",\n unit_name=\"Test Unit 1\",\n time_updated=\"Jan 01, 2013 at 01:01 UTC\",\n tags=[\"Cool\", \"TODO\"]\n )\n\n self.assertNoteContent(\n notes[3],\n quote=u\"Annotate this text\",\n text=u\"First note\",\n unit_name=\"Test Unit 4\",\n time_updated=\"Jan 01, 2011 at 01:01 UTC\"\n )\n\n self.assert_viewed_event('Search Results')\n self.assert_search_event('note', 4)\n\n def test_scroll_to_tag_recent_activity(self):\n \"\"\"\n Scenario: Can scroll to a tag group from the Recent Activity view (default view)\n Given I have a course with 5 notes and I open the Notes page\n When I click on a tag associated with a note\n Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag\n \"\"\"\n self._add_default_notes([\"apple\", \"banana\", \"kiwi\", \"pear\", \"pumpkin\", \"squash\", \"zucchini\"])\n self.notes_page.visit()\n self._scroll_to_tag_and_verify(\"pear\", 3)\n\n def test_scroll_to_tag_course_structure(self):\n \"\"\"\n Scenario: Can scroll to a tag group from the Course Structure view\n Given I have a course with 5 notes and I open the Notes page and select the Course Structure view\n When I click on a tag associated with a note\n Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag\n \"\"\"\n self._add_default_notes([\"apple\", \"banana\", \"kiwi\", \"pear\", 
\"pumpkin\", \"squash\", \"zucchini\"])\n self.notes_page.visit().switch_to_tab(\"structure\")\n self._scroll_to_tag_and_verify(\"squash\", 5)\n\n def test_scroll_to_tag_search(self):\n \"\"\"\n Scenario: Can scroll to a tag group from the Search Results view\n Given I have a course with 5 notes and I open the Notes page and perform a search\n Then the Search view tab opens and gets focus\n And when I click on a tag associated with a note\n Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag\n \"\"\"\n self._add_default_notes([\"apple\", \"banana\", \"kiwi\", \"pear\", \"pumpkin\", \"squash\", \"zucchini\"])\n self.notes_page.visit().search(\"note\")\n self._scroll_to_tag_and_verify(\"pumpkin\", 4)\n\n def test_scroll_to_tag_from_tag_view(self):\n \"\"\"\n Scenario: Can scroll to a tag group from the Tags view\n Given I have a course with 5 notes and I open the Notes page and select the Tag view\n When I click on a tag associated with a note\n Then I scroll to the section of notes associated with that tag\n \"\"\"\n self._add_default_notes([\"apple\", \"banana\", \"kiwi\", \"pear\", \"pumpkin\", \"squash\", \"zucchini\"])\n self.notes_page.visit().switch_to_tab(\"tags\")\n self._scroll_to_tag_and_verify(\"kiwi\", 2)\n\n def _scroll_to_tag_and_verify(self, tag_name, group_index):\n \"\"\" Helper method for all scroll to tag tests \"\"\"\n self.notes_page.notes[1].go_to_tag(tag_name)\n\n # Because all the notes (with tags) have the same tags, they will end up ordered alphabetically.\n pear_group = self.notes_page.tag_groups[group_index]\n self.assertEqual(tag_name + \" (3)\", pear_group.title)\n self.assertTrue(pear_group.scrolled_to_top(group_index))\n\n def test_tabs_behaves_correctly(self):\n \"\"\"\n Scenario: Tabs behaves correctly.\n Given I have a course with 5 notes\n When I open Notes page\n Then I see only \"Recent Activity\", \"Location in Course\", and \"Tags\" tabs\n When I run the search with \"note\" query\n 
And I see that \"Search Results\" tab appears with 4 notes found\n Then I switch to \"Recent Activity\" tab\n And I see all 5 notes\n Then I switch to \"Location in Course\" tab\n And I see all 2 groups and 5 notes\n When I switch back to \"Search Results\" tab\n Then I can still see 4 notes found\n When I close \"Search Results\" tab\n Then I see that \"Recent Activity\" tab becomes active\n And \"Search Results\" tab disappears\n And I see all 5 notes\n \"\"\"\n self._add_default_notes()\n self.notes_page.visit()\n\n # We're on Recent Activity tab.\n self.assertEqual(len(self.notes_page.tabs), 3)\n self.assertEqual([u\"Recent Activity\", u\"Location in Course\", u\"Tags\"], self.notes_page.tabs)\n self.notes_page.search(\"note\")\n # We're on Search Results tab\n self.assertEqual(len(self.notes_page.tabs), 4)\n self.assertIn(u\"Search Results\", self.notes_page.tabs)\n self.assertEqual(len(self.notes_page.notes), 4)\n # We can switch on Recent Activity tab and back.\n self.notes_page.switch_to_tab(\"recent\")\n self.assertEqual(len(self.notes_page.notes), 5)\n self.notes_page.switch_to_tab(\"structure\")\n self.assertEqual(len(self.notes_page.chapter_groups), 2)\n self.assertEqual(len(self.notes_page.notes), 5)\n self.notes_page.switch_to_tab(\"search\")\n self.assertEqual(len(self.notes_page.notes), 4)\n # Can close search results page\n self.notes_page.close_tab()\n self.assertEqual(len(self.notes_page.tabs), 3)\n self.assertNotIn(u\"Search Results\", self.notes_page.tabs)\n self.assertEqual(len(self.notes_page.notes), 5)\n\n def test_open_note_when_accessed_from_notes_page(self):\n \"\"\"\n Scenario: Ensure that the link to the Unit opens a note only once.\n Given I have a course with 2 sequentials that contain respectively one note and two notes\n When I open Notes page\n And I click on the first unit link\n Then I see the note opened on the unit page\n When I switch to the second sequential\n I do not see any note opened\n When I switch back to first 
sequential\n I do not see any note opened\n \"\"\"\n xblocks = self.course_fixture.get_nested_xblocks(category=\"html\")\n self._add_notes([\n Note(\n usage_id=xblocks[1].locator,\n user=self.username,\n course_id=self.course_fixture._course_key,\n text=\"Third note\",\n quote=\"Annotate this text\",\n updated=datetime(2012, 1, 1, 1, 1, 1, 1).isoformat(),\n ranges=[Range(startOffset=0, endOffset=19)],\n ),\n Note(\n usage_id=xblocks[2].locator,\n user=self.username,\n course_id=self.course_fixture._course_key,\n text=\"Second note\",\n quote=\"Annotate this text\",\n updated=datetime(2013, 1, 1, 1, 1, 1, 1).isoformat(),\n ranges=[Range(startOffset=0, endOffset=19)],\n ),\n Note(\n usage_id=xblocks[0].locator,\n user=self.username,\n course_id=self.course_fixture._course_key,\n text=\"First note\",\n quote=\"Annotate this text\",\n updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),\n ranges=[Range(startOffset=0, endOffset=19)],\n ),\n ])\n self.notes_page.visit()\n item = self.notes_page.notes[0]\n item.go_to_unit()\n self.courseware_page.wait_for_page()\n note = self.note_unit_page.notes[0]\n self.assertTrue(note.is_visible)\n note = self.note_unit_page.notes[1]\n self.assertFalse(note.is_visible)\n self.course_nav.go_to_sequential_position(2)\n note = self.note_unit_page.notes[0]\n self.assertFalse(note.is_visible)\n self.course_nav.go_to_sequential_position(1)\n note = self.note_unit_page.notes[0]\n self.assertFalse(note.is_visible)\n\n\n@attr('shard_4')\nclass EdxNotesToggleSingleNoteTest(EdxNotesTestMixin):\n \"\"\"\n Tests for toggling single annotation.\n \"\"\"\n\n def setUp(self):\n super(EdxNotesToggleSingleNoteTest, self).setUp()\n self._add_notes()\n self.note_unit_page.visit()\n\n def test_can_toggle_by_clicking_on_highlighted_text(self):\n \"\"\"\n Scenario: User can toggle a single note by clicking on highlighted text.\n Given I have a course with components with notes\n When I click on highlighted text\n And I move mouse out of the note\n Then I 
see that the note is still shown\n When I click outside the note\n Then I see the the note is closed\n \"\"\"\n note = self.note_unit_page.notes[0]\n\n note.click_on_highlight()\n self.note_unit_page.move_mouse_to(\"body\")\n self.assertTrue(note.is_visible)\n self.note_unit_page.click(\"body\")\n self.assertFalse(note.is_visible)\n\n def test_can_toggle_by_clicking_on_the_note(self):\n \"\"\"\n Scenario: User can toggle a single note by clicking on the note.\n Given I have a course with components with notes\n When I click on the note\n And I move mouse out of the note\n Then I see that the note is still shown\n When I click outside the note\n Then I see the the note is closed\n \"\"\"\n note = self.note_unit_page.notes[0]\n\n note.show().click_on_viewer()\n self.note_unit_page.move_mouse_to(\"body\")\n self.assertTrue(note.is_visible)\n self.note_unit_page.click(\"body\")\n self.assertFalse(note.is_visible)\n\n def test_interaction_between_notes(self):\n \"\"\"\n Scenario: Interactions between notes works well.\n Given I have a course with components with notes\n When I click on highlighted text in the first component\n And I move mouse out of the note\n Then I see that the note is still shown\n When I click on highlighted text in the second component\n Then I see that the new note is shown\n \"\"\"\n note_1 = self.note_unit_page.notes[0]\n note_2 = self.note_unit_page.notes[1]\n\n note_1.click_on_highlight()\n self.note_unit_page.move_mouse_to(\"body\")\n self.assertTrue(note_1.is_visible)\n\n note_2.click_on_highlight()\n self.assertFalse(note_1.is_visible)\n self.assertTrue(note_2.is_visible)\n\n\n@attr('shard_4')\nclass EdxNotesToggleNotesTest(EdxNotesTestMixin):\n \"\"\"\n Tests for toggling visibility of all notes.\n \"\"\"\n\n def setUp(self):\n super(EdxNotesToggleNotesTest, self).setUp()\n self._add_notes()\n self.note_unit_page.visit()\n\n def test_can_disable_all_notes(self):\n \"\"\"\n Scenario: User can disable all notes.\n Given I have a course with 
components with notes\n And I open the unit with annotatable components\n When I click on \"Show notes\" checkbox\n Then I do not see any notes on the sequential position\n When I change sequential position to \"2\"\n Then I still do not see any notes on the sequential position\n When I go to \"Test Subsection 2\" subsection\n Then I do not see any notes on the subsection\n \"\"\"\n # Disable all notes\n self.note_unit_page.toggle_visibility()\n self.assertEqual(len(self.note_unit_page.notes), 0)\n self.course_nav.go_to_sequential_position(2)\n self.assertEqual(len(self.note_unit_page.notes), 0)\n self.course_nav.go_to_section(u\"Test Section 1\", u\"Test Subsection 2\")\n self.assertEqual(len(self.note_unit_page.notes), 0)\n\n def test_can_reenable_all_notes(self):\n \"\"\"\n Scenario: User can toggle notes visibility.\n Given I have a course with components with notes\n And I open the unit with annotatable components\n When I click on \"Show notes\" checkbox\n Then I do not see any notes on the sequential position\n When I click on \"Show notes\" checkbox again\n Then I see that all notes appear\n When I change sequential position to \"2\"\n Then I still can see all notes on the sequential position\n When I go to \"Test Subsection 2\" subsection\n Then I can see all notes on the subsection\n \"\"\"\n # Disable notes\n self.note_unit_page.toggle_visibility()\n self.assertEqual(len(self.note_unit_page.notes), 0)\n # Enable notes to make sure that I can enable notes without refreshing\n # the page.\n self.note_unit_page.toggle_visibility()\n self.assertGreater(len(self.note_unit_page.notes), 0)\n self.course_nav.go_to_sequential_position(2)\n self.assertGreater(len(self.note_unit_page.notes), 0)\n self.course_nav.go_to_section(u\"Test Section 1\", u\"Test Subsection 2\")\n self.assertGreater(len(self.note_unit_page.notes), 
0)\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":476126,"cells":{"repo_name":{"kind":"string","value":"LouisPlisso/pytomo"},"path":{"kind":"string","value":"pytomo/fpdf/fonts.py"},"copies":{"kind":"string","value":"34"},"size":{"kind":"string","value":"26574"},"content":{"kind":"string","value":"#!/usr/bin/env python\r\n# -*- coding: latin-1 -*-\r\n\r\n# Fonts:\r\n\r\nfpdf_charwidths = {}\r\n\r\nfpdf_charwidths['courier']={}\r\n\r\nfor i in xrange(0,256):\r\n fpdf_charwidths['courier'][chr(i)]=600\r\n fpdf_charwidths['courierB']=fpdf_charwidths['courier']\r\n fpdf_charwidths['courierI']=fpdf_charwidths['courier']\r\n fpdf_charwidths['courierBI']=fpdf_charwidths['courier']\r\n\r\nfpdf_charwidths['helvetica']={\r\n '\\x00':278,'\\x01':278,'\\x02':278,'\\x03':278,'\\x04':278,'\\x05':278,'\\x06':278,'\\x07':278,'\\x08':278,'\\t':278,'\\n':278,'\\x0b':278,'\\x0c':278,'\\r':278,'\\x0e':278,'\\x0f':278,'\\x10':278,'\\x11':278,'\\x12':278,'\\x13':278,'\\x14':278,'\\x15':278,\r\n '\\x16':278,'\\x17':278,'\\x18':278,'\\x19':278,'\\x1a':278,'\\x1b':278,'\\x1c':278,'\\x1d':278,'\\x1e':278,'\\x1f':278,' ':278,'!':278,'\"':355,'#':556,'$':556,'%':889,'&':667,'\\'':191,'(':333,')':333,'*':389,'+':584,\r\n ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':278,';':278,'<':584,'=':584,'>':584,'?':556,'@':1015,'A':667,\r\n 'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':500,'K':667,'L':556,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,\r\n 'X':667,'Y':667,'Z':611,'[':278,'\\\\':278,']':278,'^':469,'_':556,'`':333,'a':556,'b':556,'c':500,'d':556,'e':556,'f':278,'g':556,'h':556,'i':222,'j':222,'k':500,'l':222,'m':833,\r\n 'n':556,'o':556,'p':556,'q':556,'r':333,'s':500,'t':278,'u':556,'v':500,'w':722,'x':500,'y':500,'z':500,'{':334,'|':260,'}':334,'~':584,'\\x7f':350,'\\x80':556,'\\x81':350,'\\x82':222,'\\x83':556,\r\n 
'\\x84':333,'\\x85':1000,'\\x86':556,'\\x87':556,'\\x88':333,'\\x89':1000,'\\x8a':667,'\\x8b':333,'\\x8c':1000,'\\x8d':350,'\\x8e':611,'\\x8f':350,'\\x90':350,'\\x91':222,'\\x92':222,'\\x93':333,'\\x94':333,'\\x95':350,'\\x96':556,'\\x97':1000,'\\x98':333,'\\x99':1000,\r\n '\\x9a':500,'\\x9b':333,'\\x9c':944,'\\x9d':350,'\\x9e':500,'\\x9f':667,'\\xa0':278,'\\xa1':333,'\\xa2':556,'\\xa3':556,'\\xa4':556,'\\xa5':556,'\\xa6':260,'\\xa7':556,'\\xa8':333,'\\xa9':737,'\\xaa':370,'\\xab':556,'\\xac':584,'\\xad':333,'\\xae':737,'\\xaf':333,\r\n '\\xb0':400,'\\xb1':584,'\\xb2':333,'\\xb3':333,'\\xb4':333,'\\xb5':556,'\\xb6':537,'\\xb7':278,'\\xb8':333,'\\xb9':333,'\\xba':365,'\\xbb':556,'\\xbc':834,'\\xbd':834,'\\xbe':834,'\\xbf':611,'\\xc0':667,'\\xc1':667,'\\xc2':667,'\\xc3':667,'\\xc4':667,'\\xc5':667,\r\n '\\xc6':1000,'\\xc7':722,'\\xc8':667,'\\xc9':667,'\\xca':667,'\\xcb':667,'\\xcc':278,'\\xcd':278,'\\xce':278,'\\xcf':278,'\\xd0':722,'\\xd1':722,'\\xd2':778,'\\xd3':778,'\\xd4':778,'\\xd5':778,'\\xd6':778,'\\xd7':584,'\\xd8':778,'\\xd9':722,'\\xda':722,'\\xdb':722,\r\n '\\xdc':722,'\\xdd':667,'\\xde':667,'\\xdf':611,'\\xe0':556,'\\xe1':556,'\\xe2':556,'\\xe3':556,'\\xe4':556,'\\xe5':556,'\\xe6':889,'\\xe7':500,'\\xe8':556,'\\xe9':556,'\\xea':556,'\\xeb':556,'\\xec':278,'\\xed':278,'\\xee':278,'\\xef':278,'\\xf0':556,'\\xf1':556,\r\n '\\xf2':556,'\\xf3':556,'\\xf4':556,'\\xf5':556,'\\xf6':556,'\\xf7':584,'\\xf8':611,'\\xf9':556,'\\xfa':556,'\\xfb':556,'\\xfc':556,'\\xfd':500,'\\xfe':556,'\\xff':500}\r\n\r\nfpdf_charwidths['helveticaB']={\r\n '\\x00':278,'\\x01':278,'\\x02':278,'\\x03':278,'\\x04':278,'\\x05':278,'\\x06':278,'\\x07':278,'\\x08':278,'\\t':278,'\\n':278,'\\x0b':278,'\\x0c':278,'\\r':278,'\\x0e':278,'\\x0f':278,'\\x10':278,'\\x11':278,'\\x12':278,'\\x13':278,'\\x14':278,'\\x15':278,\r\n '\\x16':278,'\\x17':278,'\\x18':278,'\\x19':278,'\\x1a':278,'\\x1b':278,'\\x1c':278,'\\x1d':278,'\\x1e':278,'\\x1f':278,' 
':278,'!':333,'\"':474,'#':556,'$':556,'%':889,'&':722,'\\'':238,'(':333,')':333,'*':389,'+':584,\r\n ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':333,';':333,'<':584,'=':584,'>':584,'?':611,'@':975,'A':722,\r\n 'B':722,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':556,'K':722,'L':611,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,\r\n 'X':667,'Y':667,'Z':611,'[':333,'\\\\':278,']':333,'^':584,'_':556,'`':333,'a':556,'b':611,'c':556,'d':611,'e':556,'f':333,'g':611,'h':611,'i':278,'j':278,'k':556,'l':278,'m':889,\r\n 'n':611,'o':611,'p':611,'q':611,'r':389,'s':556,'t':333,'u':611,'v':556,'w':778,'x':556,'y':556,'z':500,'{':389,'|':280,'}':389,'~':584,'\\x7f':350,'\\x80':556,'\\x81':350,'\\x82':278,'\\x83':556,\r\n '\\x84':500,'\\x85':1000,'\\x86':556,'\\x87':556,'\\x88':333,'\\x89':1000,'\\x8a':667,'\\x8b':333,'\\x8c':1000,'\\x8d':350,'\\x8e':611,'\\x8f':350,'\\x90':350,'\\x91':278,'\\x92':278,'\\x93':500,'\\x94':500,'\\x95':350,'\\x96':556,'\\x97':1000,'\\x98':333,'\\x99':1000,\r\n '\\x9a':556,'\\x9b':333,'\\x9c':944,'\\x9d':350,'\\x9e':500,'\\x9f':667,'\\xa0':278,'\\xa1':333,'\\xa2':556,'\\xa3':556,'\\xa4':556,'\\xa5':556,'\\xa6':280,'\\xa7':556,'\\xa8':333,'\\xa9':737,'\\xaa':370,'\\xab':556,'\\xac':584,'\\xad':333,'\\xae':737,'\\xaf':333,\r\n '\\xb0':400,'\\xb1':584,'\\xb2':333,'\\xb3':333,'\\xb4':333,'\\xb5':611,'\\xb6':556,'\\xb7':278,'\\xb8':333,'\\xb9':333,'\\xba':365,'\\xbb':556,'\\xbc':834,'\\xbd':834,'\\xbe':834,'\\xbf':611,'\\xc0':722,'\\xc1':722,'\\xc2':722,'\\xc3':722,'\\xc4':722,'\\xc5':722,\r\n '\\xc6':1000,'\\xc7':722,'\\xc8':667,'\\xc9':667,'\\xca':667,'\\xcb':667,'\\xcc':278,'\\xcd':278,'\\xce':278,'\\xcf':278,'\\xd0':722,'\\xd1':722,'\\xd2':778,'\\xd3':778,'\\xd4':778,'\\xd5':778,'\\xd6':778,'\\xd7':584,'\\xd8':778,'\\xd9':722,'\\xda':722,'\\xdb':722,\r\n 
'\\xdc':722,'\\xdd':667,'\\xde':667,'\\xdf':611,'\\xe0':556,'\\xe1':556,'\\xe2':556,'\\xe3':556,'\\xe4':556,'\\xe5':556,'\\xe6':889,'\\xe7':556,'\\xe8':556,'\\xe9':556,'\\xea':556,'\\xeb':556,'\\xec':278,'\\xed':278,'\\xee':278,'\\xef':278,'\\xf0':611,'\\xf1':611,\r\n '\\xf2':611,'\\xf3':611,'\\xf4':611,'\\xf5':611,'\\xf6':611,'\\xf7':584,'\\xf8':611,'\\xf9':611,'\\xfa':611,'\\xfb':611,'\\xfc':611,'\\xfd':556,'\\xfe':611,'\\xff':556\r\n}\r\n\r\nfpdf_charwidths['helveticaBI']={\r\n '\\x00':278,'\\x01':278,'\\x02':278,'\\x03':278,'\\x04':278,'\\x05':278,'\\x06':278,'\\x07':278,'\\x08':278,'\\t':278,'\\n':278,'\\x0b':278,'\\x0c':278,'\\r':278,'\\x0e':278,'\\x0f':278,'\\x10':278,'\\x11':278,'\\x12':278,'\\x13':278,'\\x14':278,'\\x15':278,\r\n '\\x16':278,'\\x17':278,'\\x18':278,'\\x19':278,'\\x1a':278,'\\x1b':278,'\\x1c':278,'\\x1d':278,'\\x1e':278,'\\x1f':278,' ':278,'!':333,'\"':474,'#':556,'$':556,'%':889,'&':722,'\\'':238,'(':333,')':333,'*':389,'+':584,\r\n ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':333,';':333,'<':584,'=':584,'>':584,'?':611,'@':975,'A':722,\r\n 'B':722,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':556,'K':722,'L':611,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,\r\n 'X':667,'Y':667,'Z':611,'[':333,'\\\\':278,']':333,'^':584,'_':556,'`':333,'a':556,'b':611,'c':556,'d':611,'e':556,'f':333,'g':611,'h':611,'i':278,'j':278,'k':556,'l':278,'m':889,\r\n 'n':611,'o':611,'p':611,'q':611,'r':389,'s':556,'t':333,'u':611,'v':556,'w':778,'x':556,'y':556,'z':500,'{':389,'|':280,'}':389,'~':584,'\\x7f':350,'\\x80':556,'\\x81':350,'\\x82':278,'\\x83':556,\r\n '\\x84':500,'\\x85':1000,'\\x86':556,'\\x87':556,'\\x88':333,'\\x89':1000,'\\x8a':667,'\\x8b':333,'\\x8c':1000,'\\x8d':350,'\\x8e':611,'\\x8f':350,'\\x90':350,'\\x91':278,'\\x92':278,'\\x93':500,'\\x94':500,'\\x95':350,'\\x96':556,'\\x97':1000,'\\x98':333,'\\x99':1000,\r\n 
'\\x9a':556,'\\x9b':333,'\\x9c':944,'\\x9d':350,'\\x9e':500,'\\x9f':667,'\\xa0':278,'\\xa1':333,'\\xa2':556,'\\xa3':556,'\\xa4':556,'\\xa5':556,'\\xa6':280,'\\xa7':556,'\\xa8':333,'\\xa9':737,'\\xaa':370,'\\xab':556,'\\xac':584,'\\xad':333,'\\xae':737,'\\xaf':333,\r\n '\\xb0':400,'\\xb1':584,'\\xb2':333,'\\xb3':333,'\\xb4':333,'\\xb5':611,'\\xb6':556,'\\xb7':278,'\\xb8':333,'\\xb9':333,'\\xba':365,'\\xbb':556,'\\xbc':834,'\\xbd':834,'\\xbe':834,'\\xbf':611,'\\xc0':722,'\\xc1':722,'\\xc2':722,'\\xc3':722,'\\xc4':722,'\\xc5':722,\r\n '\\xc6':1000,'\\xc7':722,'\\xc8':667,'\\xc9':667,'\\xca':667,'\\xcb':667,'\\xcc':278,'\\xcd':278,'\\xce':278,'\\xcf':278,'\\xd0':722,'\\xd1':722,'\\xd2':778,'\\xd3':778,'\\xd4':778,'\\xd5':778,'\\xd6':778,'\\xd7':584,'\\xd8':778,'\\xd9':722,'\\xda':722,'\\xdb':722,\r\n '\\xdc':722,'\\xdd':667,'\\xde':667,'\\xdf':611,'\\xe0':556,'\\xe1':556,'\\xe2':556,'\\xe3':556,'\\xe4':556,'\\xe5':556,'\\xe6':889,'\\xe7':556,'\\xe8':556,'\\xe9':556,'\\xea':556,'\\xeb':556,'\\xec':278,'\\xed':278,'\\xee':278,'\\xef':278,'\\xf0':611,'\\xf1':611,\r\n '\\xf2':611,'\\xf3':611,'\\xf4':611,'\\xf5':611,'\\xf6':611,'\\xf7':584,'\\xf8':611,'\\xf9':611,'\\xfa':611,'\\xfb':611,'\\xfc':611,'\\xfd':556,'\\xfe':611,'\\xff':556}\r\n\r\nfpdf_charwidths['helveticaI']={\r\n '\\x00':278,'\\x01':278,'\\x02':278,'\\x03':278,'\\x04':278,'\\x05':278,'\\x06':278,'\\x07':278,'\\x08':278,'\\t':278,'\\n':278,'\\x0b':278,'\\x0c':278,'\\r':278,'\\x0e':278,'\\x0f':278,'\\x10':278,'\\x11':278,'\\x12':278,'\\x13':278,'\\x14':278,'\\x15':278,\r\n '\\x16':278,'\\x17':278,'\\x18':278,'\\x19':278,'\\x1a':278,'\\x1b':278,'\\x1c':278,'\\x1d':278,'\\x1e':278,'\\x1f':278,' ':278,'!':278,'\"':355,'#':556,'$':556,'%':889,'&':667,'\\'':191,'(':333,')':333,'*':389,'+':584,\r\n ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':278,';':278,'<':584,'=':584,'>':584,'?':556,'@':1015,'A':667,\r\n 
'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':500,'K':667,'L':556,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,\r\n 'X':667,'Y':667,'Z':611,'[':278,'\\\\':278,']':278,'^':469,'_':556,'`':333,'a':556,'b':556,'c':500,'d':556,'e':556,'f':278,'g':556,'h':556,'i':222,'j':222,'k':500,'l':222,'m':833,\r\n 'n':556,'o':556,'p':556,'q':556,'r':333,'s':500,'t':278,'u':556,'v':500,'w':722,'x':500,'y':500,'z':500,'{':334,'|':260,'}':334,'~':584,'\\x7f':350,'\\x80':556,'\\x81':350,'\\x82':222,'\\x83':556,\r\n '\\x84':333,'\\x85':1000,'\\x86':556,'\\x87':556,'\\x88':333,'\\x89':1000,'\\x8a':667,'\\x8b':333,'\\x8c':1000,'\\x8d':350,'\\x8e':611,'\\x8f':350,'\\x90':350,'\\x91':222,'\\x92':222,'\\x93':333,'\\x94':333,'\\x95':350,'\\x96':556,'\\x97':1000,'\\x98':333,'\\x99':1000,\r\n '\\x9a':500,'\\x9b':333,'\\x9c':944,'\\x9d':350,'\\x9e':500,'\\x9f':667,'\\xa0':278,'\\xa1':333,'\\xa2':556,'\\xa3':556,'\\xa4':556,'\\xa5':556,'\\xa6':260,'\\xa7':556,'\\xa8':333,'\\xa9':737,'\\xaa':370,'\\xab':556,'\\xac':584,'\\xad':333,'\\xae':737,'\\xaf':333,\r\n '\\xb0':400,'\\xb1':584,'\\xb2':333,'\\xb3':333,'\\xb4':333,'\\xb5':556,'\\xb6':537,'\\xb7':278,'\\xb8':333,'\\xb9':333,'\\xba':365,'\\xbb':556,'\\xbc':834,'\\xbd':834,'\\xbe':834,'\\xbf':611,'\\xc0':667,'\\xc1':667,'\\xc2':667,'\\xc3':667,'\\xc4':667,'\\xc5':667,\r\n '\\xc6':1000,'\\xc7':722,'\\xc8':667,'\\xc9':667,'\\xca':667,'\\xcb':667,'\\xcc':278,'\\xcd':278,'\\xce':278,'\\xcf':278,'\\xd0':722,'\\xd1':722,'\\xd2':778,'\\xd3':778,'\\xd4':778,'\\xd5':778,'\\xd6':778,'\\xd7':584,'\\xd8':778,'\\xd9':722,'\\xda':722,'\\xdb':722,\r\n '\\xdc':722,'\\xdd':667,'\\xde':667,'\\xdf':611,'\\xe0':556,'\\xe1':556,'\\xe2':556,'\\xe3':556,'\\xe4':556,'\\xe5':556,'\\xe6':889,'\\xe7':500,'\\xe8':556,'\\xe9':556,'\\xea':556,'\\xeb':556,'\\xec':278,'\\xed':278,'\\xee':278,'\\xef':278,'\\xf0':556,'\\xf1':556,\r\n 
'\\xf2':556,'\\xf3':556,'\\xf4':556,'\\xf5':556,'\\xf6':556,'\\xf7':584,'\\xf8':611,'\\xf9':556,'\\xfa':556,'\\xfb':556,'\\xfc':556,'\\xfd':500,'\\xfe':556,'\\xff':500}\r\n\r\nfpdf_charwidths['symbol']={\r\n '\\x00':250,'\\x01':250,'\\x02':250,'\\x03':250,'\\x04':250,'\\x05':250,'\\x06':250,'\\x07':250,'\\x08':250,'\\t':250,'\\n':250,'\\x0b':250,'\\x0c':250,'\\r':250,'\\x0e':250,'\\x0f':250,'\\x10':250,'\\x11':250,'\\x12':250,'\\x13':250,'\\x14':250,'\\x15':250,\r\n '\\x16':250,'\\x17':250,'\\x18':250,'\\x19':250,'\\x1a':250,'\\x1b':250,'\\x1c':250,'\\x1d':250,'\\x1e':250,'\\x1f':250,' ':250,'!':333,'\"':713,'#':500,'$':549,'%':833,'&':778,'\\'':439,'(':333,')':333,'*':500,'+':549,\r\n ',':250,'-':549,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':278,';':278,'<':549,'=':549,'>':549,'?':444,'@':549,'A':722,\r\n 'B':667,'C':722,'D':612,'E':611,'F':763,'G':603,'H':722,'I':333,'J':631,'K':722,'L':686,'M':889,'N':722,'O':722,'P':768,'Q':741,'R':556,'S':592,'T':611,'U':690,'V':439,'W':768,\r\n 'X':645,'Y':795,'Z':611,'[':333,'\\\\':863,']':333,'^':658,'_':500,'`':500,'a':631,'b':549,'c':549,'d':494,'e':439,'f':521,'g':411,'h':603,'i':329,'j':603,'k':549,'l':549,'m':576,\r\n 'n':521,'o':549,'p':549,'q':521,'r':549,'s':603,'t':439,'u':576,'v':713,'w':686,'x':493,'y':686,'z':494,'{':480,'|':200,'}':480,'~':549,'\\x7f':0,'\\x80':0,'\\x81':0,'\\x82':0,'\\x83':0,\r\n '\\x84':0,'\\x85':0,'\\x86':0,'\\x87':0,'\\x88':0,'\\x89':0,'\\x8a':0,'\\x8b':0,'\\x8c':0,'\\x8d':0,'\\x8e':0,'\\x8f':0,'\\x90':0,'\\x91':0,'\\x92':0,'\\x93':0,'\\x94':0,'\\x95':0,'\\x96':0,'\\x97':0,'\\x98':0,'\\x99':0,\r\n '\\x9a':0,'\\x9b':0,'\\x9c':0,'\\x9d':0,'\\x9e':0,'\\x9f':0,'\\xa0':750,'\\xa1':620,'\\xa2':247,'\\xa3':549,'\\xa4':167,'\\xa5':713,'\\xa6':500,'\\xa7':753,'\\xa8':753,'\\xa9':753,'\\xaa':753,'\\xab':1042,'\\xac':987,'\\xad':603,'\\xae':987,'\\xaf':603,\r\n 
'\\xb0':400,'\\xb1':549,'\\xb2':411,'\\xb3':549,'\\xb4':549,'\\xb5':713,'\\xb6':494,'\\xb7':460,'\\xb8':549,'\\xb9':549,'\\xba':549,'\\xbb':549,'\\xbc':1000,'\\xbd':603,'\\xbe':1000,'\\xbf':658,'\\xc0':823,'\\xc1':686,'\\xc2':795,'\\xc3':987,'\\xc4':768,'\\xc5':768,\r\n '\\xc6':823,'\\xc7':768,'\\xc8':768,'\\xc9':713,'\\xca':713,'\\xcb':713,'\\xcc':713,'\\xcd':713,'\\xce':713,'\\xcf':713,'\\xd0':768,'\\xd1':713,'\\xd2':790,'\\xd3':790,'\\xd4':890,'\\xd5':823,'\\xd6':549,'\\xd7':250,'\\xd8':713,'\\xd9':603,'\\xda':603,'\\xdb':1042,\r\n '\\xdc':987,'\\xdd':603,'\\xde':987,'\\xdf':603,'\\xe0':494,'\\xe1':329,'\\xe2':790,'\\xe3':790,'\\xe4':786,'\\xe5':713,'\\xe6':384,'\\xe7':384,'\\xe8':384,'\\xe9':384,'\\xea':384,'\\xeb':384,'\\xec':494,'\\xed':494,'\\xee':494,'\\xef':494,'\\xf0':0,'\\xf1':329,\r\n '\\xf2':274,'\\xf3':686,'\\xf4':686,'\\xf5':686,'\\xf6':384,'\\xf7':384,'\\xf8':384,'\\xf9':384,'\\xfa':384,'\\xfb':384,'\\xfc':494,'\\xfd':494,'\\xfe':494,'\\xff':0}\r\n \r\nfpdf_charwidths['times']={\r\n '\\x00':250,'\\x01':250,'\\x02':250,'\\x03':250,'\\x04':250,'\\x05':250,'\\x06':250,'\\x07':250,'\\x08':250,'\\t':250,'\\n':250,'\\x0b':250,'\\x0c':250,'\\r':250,'\\x0e':250,'\\x0f':250,'\\x10':250,'\\x11':250,'\\x12':250,'\\x13':250,'\\x14':250,'\\x15':250,\r\n '\\x16':250,'\\x17':250,'\\x18':250,'\\x19':250,'\\x1a':250,'\\x1b':250,'\\x1c':250,'\\x1d':250,'\\x1e':250,'\\x1f':250,' ':250,'!':333,'\"':408,'#':500,'$':500,'%':833,'&':778,'\\'':180,'(':333,')':333,'*':500,'+':564,\r\n ',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':278,';':278,'<':564,'=':564,'>':564,'?':444,'@':921,'A':722,\r\n 'B':667,'C':667,'D':722,'E':611,'F':556,'G':722,'H':722,'I':333,'J':389,'K':722,'L':611,'M':889,'N':722,'O':722,'P':556,'Q':722,'R':667,'S':556,'T':611,'U':722,'V':722,'W':944,\r\n 
'X':722,'Y':722,'Z':611,'[':333,'\\\\':278,']':333,'^':469,'_':500,'`':333,'a':444,'b':500,'c':444,'d':500,'e':444,'f':333,'g':500,'h':500,'i':278,'j':278,'k':500,'l':278,'m':778,\r\n 'n':500,'o':500,'p':500,'q':500,'r':333,'s':389,'t':278,'u':500,'v':500,'w':722,'x':500,'y':500,'z':444,'{':480,'|':200,'}':480,'~':541,'\\x7f':350,'\\x80':500,'\\x81':350,'\\x82':333,'\\x83':500,\r\n '\\x84':444,'\\x85':1000,'\\x86':500,'\\x87':500,'\\x88':333,'\\x89':1000,'\\x8a':556,'\\x8b':333,'\\x8c':889,'\\x8d':350,'\\x8e':611,'\\x8f':350,'\\x90':350,'\\x91':333,'\\x92':333,'\\x93':444,'\\x94':444,'\\x95':350,'\\x96':500,'\\x97':1000,'\\x98':333,'\\x99':980,\r\n '\\x9a':389,'\\x9b':333,'\\x9c':722,'\\x9d':350,'\\x9e':444,'\\x9f':722,'\\xa0':250,'\\xa1':333,'\\xa2':500,'\\xa3':500,'\\xa4':500,'\\xa5':500,'\\xa6':200,'\\xa7':500,'\\xa8':333,'\\xa9':760,'\\xaa':276,'\\xab':500,'\\xac':564,'\\xad':333,'\\xae':760,'\\xaf':333,\r\n '\\xb0':400,'\\xb1':564,'\\xb2':300,'\\xb3':300,'\\xb4':333,'\\xb5':500,'\\xb6':453,'\\xb7':250,'\\xb8':333,'\\xb9':300,'\\xba':310,'\\xbb':500,'\\xbc':750,'\\xbd':750,'\\xbe':750,'\\xbf':444,'\\xc0':722,'\\xc1':722,'\\xc2':722,'\\xc3':722,'\\xc4':722,'\\xc5':722,\r\n '\\xc6':889,'\\xc7':667,'\\xc8':611,'\\xc9':611,'\\xca':611,'\\xcb':611,'\\xcc':333,'\\xcd':333,'\\xce':333,'\\xcf':333,'\\xd0':722,'\\xd1':722,'\\xd2':722,'\\xd3':722,'\\xd4':722,'\\xd5':722,'\\xd6':722,'\\xd7':564,'\\xd8':722,'\\xd9':722,'\\xda':722,'\\xdb':722,\r\n '\\xdc':722,'\\xdd':722,'\\xde':556,'\\xdf':500,'\\xe0':444,'\\xe1':444,'\\xe2':444,'\\xe3':444,'\\xe4':444,'\\xe5':444,'\\xe6':667,'\\xe7':444,'\\xe8':444,'\\xe9':444,'\\xea':444,'\\xeb':444,'\\xec':278,'\\xed':278,'\\xee':278,'\\xef':278,'\\xf0':500,'\\xf1':500,\r\n '\\xf2':500,'\\xf3':500,'\\xf4':500,'\\xf5':500,'\\xf6':500,'\\xf7':564,'\\xf8':500,'\\xf9':500,'\\xfa':500,'\\xfb':500,'\\xfc':500,'\\xfd':500,'\\xfe':500,'\\xff':500}\r\n\r\nfpdf_charwidths['timesB']={\r\n 
'\\x00':250,'\\x01':250,'\\x02':250,'\\x03':250,'\\x04':250,'\\x05':250,'\\x06':250,'\\x07':250,'\\x08':250,'\\t':250,'\\n':250,'\\x0b':250,'\\x0c':250,'\\r':250,'\\x0e':250,'\\x0f':250,'\\x10':250,'\\x11':250,'\\x12':250,'\\x13':250,'\\x14':250,'\\x15':250,\r\n '\\x16':250,'\\x17':250,'\\x18':250,'\\x19':250,'\\x1a':250,'\\x1b':250,'\\x1c':250,'\\x1d':250,'\\x1e':250,'\\x1f':250,' ':250,'!':333,'\"':555,'#':500,'$':500,'%':1000,'&':833,'\\'':278,'(':333,')':333,'*':500,'+':570,\r\n ',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':570,'=':570,'>':570,'?':500,'@':930,'A':722,\r\n 'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':778,'I':389,'J':500,'K':778,'L':667,'M':944,'N':722,'O':778,'P':611,'Q':778,'R':722,'S':556,'T':667,'U':722,'V':722,'W':1000,\r\n 'X':722,'Y':722,'Z':667,'[':333,'\\\\':278,']':333,'^':581,'_':500,'`':333,'a':500,'b':556,'c':444,'d':556,'e':444,'f':333,'g':500,'h':556,'i':278,'j':333,'k':556,'l':278,'m':833,\r\n 'n':556,'o':500,'p':556,'q':556,'r':444,'s':389,'t':333,'u':556,'v':500,'w':722,'x':500,'y':500,'z':444,'{':394,'|':220,'}':394,'~':520,'\\x7f':350,'\\x80':500,'\\x81':350,'\\x82':333,'\\x83':500,\r\n '\\x84':500,'\\x85':1000,'\\x86':500,'\\x87':500,'\\x88':333,'\\x89':1000,'\\x8a':556,'\\x8b':333,'\\x8c':1000,'\\x8d':350,'\\x8e':667,'\\x8f':350,'\\x90':350,'\\x91':333,'\\x92':333,'\\x93':500,'\\x94':500,'\\x95':350,'\\x96':500,'\\x97':1000,'\\x98':333,'\\x99':1000,\r\n '\\x9a':389,'\\x9b':333,'\\x9c':722,'\\x9d':350,'\\x9e':444,'\\x9f':722,'\\xa0':250,'\\xa1':333,'\\xa2':500,'\\xa3':500,'\\xa4':500,'\\xa5':500,'\\xa6':220,'\\xa7':500,'\\xa8':333,'\\xa9':747,'\\xaa':300,'\\xab':500,'\\xac':570,'\\xad':333,'\\xae':747,'\\xaf':333,\r\n 
'\\xb0':400,'\\xb1':570,'\\xb2':300,'\\xb3':300,'\\xb4':333,'\\xb5':556,'\\xb6':540,'\\xb7':250,'\\xb8':333,'\\xb9':300,'\\xba':330,'\\xbb':500,'\\xbc':750,'\\xbd':750,'\\xbe':750,'\\xbf':500,'\\xc0':722,'\\xc1':722,'\\xc2':722,'\\xc3':722,'\\xc4':722,'\\xc5':722,\r\n '\\xc6':1000,'\\xc7':722,'\\xc8':667,'\\xc9':667,'\\xca':667,'\\xcb':667,'\\xcc':389,'\\xcd':389,'\\xce':389,'\\xcf':389,'\\xd0':722,'\\xd1':722,'\\xd2':778,'\\xd3':778,'\\xd4':778,'\\xd5':778,'\\xd6':778,'\\xd7':570,'\\xd8':778,'\\xd9':722,'\\xda':722,'\\xdb':722,\r\n '\\xdc':722,'\\xdd':722,'\\xde':611,'\\xdf':556,'\\xe0':500,'\\xe1':500,'\\xe2':500,'\\xe3':500,'\\xe4':500,'\\xe5':500,'\\xe6':722,'\\xe7':444,'\\xe8':444,'\\xe9':444,'\\xea':444,'\\xeb':444,'\\xec':278,'\\xed':278,'\\xee':278,'\\xef':278,'\\xf0':500,'\\xf1':556,\r\n '\\xf2':500,'\\xf3':500,'\\xf4':500,'\\xf5':500,'\\xf6':500,'\\xf7':570,'\\xf8':500,'\\xf9':556,'\\xfa':556,'\\xfb':556,'\\xfc':556,'\\xfd':500,'\\xfe':556,'\\xff':500}\r\n \r\nfpdf_charwidths['timesBI']={\r\n '\\x00':250,'\\x01':250,'\\x02':250,'\\x03':250,'\\x04':250,'\\x05':250,'\\x06':250,'\\x07':250,'\\x08':250,'\\t':250,'\\n':250,'\\x0b':250,'\\x0c':250,'\\r':250,'\\x0e':250,'\\x0f':250,'\\x10':250,'\\x11':250,'\\x12':250,'\\x13':250,'\\x14':250,'\\x15':250,\r\n '\\x16':250,'\\x17':250,'\\x18':250,'\\x19':250,'\\x1a':250,'\\x1b':250,'\\x1c':250,'\\x1d':250,'\\x1e':250,'\\x1f':250,' ':250,'!':389,'\"':555,'#':500,'$':500,'%':833,'&':778,'\\'':278,'(':333,')':333,'*':500,'+':570,\r\n ',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':570,'=':570,'>':570,'?':500,'@':832,'A':667,\r\n 'B':667,'C':667,'D':722,'E':667,'F':667,'G':722,'H':778,'I':389,'J':500,'K':667,'L':611,'M':889,'N':722,'O':722,'P':611,'Q':722,'R':667,'S':556,'T':611,'U':722,'V':667,'W':889,\r\n 
'X':667,'Y':611,'Z':611,'[':333,'\\\\':278,']':333,'^':570,'_':500,'`':333,'a':500,'b':500,'c':444,'d':500,'e':444,'f':333,'g':500,'h':556,'i':278,'j':278,'k':500,'l':278,'m':778,\r\n 'n':556,'o':500,'p':500,'q':500,'r':389,'s':389,'t':278,'u':556,'v':444,'w':667,'x':500,'y':444,'z':389,'{':348,'|':220,'}':348,'~':570,'\\x7f':350,'\\x80':500,'\\x81':350,'\\x82':333,'\\x83':500,\r\n '\\x84':500,'\\x85':1000,'\\x86':500,'\\x87':500,'\\x88':333,'\\x89':1000,'\\x8a':556,'\\x8b':333,'\\x8c':944,'\\x8d':350,'\\x8e':611,'\\x8f':350,'\\x90':350,'\\x91':333,'\\x92':333,'\\x93':500,'\\x94':500,'\\x95':350,'\\x96':500,'\\x97':1000,'\\x98':333,'\\x99':1000,\r\n '\\x9a':389,'\\x9b':333,'\\x9c':722,'\\x9d':350,'\\x9e':389,'\\x9f':611,'\\xa0':250,'\\xa1':389,'\\xa2':500,'\\xa3':500,'\\xa4':500,'\\xa5':500,'\\xa6':220,'\\xa7':500,'\\xa8':333,'\\xa9':747,'\\xaa':266,'\\xab':500,'\\xac':606,'\\xad':333,'\\xae':747,'\\xaf':333,\r\n '\\xb0':400,'\\xb1':570,'\\xb2':300,'\\xb3':300,'\\xb4':333,'\\xb5':576,'\\xb6':500,'\\xb7':250,'\\xb8':333,'\\xb9':300,'\\xba':300,'\\xbb':500,'\\xbc':750,'\\xbd':750,'\\xbe':750,'\\xbf':500,'\\xc0':667,'\\xc1':667,'\\xc2':667,'\\xc3':667,'\\xc4':667,'\\xc5':667,\r\n '\\xc6':944,'\\xc7':667,'\\xc8':667,'\\xc9':667,'\\xca':667,'\\xcb':667,'\\xcc':389,'\\xcd':389,'\\xce':389,'\\xcf':389,'\\xd0':722,'\\xd1':722,'\\xd2':722,'\\xd3':722,'\\xd4':722,'\\xd5':722,'\\xd6':722,'\\xd7':570,'\\xd8':722,'\\xd9':722,'\\xda':722,'\\xdb':722,\r\n '\\xdc':722,'\\xdd':611,'\\xde':611,'\\xdf':500,'\\xe0':500,'\\xe1':500,'\\xe2':500,'\\xe3':500,'\\xe4':500,'\\xe5':500,'\\xe6':722,'\\xe7':444,'\\xe8':444,'\\xe9':444,'\\xea':444,'\\xeb':444,'\\xec':278,'\\xed':278,'\\xee':278,'\\xef':278,'\\xf0':500,'\\xf1':556,\r\n '\\xf2':500,'\\xf3':500,'\\xf4':500,'\\xf5':500,'\\xf6':500,'\\xf7':570,'\\xf8':500,'\\xf9':556,'\\xfa':556,'\\xfb':556,'\\xfc':556,'\\xfd':444,'\\xfe':500,'\\xff':444}\r\n\r\nfpdf_charwidths['timesI']={\r\n 
'\\x00':250,'\\x01':250,'\\x02':250,'\\x03':250,'\\x04':250,'\\x05':250,'\\x06':250,'\\x07':250,'\\x08':250,'\\t':250,'\\n':250,'\\x0b':250,'\\x0c':250,'\\r':250,'\\x0e':250,'\\x0f':250,'\\x10':250,'\\x11':250,'\\x12':250,'\\x13':250,'\\x14':250,'\\x15':250,\r\n '\\x16':250,'\\x17':250,'\\x18':250,'\\x19':250,'\\x1a':250,'\\x1b':250,'\\x1c':250,'\\x1d':250,'\\x1e':250,'\\x1f':250,' ':250,'!':333,'\"':420,'#':500,'$':500,'%':833,'&':778,'\\'':214,'(':333,')':333,'*':500,'+':675,\r\n ',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':675,'=':675,'>':675,'?':500,'@':920,'A':611,\r\n 'B':611,'C':667,'D':722,'E':611,'F':611,'G':722,'H':722,'I':333,'J':444,'K':667,'L':556,'M':833,'N':667,'O':722,'P':611,'Q':722,'R':611,'S':500,'T':556,'U':722,'V':611,'W':833,\r\n 'X':611,'Y':556,'Z':556,'[':389,'\\\\':278,']':389,'^':422,'_':500,'`':333,'a':500,'b':500,'c':444,'d':500,'e':444,'f':278,'g':500,'h':500,'i':278,'j':278,'k':444,'l':278,'m':722,\r\n 'n':500,'o':500,'p':500,'q':500,'r':389,'s':389,'t':278,'u':500,'v':444,'w':667,'x':444,'y':444,'z':389,'{':400,'|':275,'}':400,'~':541,'\\x7f':350,'\\x80':500,'\\x81':350,'\\x82':333,'\\x83':500,\r\n '\\x84':556,'\\x85':889,'\\x86':500,'\\x87':500,'\\x88':333,'\\x89':1000,'\\x8a':500,'\\x8b':333,'\\x8c':944,'\\x8d':350,'\\x8e':556,'\\x8f':350,'\\x90':350,'\\x91':333,'\\x92':333,'\\x93':556,'\\x94':556,'\\x95':350,'\\x96':500,'\\x97':889,'\\x98':333,'\\x99':980,\r\n '\\x9a':389,'\\x9b':333,'\\x9c':667,'\\x9d':350,'\\x9e':389,'\\x9f':556,'\\xa0':250,'\\xa1':389,'\\xa2':500,'\\xa3':500,'\\xa4':500,'\\xa5':500,'\\xa6':275,'\\xa7':500,'\\xa8':333,'\\xa9':760,'\\xaa':276,'\\xab':500,'\\xac':675,'\\xad':333,'\\xae':760,'\\xaf':333,\r\n 
'\\xb0':400,'\\xb1':675,'\\xb2':300,'\\xb3':300,'\\xb4':333,'\\xb5':500,'\\xb6':523,'\\xb7':250,'\\xb8':333,'\\xb9':300,'\\xba':310,'\\xbb':500,'\\xbc':750,'\\xbd':750,'\\xbe':750,'\\xbf':500,'\\xc0':611,'\\xc1':611,'\\xc2':611,'\\xc3':611,'\\xc4':611,'\\xc5':611,\r\n '\\xc6':889,'\\xc7':667,'\\xc8':611,'\\xc9':611,'\\xca':611,'\\xcb':611,'\\xcc':333,'\\xcd':333,'\\xce':333,'\\xcf':333,'\\xd0':722,'\\xd1':667,'\\xd2':722,'\\xd3':722,'\\xd4':722,'\\xd5':722,'\\xd6':722,'\\xd7':675,'\\xd8':722,'\\xd9':722,'\\xda':722,'\\xdb':722,\r\n '\\xdc':722,'\\xdd':556,'\\xde':611,'\\xdf':500,'\\xe0':500,'\\xe1':500,'\\xe2':500,'\\xe3':500,'\\xe4':500,'\\xe5':500,'\\xe6':667,'\\xe7':444,'\\xe8':444,'\\xe9':444,'\\xea':444,'\\xeb':444,'\\xec':278,'\\xed':278,'\\xee':278,'\\xef':278,'\\xf0':500,'\\xf1':500,\r\n '\\xf2':500,'\\xf3':500,'\\xf4':500,'\\xf5':500,'\\xf6':500,'\\xf7':675,'\\xf8':500,'\\xf9':500,'\\xfa':500,'\\xfb':500,'\\xfc':500,'\\xfd':444,'\\xfe':500,'\\xff':444}\r\n\r\nfpdf_charwidths['zapfdingbats']={\r\n '\\x00':0,'\\x01':0,'\\x02':0,'\\x03':0,'\\x04':0,'\\x05':0,'\\x06':0,'\\x07':0,'\\x08':0,'\\t':0,'\\n':0,'\\x0b':0,'\\x0c':0,'\\r':0,'\\x0e':0,'\\x0f':0,'\\x10':0,'\\x11':0,'\\x12':0,'\\x13':0,'\\x14':0,'\\x15':0,\r\n '\\x16':0,'\\x17':0,'\\x18':0,'\\x19':0,'\\x1a':0,'\\x1b':0,'\\x1c':0,'\\x1d':0,'\\x1e':0,'\\x1f':0,' ':278,'!':974,'\"':961,'#':974,'$':980,'%':719,'&':789,'\\'':790,'(':791,')':690,'*':960,'+':939,\r\n ',':549,'-':855,'.':911,'/':933,'0':911,'1':945,'2':974,'3':755,'4':846,'5':762,'6':761,'7':571,'8':677,'9':763,':':760,';':759,'<':754,'=':494,'>':552,'?':537,'@':577,'A':692,\r\n 'B':786,'C':788,'D':788,'E':790,'F':793,'G':794,'H':816,'I':823,'J':789,'K':841,'L':823,'M':833,'N':816,'O':831,'P':923,'Q':744,'R':723,'S':749,'T':790,'U':792,'V':695,'W':776,\r\n 'X':768,'Y':792,'Z':759,'[':707,'\\\\':708,']':682,'^':701,'_':826,'`':815,'a':789,'b':789,'c':707,'d':687,'e':696,'f':689,'g':786,'h':787,'i':713,'j':791,'k':785,'l':791,'m':873,\r\n 
'n':761,'o':762,'p':762,'q':759,'r':759,'s':892,'t':892,'u':788,'v':784,'w':438,'x':138,'y':277,'z':415,'{':392,'|':392,'}':668,'~':668,'\\x7f':0,'\\x80':390,'\\x81':390,'\\x82':317,'\\x83':317,\r\n '\\x84':276,'\\x85':276,'\\x86':509,'\\x87':509,'\\x88':410,'\\x89':410,'\\x8a':234,'\\x8b':234,'\\x8c':334,'\\x8d':334,'\\x8e':0,'\\x8f':0,'\\x90':0,'\\x91':0,'\\x92':0,'\\x93':0,'\\x94':0,'\\x95':0,'\\x96':0,'\\x97':0,'\\x98':0,'\\x99':0,\r\n '\\x9a':0,'\\x9b':0,'\\x9c':0,'\\x9d':0,'\\x9e':0,'\\x9f':0,'\\xa0':0,'\\xa1':732,'\\xa2':544,'\\xa3':544,'\\xa4':910,'\\xa5':667,'\\xa6':760,'\\xa7':760,'\\xa8':776,'\\xa9':595,'\\xaa':694,'\\xab':626,'\\xac':788,'\\xad':788,'\\xae':788,'\\xaf':788,\r\n '\\xb0':788,'\\xb1':788,'\\xb2':788,'\\xb3':788,'\\xb4':788,'\\xb5':788,'\\xb6':788,'\\xb7':788,'\\xb8':788,'\\xb9':788,'\\xba':788,'\\xbb':788,'\\xbc':788,'\\xbd':788,'\\xbe':788,'\\xbf':788,'\\xc0':788,'\\xc1':788,'\\xc2':788,'\\xc3':788,'\\xc4':788,'\\xc5':788,\r\n '\\xc6':788,'\\xc7':788,'\\xc8':788,'\\xc9':788,'\\xca':788,'\\xcb':788,'\\xcc':788,'\\xcd':788,'\\xce':788,'\\xcf':788,'\\xd0':788,'\\xd1':788,'\\xd2':788,'\\xd3':788,'\\xd4':894,'\\xd5':838,'\\xd6':1016,'\\xd7':458,'\\xd8':748,'\\xd9':924,'\\xda':748,'\\xdb':918,\r\n '\\xdc':927,'\\xdd':928,'\\xde':928,'\\xdf':834,'\\xe0':873,'\\xe1':828,'\\xe2':924,'\\xe3':924,'\\xe4':917,'\\xe5':930,'\\xe6':931,'\\xe7':463,'\\xe8':883,'\\xe9':836,'\\xea':836,'\\xeb':867,'\\xec':867,'\\xed':696,'\\xee':696,'\\xef':874,'\\xf0':0,'\\xf1':874,\r\n 
'\\xf2':760,'\\xf3':946,'\\xf4':771,'\\xf5':865,'\\xf6':771,'\\xf7':888,'\\xf8':967,'\\xf9':888,'\\xfa':831,'\\xfb':873,'\\xfc':927,'\\xfd':970,'\\xfe':918,'\\xff':0}\r\n\r\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":476127,"cells":{"repo_name":{"kind":"string","value":"BaconPancakes/valor"},"path":{"kind":"string","value":"lib/pip/_vendor/colorama/ansitowin32.py"},"copies":{"kind":"string","value":"450"},"size":{"kind":"string","value":"9668"},"content":{"kind":"string","value":"# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.\nimport re\nimport sys\nimport os\n\nfrom .ansi import AnsiFore, AnsiBack, AnsiStyle, Style\nfrom .winterm import WinTerm, WinColor, WinStyle\nfrom .win32 import windll, winapi_test\n\n\nwinterm = None\nif windll is not None:\n winterm = WinTerm()\n\n\ndef is_stream_closed(stream):\n return not hasattr(stream, 'closed') or stream.closed\n\n\ndef is_a_tty(stream):\n return hasattr(stream, 'isatty') and stream.isatty()\n\n\nclass StreamWrapper(object):\n '''\n Wraps a stream (such as stdout), acting as a transparent proxy for all\n attribute access apart from method 'write()', which is delegated to our\n Converter instance.\n '''\n def __init__(self, wrapped, converter):\n # double-underscore everything to prevent clashes with names of\n # attributes on the wrapped stream object.\n self.__wrapped = wrapped\n self.__convertor = converter\n\n def __getattr__(self, name):\n return getattr(self.__wrapped, name)\n\n def write(self, text):\n self.__convertor.write(text)\n\n\nclass AnsiToWin32(object):\n '''\n Implements a 'write()' method which, on Windows, will strip ANSI character\n sequences from the text, and if outputting to a tty, will convert them into\n win32 function calls.\n '''\n ANSI_CSI_RE = re.compile('\\001?\\033\\[((?:\\d|;)*)([a-zA-Z])\\002?') # Control Sequence Introducer\n ANSI_OSC_RE = re.compile('\\001?\\033\\]((?:.|;)*?)(\\x07)\\002?') # Operating System Command\n\n def __init__(self, 
wrapped, convert=None, strip=None, autoreset=False):\n # The wrapped stream (normally sys.stdout or sys.stderr)\n self.wrapped = wrapped\n\n # should we reset colors to defaults after every .write()\n self.autoreset = autoreset\n\n # create the proxy wrapping our output stream\n self.stream = StreamWrapper(wrapped, self)\n\n on_windows = os.name == 'nt'\n # We test if the WinAPI works, because even if we are on Windows\n # we may be using a terminal that doesn't support the WinAPI\n # (e.g. Cygwin Terminal). In this case it's up to the terminal\n # to support the ANSI codes.\n conversion_supported = on_windows and winapi_test()\n\n # should we strip ANSI sequences from our output?\n if strip is None:\n strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))\n self.strip = strip\n\n # should we should convert ANSI sequences into win32 calls?\n if convert is None:\n convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)\n self.convert = convert\n\n # dict of ansi codes to win32 functions and parameters\n self.win32_calls = self.get_win32_calls()\n\n # are we wrapping stderr?\n self.on_stderr = self.wrapped is sys.stderr\n\n def should_wrap(self):\n '''\n True if this class is actually needed. If false, then the output\n stream will not be affected, nor will win32 calls be issued, so\n wrapping stdout is not actually required. 
This will generally be\n False on non-Windows platforms, unless optional functionality like\n autoreset has been requested using kwargs to init()\n '''\n return self.convert or self.strip or self.autoreset\n\n def get_win32_calls(self):\n if self.convert and winterm:\n return {\n AnsiStyle.RESET_ALL: (winterm.reset_all, ),\n AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),\n AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),\n AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),\n AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),\n AnsiFore.RED: (winterm.fore, WinColor.RED),\n AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),\n AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),\n AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),\n AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),\n AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),\n AnsiFore.WHITE: (winterm.fore, WinColor.GREY),\n AnsiFore.RESET: (winterm.fore, ),\n AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),\n AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),\n AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),\n AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),\n AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),\n AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),\n AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),\n AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),\n AnsiBack.BLACK: (winterm.back, WinColor.BLACK),\n AnsiBack.RED: (winterm.back, WinColor.RED),\n AnsiBack.GREEN: (winterm.back, WinColor.GREEN),\n AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),\n AnsiBack.BLUE: (winterm.back, WinColor.BLUE),\n AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),\n AnsiBack.CYAN: (winterm.back, WinColor.CYAN),\n AnsiBack.WHITE: (winterm.back, WinColor.GREY),\n AnsiBack.RESET: (winterm.back, ),\n AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),\n AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),\n 
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),\n AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),\n AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),\n AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),\n AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),\n AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),\n }\n return dict()\n\n def write(self, text):\n if self.strip or self.convert:\n self.write_and_convert(text)\n else:\n self.wrapped.write(text)\n self.wrapped.flush()\n if self.autoreset:\n self.reset_all()\n\n\n def reset_all(self):\n if self.convert:\n self.call_win32('m', (0,))\n elif not self.strip and not is_stream_closed(self.wrapped):\n self.wrapped.write(Style.RESET_ALL)\n\n\n def write_and_convert(self, text):\n '''\n Write the given text to our wrapped stream, stripping any ANSI\n sequences from the text, and optionally converting them into win32\n calls.\n '''\n cursor = 0\n text = self.convert_osc(text)\n for match in self.ANSI_CSI_RE.finditer(text):\n start, end = match.span()\n self.write_plain_text(text, cursor, start)\n self.convert_ansi(*match.groups())\n cursor = end\n self.write_plain_text(text, cursor, len(text))\n\n\n def write_plain_text(self, text, start, end):\n if start < end:\n self.wrapped.write(text[start:end])\n self.wrapped.flush()\n\n\n def convert_ansi(self, paramstring, command):\n if self.convert:\n params = self.extract_params(command, paramstring)\n self.call_win32(command, params)\n\n\n def extract_params(self, command, paramstring):\n if command in 'Hf':\n params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))\n while len(params) < 2:\n # defaults:\n params = params + (1,)\n else:\n params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)\n if len(params) == 0:\n # defaults:\n if command in 'JKm':\n params = (0,)\n elif command in 'ABCD':\n params = (1,)\n\n return params\n\n\n def call_win32(self, command, 
params):\n if command == 'm':\n for param in params:\n if param in self.win32_calls:\n func_args = self.win32_calls[param]\n func = func_args[0]\n args = func_args[1:]\n kwargs = dict(on_stderr=self.on_stderr)\n func(*args, **kwargs)\n elif command in 'J':\n winterm.erase_screen(params[0], on_stderr=self.on_stderr)\n elif command in 'K':\n winterm.erase_line(params[0], on_stderr=self.on_stderr)\n elif command in 'Hf': # cursor position - absolute\n winterm.set_cursor_position(params, on_stderr=self.on_stderr)\n elif command in 'ABCD': # cursor position - relative\n n = params[0]\n # A - up, B - down, C - forward, D - back\n x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]\n winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)\n\n\n def convert_osc(self, text):\n for match in self.ANSI_OSC_RE.finditer(text):\n start, end = match.span()\n text = text[:start] + text[end:]\n paramstring, command = match.groups()\n if command in '\\x07': # \\x07 = BEL\n params = paramstring.split(\";\")\n # 0 - change title and icon (we will only change title)\n # 1 - change icon (we don't support this)\n # 2 - change title\n if params[0] in '02':\n winterm.set_title(params[1])\n return text\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476128,"cells":{"repo_name":{"kind":"string","value":"hypnotika/namebench"},"path":{"kind":"string","value":"libnamebench/site_connector.py"},"copies":{"kind":"string","value":"175"},"size":{"kind":"string","value":"4048"},"content":{"kind":"string","value":"# Copyright 2010 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Class used for connecting to the results site.\"\"\"\n\nimport os\nimport platform\nimport random\nimport socket\nimport sys\nimport tempfile\nimport time\nimport urllib\nimport zlib\n\n# third_party\nimport httplib2\nimport simplejson\n\nimport util\n\nRETRY_WAIT = 10\n\n\nclass SiteConnector(object):\n \"\"\"Methods that connect to the results site.\"\"\"\n\n def __init__(self, config, status_callback=None):\n self.config = config\n self.url = self.config.site_url.rstrip('/')\n self.status_callback = status_callback\n \n def msg(self, msg, count=None, total=None, **kwargs):\n if self.status_callback:\n self.status_callback(msg, count=count, total=total, **kwargs)\n else:\n print '%s [%s/%s]' % (msg, count, total)\n\n def GetIndexHosts(self):\n \"\"\"Get a list of 'index' hosts for standardized testing.\"\"\"\n url = self.url + '/index_hosts'\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n content = None\n try:\n unused_resp, content = h.request(url, 'GET')\n hosts = []\n for record_type, host in simplejson.loads(content):\n hosts.append((str(record_type), str(host)))\n return hosts\n except simplejson.decoder.JSONDecodeError:\n self.msg('Failed to decode: \"%s\"' % content)\n return []\n except AttributeError:\n self.msg('%s refused connection' % url)\n return []\n except:\n self.msg('* Failed to fetch %s: %s' % (url, util.GetLastExceptionString()))\n return []\n \n\n def 
UploadJsonResults(self, json_data, hide_results=False, fail_quickly=False):\n \"\"\"Data is generated by reporter.CreateJsonData.\"\"\"\n\n url = self.url + '/submit'\n if not url or not url.startswith('http'):\n return (False, 'error')\n h = httplib2.Http()\n post_data = {\n 'client_id': self._CalculateDuplicateCheckId(),\n 'submit_id': random.randint(0, 2**32),\n 'hidden': bool(hide_results),\n 'data': json_data\n }\n try:\n resp, content = h.request(url, 'POST', urllib.urlencode(post_data))\n try:\n data = simplejson.loads(content)\n for note in data['notes']:\n print ' * %s' % note\n return (''.join((self.url, data['url'])), data['state'])\n except:\n self.msg('BAD RESPONSE from %s: [%s]:\\n %s' % (url, resp, content))\n print \"DATA:\"\n print post_data\n # See http://code.google.com/p/httplib2/issues/detail?id=62\n except AttributeError:\n self.msg('%s refused connection' % url)\n except:\n self.msg('Error uploading results: %s' % util.GetLastExceptionString())\n\n # We haven't returned, something is up.\n if not fail_quickly:\n self.msg('Problem talking to %s, will retry after %ss' % (url, RETRY_WAIT))\n time.sleep(RETRY_WAIT)\n self.UploadJsonResults(json_data, hide_results=hide_results, fail_quickly=True)\n \n return (False, 'error')\n\n def _CalculateDuplicateCheckId(self):\n \"\"\"This is so that we can detect duplicate submissions from a particular host.\n\n Returns:\n checksum: integer\n \"\"\"\n # From http://docs.python.org/release/2.5.2/lib/module-zlib.html\n # \"not suitable for use as a general hash algorithm.\"\n #\n # We are only using it as a temporary way to detect duplicate runs on the\n # same host in a short time period, so it's accuracy is not important.\n return zlib.crc32(platform.platform() + sys.version + platform.node() +\n os.getenv('HOME', '') + os.getenv('USERPROFILE', 
''))\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476129,"cells":{"repo_name":{"kind":"string","value":"nanobox-io/nanobox-pkgsrc-base"},"path":{"kind":"string","value":"nodejs7/patches/patch-tools_gyp_pylib_gyp_generator_make.py"},"copies":{"kind":"string","value":"16"},"size":{"kind":"string","value":"1181"},"content":{"kind":"string","value":"$NetBSD: patch-tools_gyp_pylib_gyp_generator_make.py,v 1.3 2013/12/12 11:52:37 jperkin Exp $\n\nAdd support for NetBSD and DragonFly.\nEnsure we use the system libtool on OSX.\n\n--- tools/gyp/pylib/gyp/generator/make.py.orig\t2013-12-12 05:20:06.000000000 +0000\n+++ tools/gyp/pylib/gyp/generator/make.py\n@@ -174,7 +174,7 @@ cmd_solink_module = $(LINK.$(TOOLSET)) -\n \n LINK_COMMANDS_MAC = \"\"\"\\\n quiet_cmd_alink = LIBTOOL-STATIC $@\n-cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)\n+cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool /usr/bin/libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)\n \n quiet_cmd_link = LINK($(TOOLSET)) $@\n cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o \"$@\" $(LD_INPUTS) $(LIBS)\n@@ -2012,7 +2012,7 @@ def GenerateOutput(target_list, target_d\n 'flock': './gyp-flock-tool flock',\n 'flock_index': 2,\n })\n- elif flavor == 'freebsd':\n+ elif flavor == 'freebsd' or flavor == 'dragonflybsd' or flavor == 'netbsd':\n # Note: OpenBSD has sysutils/flock. 
lockf seems to be FreeBSD specific.\n header_params.update({\n 'flock': 'lockf',\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":476130,"cells":{"repo_name":{"kind":"string","value":"stanlee321/pysolper"},"path":{"kind":"string","value":"latrop/lib/dist/tipfy/template.py"},"copies":{"kind":"string","value":"9"},"size":{"kind":"string","value":"21622"},"content":{"kind":"string","value":"#!/usr/bin/env python\n#\n# Copyright 2009 Facebook\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"A simple template system that compiles templates to Python code.\n\nBasic usage looks like:\n\n t = template.Template(\"{{ myvalue }}\")\n print t.generate(myvalue=\"XXX\")\n\nLoader is a class that loads templates from a root directory and caches\nthe compiled templates:\n\n loader = template.Loader(\"/home/btaylor\")\n print loader.load(\"test.html\").generate(myvalue=\"XXX\")\n\nWe compile all templates to raw Python. Error-reporting is currently... uh,\ninteresting. Syntax for the templates\n\n ### base.html\n \n \n {% block title %}Default title{% end %}\n \n \n
    \n {% for student in students %}\n {% block student %}\n
  • {{ escape(student.name) }}
  • \n {% end %}\n {% end %}\n
\n \n \n\n ### bold.html\n {% extends \"base.html\" %}\n\n {% block title %}A bolder title{% end %}\n\n {% block student %}\n
  • {{ escape(student.name) }}
  • \n {% block %}\n\nUnlike most other template systems, we do not put any restrictions on the\nexpressions you can include in your statements. if and for blocks get\ntranslated exactly into Python, do you can do complex expressions like:\n\n {% for student in [p for p in people if p.student and p.age > 23] %}\n
  • {{ escape(student.name) }}
  • \n {% end %}\n\nTranslating directly to Python means you can apply functions to expressions\neasily, like the escape() function in the examples above. You can pass\nfunctions in to your template just like any other variable:\n\n ### Python code\n def add(x, y):\n return x + y\n template.execute(add=add)\n\n ### The template\n {{ add(1, 2) }}\n\nWe provide the functions escape(), url_escape(), json_encode(), and squeeze()\nto all templates by default.\n\"\"\"\n\nfrom __future__ import with_statement\n\nimport cStringIO\nimport datetime\nimport htmlentitydefs\nimport logging\nimport os.path\nimport re\nimport urllib\nimport xml.sax.saxutils\nimport zipfile\n\nfrom .json import json_encode\n\n\ndef utf8(value):\n \"\"\"Encodes a unicode value to UTF-8 if not yet encoded.\n\n :param value:\n Value to be encoded.\n :returns:\n An encoded string.\n \"\"\"\n if isinstance(value, unicode):\n return value.encode(\"utf-8\")\n\n assert isinstance(value, str)\n return value\n\n\ndef _unicode(value):\n \"\"\"Encodes a string value to unicode if not yet decoded.\n\n :param value:\n Value to be decoded.\n :returns:\n A decoded string.\n \"\"\"\n if isinstance(value, str):\n return value.decode(\"utf-8\")\n\n assert isinstance(value, unicode)\n return value\n\n\ndef xhtml_escape(value):\n \"\"\"Escapes a string so it is valid within XML or XHTML.\n\n :param value:\n The value to be escaped.\n :returns:\n The escaped value.\n \"\"\"\n return utf8(xml.sax.saxutils.escape(value, {'\"': \"&quot;\"}))\n\n\ndef xhtml_unescape(value):\n \"\"\"Un-escapes an XML-escaped string.\n\n :param value:\n The value to be un-escaped.\n :returns:\n The un-escaped value.\n \"\"\"\n return re.sub(r\"&(#?)(\\w+?);\", _convert_entity, _unicode(value))\n\n\ndef squeeze(value):\n \"\"\"Replace all sequences of whitespace chars with a single space.\"\"\"\n return re.sub(r\"[\\x00-\\x20]+\", \" \", value).strip()\n\n\ndef url_escape(value):\n \"\"\"Returns a valid URL-encoded version of the given 
value.\"\"\"\n return urllib.quote_plus(utf8(value))\n\n\ndef _convert_entity(m):\n if m.group(1) == \"#\":\n try:\n return unichr(int(m.group(2)))\n except ValueError:\n return \"&#%s;\" % m.group(2)\n try:\n return _HTML_UNICODE_MAP[m.group(2)]\n except KeyError:\n return \"&%s;\" % m.group(2)\n\n\ndef _build_unicode_map():\n return dict((name, unichr(value)) for \\\n name, value in htmlentitydefs.name2codepoint.iteritems())\n\n\n_HTML_UNICODE_MAP = _build_unicode_map()\n\n\nclass Template(object):\n \"\"\"A compiled template.\n\n We compile into Python from the given template_string. You can generate\n the template from variables with generate().\n \"\"\"\n def __init__(self, template_string, name=\"\", loader=None,\n compress_whitespace=None):\n self.name = name\n if compress_whitespace is None:\n compress_whitespace = name.endswith(\".html\") or \\\n name.endswith(\".js\")\n reader = _TemplateReader(name, template_string)\n self.file = _File(_parse(reader))\n self.code = self._generate_python(loader, compress_whitespace)\n try:\n self.compiled = compile(self.code, self.name, \"exec\")\n except:\n formatted_code = _format_code(self.code).rstrip()\n logging.error(\"%s code:\\n%s\", self.name, formatted_code)\n raise\n\n def generate(self, **kwargs):\n \"\"\"Generate this template with the given arguments.\"\"\"\n namespace = {\n \"escape\": xhtml_escape,\n \"url_escape\": url_escape,\n \"json_encode\": json_encode,\n \"squeeze\": squeeze,\n \"datetime\": datetime,\n }\n namespace.update(kwargs)\n exec self.compiled in namespace\n execute = namespace[\"_execute\"]\n try:\n return execute()\n except:\n formatted_code = _format_code(self.code).rstrip()\n logging.error(\"%s code:\\n%s\", self.name, formatted_code)\n raise\n\n def _generate_python(self, loader, compress_whitespace):\n buffer = cStringIO.StringIO()\n try:\n named_blocks = {}\n ancestors = self._get_ancestors(loader)\n ancestors.reverse()\n for ancestor in ancestors:\n 
ancestor.find_named_blocks(loader, named_blocks)\n self.file.find_named_blocks(loader, named_blocks)\n writer = _CodeWriter(buffer, named_blocks, loader, self,\n compress_whitespace)\n ancestors[0].generate(writer)\n return buffer.getvalue()\n finally:\n buffer.close()\n\n def _get_ancestors(self, loader):\n ancestors = [self.file]\n for chunk in self.file.body.chunks:\n if isinstance(chunk, _ExtendsBlock):\n if not loader:\n raise ParseError(\"{% extends %} block found, but no \"\n \"template loader\")\n template = loader.load(chunk.name, self.name)\n ancestors.extend(template._get_ancestors(loader))\n return ancestors\n\n\nclass Loader(object):\n \"\"\"A template loader that loads from a single root directory.\n\n You must use a template loader to use template constructs like\n {% extends %} and {% include %}. Loader caches all templates after\n they are loaded the first time.\n \"\"\"\n def __init__(self, root_directory):\n self.root = os.path.abspath(root_directory)\n self.templates = {}\n\n def reset(self):\n self.templates = {}\n\n def resolve_path(self, name, parent_path=None):\n if parent_path and not parent_path.startswith(\"<\") and \\\n not parent_path.startswith(\"/\") and \\\n not name.startswith(\"/\"):\n current_path = os.path.join(self.root, parent_path)\n file_dir = os.path.dirname(os.path.abspath(current_path))\n relative_path = os.path.abspath(os.path.join(file_dir, name))\n if relative_path.startswith(self.root):\n name = relative_path[len(self.root) + 1:]\n return name\n\n def load(self, name, parent_path=None):\n name = self.resolve_path(name, parent_path=parent_path)\n if name not in self.templates:\n path = os.path.join(self.root, name)\n f = open(path, \"r\")\n self.templates[name] = Template(f.read(), name=name, loader=self)\n f.close()\n return self.templates[name]\n\n\nclass ZipLoader(Loader):\n \"\"\"A template loader that loads from a zip file and a root directory.\n\n You must use a template loader to use template constructs like\n {% 
extends %} and {% include %}. Loader caches all templates after\n they are loaded the first time.\n \"\"\"\n def __init__(self, zip_path, root_directory):\n self.zipfile = zipfile.ZipFile(zip_path, 'r')\n self.root = os.path.join(root_directory)\n self.templates = {}\n\n def load(self, name, parent_path=None):\n name = self.resolve_path(name, parent_path=parent_path)\n if name not in self.templates:\n path = os.path.join(self.root, name)\n tpl = self.zipfile.read(path)\n self.templates[name] = Template(tpl, name=name, loader=self)\n return self.templates[name]\n\n\nclass _Node(object):\n def each_child(self):\n return ()\n\n def generate(self, writer):\n raise NotImplementedError()\n\n def find_named_blocks(self, loader, named_blocks):\n for child in self.each_child():\n child.find_named_blocks(loader, named_blocks)\n\n\nclass _File(_Node):\n def __init__(self, body):\n self.body = body\n\n def generate(self, writer):\n writer.write_line(\"def _execute():\")\n with writer.indent():\n writer.write_line(\"_buffer = []\")\n self.body.generate(writer)\n writer.write_line(\"return ''.join(_buffer)\")\n\n def each_child(self):\n return (self.body,)\n\n\nclass _ChunkList(_Node):\n def __init__(self, chunks):\n self.chunks = chunks\n\n def generate(self, writer):\n for chunk in self.chunks:\n chunk.generate(writer)\n\n def each_child(self):\n return self.chunks\n\n\nclass _NamedBlock(_Node):\n def __init__(self, name, body=None):\n self.name = name\n self.body = body\n\n def each_child(self):\n return (self.body,)\n\n def generate(self, writer):\n writer.named_blocks[self.name].generate(writer)\n\n def find_named_blocks(self, loader, named_blocks):\n named_blocks[self.name] = self.body\n _Node.find_named_blocks(self, loader, named_blocks)\n\n\nclass _ExtendsBlock(_Node):\n def __init__(self, name):\n self.name = name\n\n\nclass _IncludeBlock(_Node):\n def __init__(self, name, reader):\n self.name = name\n self.template_name = reader.name\n\n def find_named_blocks(self, 
loader, named_blocks):\n included = loader.load(self.name, self.template_name)\n included.file.find_named_blocks(loader, named_blocks)\n\n def generate(self, writer):\n included = writer.loader.load(self.name, self.template_name)\n old = writer.current_template\n writer.current_template = included\n included.file.body.generate(writer)\n writer.current_template = old\n\n\nclass _ApplyBlock(_Node):\n def __init__(self, method, body=None):\n self.method = method\n self.body = body\n\n def each_child(self):\n return (self.body,)\n\n def generate(self, writer):\n method_name = \"apply%d\" % writer.apply_counter\n writer.apply_counter += 1\n writer.write_line(\"def %s():\" % method_name)\n with writer.indent():\n writer.write_line(\"_buffer = []\")\n self.body.generate(writer)\n writer.write_line(\"return ''.join(_buffer)\")\n writer.write_line(\"_buffer.append(%s(%s()))\" % (\n self.method, method_name))\n\n\nclass _ControlBlock(_Node):\n def __init__(self, statement, body=None):\n self.statement = statement\n self.body = body\n\n def each_child(self):\n return (self.body,)\n\n def generate(self, writer):\n writer.write_line(\"%s:\" % self.statement)\n with writer.indent():\n self.body.generate(writer)\n\n\nclass _IntermediateControlBlock(_Node):\n def __init__(self, statement):\n self.statement = statement\n\n def generate(self, writer):\n writer.write_line(\"%s:\" % self.statement, writer.indent_size() - 1)\n\n\nclass _Statement(_Node):\n def __init__(self, statement):\n self.statement = statement\n\n def generate(self, writer):\n writer.write_line(self.statement)\n\n\nclass _Expression(_Node):\n def __init__(self, expression):\n self.expression = expression\n\n def generate(self, writer):\n writer.write_line(\"_tmp = %s\" % self.expression)\n writer.write_line(\"if isinstance(_tmp, str): _buffer.append(_tmp)\")\n writer.write_line(\"elif isinstance(_tmp, unicode): \"\n \"_buffer.append(_tmp.encode('utf-8'))\")\n writer.write_line(\"else: 
_buffer.append(str(_tmp))\")\n\n\nclass _Text(_Node):\n def __init__(self, value):\n self.value = value\n\n def generate(self, writer):\n value = self.value\n\n # Compress lots of white space to a single character. If the whitespace\n # breaks a line, have it continue to break a line, but just with a\n # single \\n character\n if writer.compress_whitespace and \"
    \" not in value:\n            value = re.sub(r\"([\\t ]+)\", \" \", value)\n            value = re.sub(r\"(\\s*\\n\\s*)\", \"\\n\", value)\n\n        if value:\n            writer.write_line('_buffer.append(%r)' % value)\n\n\nclass ParseError(Exception):\n    \"\"\"Raised for template syntax errors.\"\"\"\n    pass\n\n\nclass _CodeWriter(object):\n    def __init__(self, file, named_blocks, loader, current_template,\n                 compress_whitespace):\n        self.file = file\n        self.named_blocks = named_blocks\n        self.loader = loader\n        self.current_template = current_template\n        self.compress_whitespace = compress_whitespace\n        self.apply_counter = 0\n        self._indent = 0\n\n    def indent(self):\n        return self\n\n    def indent_size(self):\n        return self._indent\n\n    def __enter__(self):\n        self._indent += 1\n        return self\n\n    def __exit__(self, *args):\n        assert self._indent > 0\n        self._indent -= 1\n\n    def write_line(self, line, indent=None):\n        if indent == None:\n            indent = self._indent\n        for i in xrange(indent):\n            self.file.write(\"    \")\n        print >> self.file, line\n\n\nclass _TemplateReader(object):\n    def __init__(self, name, text):\n        self.name = name\n        self.text = text\n        self.line = 0\n        self.pos = 0\n\n    def find(self, needle, start=0, end=None):\n        assert start >= 0, start\n        pos = self.pos\n        start += pos\n        if end is None:\n            index = self.text.find(needle, start)\n        else:\n            end += pos\n            assert end >= start\n            index = self.text.find(needle, start, end)\n        if index != -1:\n            index -= pos\n        return index\n\n    def consume(self, count=None):\n        if count is None:\n            count = len(self.text) - self.pos\n        newpos = self.pos + count\n        self.line += self.text.count(\"\\n\", self.pos, 
newpos)\n        s = self.text[self.pos:newpos]\n        self.pos = newpos\n        return s\n\n    def remaining(self):\n        return len(self.text) - self.pos\n\n    def __len__(self):\n        return self.remaining()\n\n    def __getitem__(self, key):\n        if type(key) is slice:\n            size = len(self)\n            start, stop, step = key.indices(size)\n            if start is None:\n                start = self.pos\n            else:\n                start += self.pos\n\n            if stop is not None:\n                stop += self.pos\n\n            return self.text[slice(start, stop, step)]\n        elif key < 0:\n            return self.text[key]\n        else:\n            return self.text[self.pos + key]\n\n    def __str__(self):\n        return self.text[self.pos:]\n\n\ndef _format_code(code):\n    lines = code.splitlines()\n    format = \"%%%dd  %%s\\n\" % len(repr(len(lines) + 1))\n    return \"\".join([format % (i + 1, line) for (i, line) in enumerate(lines)])\n\n\ndef _parse(reader, in_block=None):\n    body = _ChunkList([])\n    while True:\n        # Find next template directive\n        curly = 0\n        while True:\n            curly = reader.find(\"{\", curly)\n            if curly == -1 or curly + 1 == reader.remaining():\n                # EOF\n                if in_block:\n                    raise ParseError(\"Missing {%% end %%} block for %s\" %\n                                     in_block)\n                body.chunks.append(_Text(reader.consume()))\n                return body\n            # If the first curly brace is not the start of a special token,\n            # start searching from the character after it\n            if reader[curly + 1] not in (\"{\", \"%\"):\n                curly += 1\n                continue\n            # When there are more than 2 curlies in a row, use the\n            # innermost ones.  
This is useful when generating languages\n            # like latex where curlies are also meaningful\n            if (curly + 2 < reader.remaining() and\n                reader[curly + 1] == '{' and reader[curly + 2] == '{'):\n                curly += 1\n                continue\n            break\n\n        # Append any text before the special token\n        if curly > 0:\n            body.chunks.append(_Text(reader.consume(curly)))\n\n        start_brace = reader.consume(2)\n        line = reader.line\n\n        # Expression\n        if start_brace == \"{{\":\n            end = reader.find(\"}}\")\n            if end == -1 or reader.find(\"\\n\", 0, end) != -1:\n                raise ParseError(\"Missing end expression }} on line %d\" % line)\n            contents = reader.consume(end).strip()\n            reader.consume(2)\n            if not contents:\n                raise ParseError(\"Empty expression on line %d\" % line)\n            body.chunks.append(_Expression(contents))\n            continue\n\n        # Block\n        assert start_brace == \"{%\", start_brace\n        end = reader.find(\"%}\")\n        if end == -1 or reader.find(\"\\n\", 0, end) != -1:\n            raise ParseError(\"Missing end block %%} on line %d\" % line)\n        contents = reader.consume(end).strip()\n        reader.consume(2)\n        if not contents:\n            raise ParseError(\"Empty block tag ({%% %%}) on line %d\" % line)\n\n        operator, space, suffix = contents.partition(\" \")\n        suffix = suffix.strip()\n\n        # Intermediate (\"else\", \"elif\", etc) blocks\n        intermediate_blocks = {\n            \"else\": set([\"if\", \"for\", \"while\"]),\n            \"elif\": set([\"if\"]),\n            \"except\": set([\"try\"]),\n            \"finally\": set([\"try\"]),\n        }\n        allowed_parents = intermediate_blocks.get(operator)\n        if allowed_parents is not None:\n            if not in_block:\n                raise ParseError(\"%s outside %s 
block\" %\n                            (operator, allowed_parents))\n            if in_block not in allowed_parents:\n                raise ParseError(\"%s block cannot be attached to %s block\" % \\\n                    (operator, in_block))\n            body.chunks.append(_IntermediateControlBlock(contents))\n            continue\n\n        # End tag\n        elif operator == \"end\":\n            if not in_block:\n                raise ParseError(\"Extra {%% end %%} block on line %d\" % line)\n            return body\n\n        elif operator in (\"extends\", \"include\", \"set\", \"import\", \"comment\"):\n            if operator == \"comment\":\n                continue\n            if operator == \"extends\":\n                suffix = suffix.strip('\"').strip(\"'\")\n                if not suffix:\n                    raise ParseError(\"extends missing file path on line %d\" % \\\n                        line)\n                block = _ExtendsBlock(suffix)\n            elif operator == \"import\":\n                if not suffix:\n                    raise ParseError(\"import missing statement on line %d\" % \\\n                        line)\n                block = _Statement(contents)\n            elif operator == \"include\":\n                suffix = suffix.strip('\"').strip(\"'\")\n                if not suffix:\n                    raise ParseError(\"include missing file path on line %d\" % \\\n                        line)\n                block = _IncludeBlock(suffix, reader)\n            elif operator == \"set\":\n                if not suffix:\n                    raise ParseError(\"set missing statement on line %d\" % line)\n                block = _Statement(suffix)\n            body.chunks.append(block)\n            continue\n\n        elif operator in (\"apply\", \"block\", \"try\", \"if\", \"for\", \"while\"):\n            # parse inner body recursively\n            block_body = _parse(reader, operator)\n            if operator == \"apply\":\n    
            if not suffix:\n                    raise ParseError(\"apply missing method name on line %d\" % \\\n                        line)\n                block = _ApplyBlock(suffix, block_body)\n            elif operator == \"block\":\n                if not suffix:\n                    raise ParseError(\"block missing name on line %d\" % line)\n                block = _NamedBlock(suffix, block_body)\n            else:\n                block = _ControlBlock(contents, block_body)\n            body.chunks.append(block)\n            continue\n\n        else:\n            raise ParseError(\"unknown operator: %r\" % operator)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476131,"cells":{"repo_name":{"kind":"string","value":"loulich/Couchpotato"},"path":{"kind":"string","value":"libs/requests/packages/urllib3/request.py"},"copies":{"kind":"string","value":"853"},"size":{"kind":"string","value":"5751"},"content":{"kind":"string","value":"try:\n    from urllib.parse import urlencode\nexcept ImportError:\n    from urllib import urlencode\n\nfrom .filepost import encode_multipart_formdata\n\n\n__all__ = ['RequestMethods']\n\n\nclass RequestMethods(object):\n    \"\"\"\n    Convenience mixin for classes who implement a :meth:`urlopen` method, such\n    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and\n    :class:`~urllib3.poolmanager.PoolManager`.\n\n    Provides behavior for making common types of HTTP request methods and\n    decides which type of request field encoding to use.\n\n    Specifically,\n\n    :meth:`.request_encode_url` is for sending requests whose fields are\n    encoded in the URL (such as GET, HEAD, DELETE).\n\n    :meth:`.request_encode_body` is for sending requests whose fields are\n    encoded in the *body* of the request using multipart or www-form-urlencoded\n    (such as for POST, PUT, PATCH).\n\n    :meth:`.request` is for making any kind of request, it will look up the\n    appropriate encoding format and use one of 
the above two methods to make\n    the request.\n\n    Initializer parameters:\n\n    :param headers:\n        Headers to include with all requests, unless other headers are given\n        explicitly.\n    \"\"\"\n\n    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])\n\n    def __init__(self, headers=None):\n        self.headers = headers or {}\n\n    def urlopen(self, method, url, body=None, headers=None,\n                encode_multipart=True, multipart_boundary=None,\n                **kw):  # Abstract\n        raise NotImplemented(\"Classes extending RequestMethods must implement \"\n                             \"their own ``urlopen`` method.\")\n\n    def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n        \"\"\"\n        Make a request using :meth:`urlopen` with the appropriate encoding of\n        ``fields`` based on the ``method`` used.\n\n        This is a convenience method that requires the least amount of manual\n        effort. It can be used in most situations, while still having the\n        option to drop down to more specific methods when necessary, such as\n        :meth:`request_encode_url`, :meth:`request_encode_body`,\n        or even the lowest level :meth:`urlopen`.\n        \"\"\"\n        method = method.upper()\n\n        if method in self._encode_url_methods:\n            return self.request_encode_url(method, url, fields=fields,\n                                           headers=headers,\n                                           **urlopen_kw)\n        else:\n            return self.request_encode_body(method, url, fields=fields,\n                                            headers=headers,\n                                            **urlopen_kw)\n\n    def request_encode_url(self, method, url, fields=None, **urlopen_kw):\n        \"\"\"\n        Make a request using :meth:`urlopen` with the ``fields`` encoded in\n        the url. 
This is useful for request methods like GET, HEAD, DELETE, etc.\n        \"\"\"\n        if fields:\n            url += '?' + urlencode(fields)\n        return self.urlopen(method, url, **urlopen_kw)\n\n    def request_encode_body(self, method, url, fields=None, headers=None,\n                            encode_multipart=True, multipart_boundary=None,\n                            **urlopen_kw):\n        \"\"\"\n        Make a request using :meth:`urlopen` with the ``fields`` encoded in\n        the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n        When ``encode_multipart=True`` (default), then\n        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode\n        the payload with the appropriate content type. Otherwise\n        :meth:`urllib.urlencode` is used with the\n        'application/x-www-form-urlencoded' content type.\n\n        Multipart encoding must be used when posting files, and it's reasonably\n        safe to use it in other times too. However, it may break request\n        signing, such as with OAuth.\n\n        Supports an optional ``fields`` parameter of key/value strings AND\n        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n        the MIME type is optional. 
For example::\n\n            fields = {\n                'foo': 'bar',\n                'fakefile': ('foofile.txt', 'contents of foofile'),\n                'realfile': ('barfile.txt', open('realfile').read()),\n                'typedfile': ('bazfile.bin', open('bazfile').read(),\n                              'image/jpeg'),\n                'nonamefile': 'contents of nonamefile field',\n            }\n\n        When uploading a file, providing a filename (the first parameter of the\n        tuple) is optional but recommended to best mimick behavior of browsers.\n\n        Note that if ``headers`` are supplied, the 'Content-Type' header will\n        be overwritten because it depends on the dynamic random boundary string\n        which is used to compose the body of the request. The random boundary\n        string can be explicitly set with the ``multipart_boundary`` parameter.\n        \"\"\"\n        if headers is None:\n            headers = self.headers\n\n        extra_kw = {'headers': {}}\n\n        if fields:\n            if 'body' in urlopen_kw:\n                raise TypeError('request got values for both \\'fields\\' and \\'body\\', can only specify one.')\n\n            if encode_multipart:\n                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)\n            else:\n                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'\n\n            extra_kw['body'] = body\n            extra_kw['headers'] = {'Content-Type': content_type}\n\n        extra_kw['headers'].update(headers)\n        extra_kw.update(urlopen_kw)\n\n        return self.urlopen(method, url, 
**extra_kw)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476132,"cells":{"repo_name":{"kind":"string","value":"amirrpp/django-oscar"},"path":{"kind":"string","value":"tests/unit/wishlist_tests.py"},"copies":{"kind":"string","value":"69"},"size":{"kind":"string","value":"1388"},"content":{"kind":"string","value":"from django.test import TestCase\n\nfrom oscar.apps.wishlists.models import WishList\nfrom oscar.core.compat import get_user_model\n\nUser = get_user_model()\n\n\nclass TestAWishlist(TestCase):\n\n    def test_can_generate_a_random_key(self):\n        key = WishList.random_key(6)\n        self.assertTrue(len(key) == 6)\n\n\nclass TestAPublicWishList(TestCase):\n\n    def setUp(self):\n        self.wishlist = WishList(visibility=WishList.PUBLIC)\n\n    def test_is_visible_to_anyone(self):\n        user = User()\n        self.assertTrue(self.wishlist.is_allowed_to_see(user))\n\n\nclass TestASharedWishList(TestCase):\n\n    def setUp(self):\n        self.wishlist = WishList(visibility=WishList.SHARED)\n\n    def test_is_visible_to_anyone(self):\n        user = User()\n        self.assertTrue(self.wishlist.is_allowed_to_see(user))\n\n\nclass TestAPrivateWishList(TestCase):\n\n    def setUp(self):\n        self.owner = User(id=1)\n        self.another_user = User(id=2)\n        self.wishlist = WishList(owner=self.owner)\n\n    def test_is_visible_only_to_its_owner(self):\n        self.assertTrue(self.wishlist.is_allowed_to_see(self.owner))\n        self.assertFalse(self.wishlist.is_allowed_to_see(self.another_user))\n\n    def test_can_only_be_edited_by_its_owner(self):\n        self.assertTrue(self.wishlist.is_allowed_to_edit(self.owner))\n        
self.assertFalse(self.wishlist.is_allowed_to_edit(self.another_user))\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476133,"cells":{"repo_name":{"kind":"string","value":"yzl0083/orange"},"path":{"kind":"string","value":"Orange/OrangeCanvas/scheme/tests/test_annotations.py"},"copies":{"kind":"string","value":"26"},"size":{"kind":"string","value":"1426"},"content":{"kind":"string","value":"\"\"\"\nTests for scheme annotations.\n\n\"\"\"\n\nfrom ...gui import test\n\n\nfrom .. import SchemeArrowAnnotation, SchemeTextAnnotation\n\n\nclass TestAnnotations(test.QCoreApplication):\n    def test_arrow(self):\n        arrow = SchemeArrowAnnotation((0, 0), (10, 10))\n        self.assertTrue(arrow.start_pos == (0, 0))\n        self.assertTrue(arrow.end_pos == (10, 10))\n\n        def count():\n            count.i += 1\n        count.i = 0\n\n        arrow.geometry_changed.connect(count)\n        arrow.set_line((10, 10), (0, 0))\n        self.assertTrue(arrow.start_pos == (10, 10))\n        self.assertTrue(arrow.end_pos == (0, 0))\n        self.assertTrue(count.i == 1)\n\n    def test_text(self):\n        text = SchemeTextAnnotation((0, 0, 10, 100), \"--\")\n        self.assertEqual(text.rect, (0, 0, 10, 100))\n        self.assertEqual(text.text, \"--\")\n\n        def count():\n            count.i += 1\n        count.i = 0\n\n        text.geometry_changed.connect(count)\n        text.set_rect((9, 9, 30, 30))\n        self.assertEqual(text.rect, (9, 9, 30, 30))\n        self.assertEqual(count.i == 1)\n\n        text.rect = (4, 4, 4, 4)\n        self.assertEqual(count.i == 2)\n\n        count.i = 0\n        text.text_changed.connect(count)\n\n        text.set_text(\"...\")\n        self.assertEqual(text.text, \"...\")\n        self.assertTrue(count.i == 1)\n\n        text.text = '=='\n        self.assertEqual(text.text, \"--\")\n        self.assertTrue(count.i == 
2)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476134,"cells":{"repo_name":{"kind":"string","value":"devosoft/Pepper"},"path":{"kind":"string","value":"tests/preprocessor_test.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"14658"},"content":{"kind":"string","value":"# This file is a part of the Pepper project, https://github.com/devosoft/Pepper\n# (C) Michigan State University, under the MIT License\n# See LICENSE.txt for more information\n\nfrom bunch import Bunch\nfrom pathlib import Path\nfrom unittest.mock import MagicMock\nimport os\nimport pytest\nimport shutil\nimport subprocess\n\nimport pepper.symbol_table as symtable\nimport pepper.abstract_symbol_tree as ast\nimport pepper.preprocessor as preprocessor\n\nSOURCE_FILE_DIRECTORY = \"./tests/test_data/\"\nEXAMPLE_OUTPUT_DIRECTORY = \"./tests/test_data/output_examples/\"\n\n\nclass FakeFile():\n    def __init__(self, name, contents=None):\n        self.name = name\n        self.contents = contents if contents else []\n        self.index = 0\n\n    def readline(self):\n        if self.index >= len(self.contents):\n            return \"\"\n        else:\n            self.index += 1\n            return self.contents[self.index-1]\n\n    def close(self):\n        pass\n\n    def write(self, lines):\n        self.contents.extend(lines.split(\"\\n\"))\n\n    def get_contents(self):\n        return \"\\n\".join(self.contents)\n\n    def name(self):\n        return self.name\n\n\nclass FakeArgs():\n    def __init__(self):\n        self.input_file = None\n        self.output_file = None\n        self.trigger_internal_error = False\n        self.sys_include = False\n        self.debug = True\n\n\ndef preprocess_and_compare_functionally(source, reference, prebuilt_args_object=None):\n    args = None\n    if prebuilt_args_object:\n        args = prebuilt_args_object\n    else:\n        args = FakeArgs()\n\n    if args.input_file is None:\n        fake_input_file = None\n\n  
      with open(SOURCE_FILE_DIRECTORY + source, 'r') as sourcefile:\n            fake_input_file = FakeFile(f\"{SOURCE_FILE_DIRECTORY}{source}\", sourcefile.readlines())\n\n        args.input_file = fake_input_file\n\n    fake_output_file = FakeFile(f\"{source}.fake_output\")\n    args.output_file = fake_output_file\n\n    preprocessor.main(args)\n\n    if isinstance(reference, FakeFile):\n        assert(args.output_file.contents == reference.contents)\n    else:\n        with open(EXAMPLE_OUTPUT_DIRECTORY + reference) as reference_file:\n            assert(args.output_file.get_contents() == reference_file.read())\n\n\ndef reset_state():\n    symtable.TABLE = dict()\n    symtable.FILE_STACK = []\n    symtable.IFDEF_STACK = []\n    symtable.SYSTEM_INCLUDE_PATHS = []\n    symtable.EXPANDED_MACRO = False\n    symtable.TRIGGER_INTERNAL_ERROR = False\n    symtable.IF_COUNT = 0\n    symtable.IGNORED_FILE_PATHS = set()\n\ndef preprocess_and_compare(source, reference, tmpdir, supportfiles=[], optional_args=[]):\n    test_dir = tmpdir.mkdir('preprocessor')\n    # copy the test file to the test directory\n    shutil.copy(SOURCE_FILE_DIRECTORY + source, test_dir.realpath())\n\n    for entry in supportfiles:\n        shutil.copy(SOURCE_FILE_DIRECTORY + entry, test_dir.realpath())\n\n    call = [\"Pepper\"] + optional_args + [f\"{test_dir.realpath()}/{source}\"]\n\n    process = subprocess.run(call, timeout=2, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    assert(process.returncode == 0)\n    with open(f'{EXAMPLE_OUTPUT_DIRECTORY}{reference}', 'r') as expected_file:\n        with open(f\"{test_dir.realpath()}/{source}.preprocessed.cc\") as outfile:\n            assert(outfile.read() == expected_file.read())\n\n    assert(not process.stderr)\n\nclass TestUnit:\n    def setup_method(self, method):\n        reset_state()\n\n    def test_comments(self, tmpdir):\n        preprocess_and_compare_functionally('comments.cpp', 'comments.cpp.preprocessed.cc')\n\n    def 
test_nested_macro_expansion(self, tmpdir):\n        preprocess_and_compare_functionally('multiple_macros.cpp',\n                                            'multiple_macros.cpp.preprocessed.cc')\n\n    def test_function_and_macro_calls(self, tmpdir):\n        preprocess_and_compare_functionally('function_and_macro_calls.cpp',\n                                            'function_and_macro_calls.cpp.preprocessed.cc')\n\n    def test_function_and_macro_calls_2(self, tmpdir):\n        preprocess_and_compare_functionally('function_like_macro_2.cpp',\n                                            'function_like_macro_2.cpp.preprocessed.cc')\n\n    def test_basic_function_with_defaults_refactored(self, tmpdir):\n        preprocess_and_compare(\"file_include.cpp\",\n                               \"preprocessed_file_include.cpp\",\n                               tmpdir,\n                               ['SomeFile.h', 'SomeOtherFile.h'])\n\n    def test_ifdef_handling(self, tmpdir):\n        preprocess_and_compare_functionally('ifdef.cpp', 'ifdef.cpp.preprocessed.cc')\n\n    def test_for_loop_not_breaking_macros(self, tmpdir):\n        preprocess_and_compare_functionally(\"for_loop.cpp\", \"for_loop.cpp.preprocessed.cc\")\n\n    def test_variadic_macro_expansion(self, tmpdir):\n        ifile_contents = [\n            \"#define somemacro(a, b, moar...) 
a + b + mult(moar)\\n\",\n            \"int main {\\n\",\n            \"   cout << somemacro(1, 2, 3, 4, 5, 6) << endl;\\n\",\n            \"}\",\n        ]\n        expected_out = [\n            \"// Macro somemacro with args ['a', 'b', 'moar...'] expanding to 'a + b + mult(moar)'\", # NOQA\n            \"int main {\",\n            \"   cout << 1 + 2 + mult(3, 4, 5, 6) << endl;\",\n            \"}\",\n            \"\",\n        ]\n\n        args = FakeArgs()\n        args.input_file = FakeFile('variadic_expand.cc', ifile_contents)\n        expected_out_file = FakeFile('whatever', expected_out)\n\n        preprocess_and_compare_functionally(None, expected_out_file, args)\n\n    def test_system_file_include(self, tmpdir):\n        system_dir = tmpdir.mkdir('system_include_path')\n\n        args = FakeArgs()\n        args.sys_include = [system_dir.realpath()]\n        # copy some files to the tmpdir, then run search for them\n\n        shutil.copy(SOURCE_FILE_DIRECTORY + 'SomeFile.h', f\"{system_dir.realpath()}/SomeFile.h\")\n        shutil.copy(SOURCE_FILE_DIRECTORY + 'SomeOtherFile.h',\n                    f\"{system_dir.realpath()}/SomeOtherFile.h\")\n\n        preprocess_and_compare_functionally('systemish_include.cpp',\n                                            'systemish_include.cpp.preprocessed.cc',\n                                            args)\n\n    def test_include_path_search(self, tmpdir):\n        # copy some files to the tmpdir, then run search for them\n        test_dir = tmpdir.mkdir('include_path')\n        shutil.copy(SOURCE_FILE_DIRECTORY + 'SomeFile.h', test_dir.realpath())\n        symtable.SYSTEM_INCLUDE_PATHS.append(str(test_dir.realpath()))\n\n        found = ast.PreprocessorIncludeNode.search_system_includes('SomeFile.h')\n        expected = str(Path(f\"{test_dir.realpath()}/{'SomeFile.h'}\"))\n\n        assert(found and (found == expected))\n\n        try:\n            found = 
ast.PreprocessorIncludeNode.search_system_includes('FileThatDoesNotExist.h')\n            assert(False and \"There should have been an OSError!\")\n        except OSError as err:\n            assert(\"Could not find file FileThatDoesNotExist.h in defined system include paths:\" in str(err)) # NOQA\n\n    def test_error_raised_for_bad_syntax(self, tmpdir):\n        test_dir = tmpdir.mkdir('preprocessor')\n        # copy the test file to the test directory\n        shutil.copy(SOURCE_FILE_DIRECTORY + \"error.cpp\", test_dir.realpath())\n\n        exception_raised = False\n        try:\n            # doesn't actually matter what the reference is\n            preprocess_and_compare_functionally('error.cpp', 'preprocessed_file_include.cpp')\n            assert(False and \"Should have had an exception thrown!\")\n        except symtable.PepperSyntaxError as err:\n            exception_raised = True\n\n        assert(exception_raised)\n\n    def test_internal_error_handling(self, tmpdir):\n        args = FakeArgs()\n        args.trigger_internal_error = True\n\n        exception_raised = False\n        try:\n            preprocess_and_compare_functionally('function_like_macro_2.cpp',\n                                                'function_like_macro_2.cpp.preprocessed.cc',\n                                                args)\n            assert(False and \"Should have had an exception thrown!\")\n        except symtable.PepperInternalError as err:\n            exception_raised = True\n\n        assert(exception_raised)\n\n    def test_if_basic_expressions(self, tmpdir):\n        preprocess_and_compare_functionally('if_expressions.cpp',\n                                            'if_expressions.cpp.preprocessed.cc')\n\n    def test_if_macro_calls(self, tmpdir):\n        preprocess_and_compare_functionally('if_macro_expressions.cpp',\n                                            'if_macro_expressions.cpp.preprocessed.cc')\n\n    def test_if_with_file_includes(self, 
tmpdir):\n        preprocess_and_compare(\"file_include_if.cpp\", \"file_include_if.preprocessed.cc\",\n                               tmpdir,\n                               ['SomeFile.h', 'SomeOtherFile.h'])\n\n    def test_error_raised_if_token_syntax(self, tmpdir):\n        in_contents = [\n            \"#define M1(a,b) a + b\\n\",\n            \"#if M1(12.2, 12.1 *0.23)\\n\",\n            \"#endif\"\n        ]\n\n        expected = [\"\"]\n\n        args = FakeArgs()\n        args.input_file = FakeFile(\"type_error.cc\", in_contents)\n        expected_file = FakeFile(\"whatever\", expected)\n\n        exception_raised = False\n        try:\n            # doesn't actually matter what the reference is\n            preprocess_and_compare_functionally(None, expected_file, args)\n            assert(False and \"Should have had an exception thrown!\")\n        except symtable.PepperSyntaxError as err:\n            exception_raised = True\n\n        assert(exception_raised)\n\n    def test_error_raised_macro_eval_syntax(self, tmpdir):\n        in_contents = [\n            \"#define M1(a,b) a and or and b\\n\",\n            \"#if M1(1, 2)\\n\",\n            \"#endif\"\n        ]\n\n        expected = [\"\"]\n\n        args = FakeArgs()\n        args.input_file = FakeFile(\"macro_error.cc\", in_contents)\n        expected_file = FakeFile(\"whatever\", expected)\n\n        exception_raised = False\n        try:\n            # doesn't actually matter what the reference is\n            preprocess_and_compare_functionally(None, expected_file, args)\n            assert(False and \"Should have had an exception thrown!\")\n        except symtable.PepperSyntaxError as err:\n            exception_raised = True\n\n        assert(exception_raised)\n\n    def test_error_directive_raised(self, tmpdir):\n        in_contents = [\n            \"#ifndef __M1__\\n\",\n            '#error \"This constant should be present!\"\\n',\n            \"#endif\"\n        ]\n\n        expected = 
[\"\"]\n\n        args = FakeArgs()\n        args.input_file = FakeFile(\"macro_error.cc\", in_contents)\n        expected_file = FakeFile(\"whatever\", expected)\n\n        exception_raised = False\n        try:\n            # doesn't actually matter what the reference is\n            preprocess_and_compare_functionally(None, expected_file, args)\n            assert(False and \"Should have had an exception thrown!\")\n        except ast.PreprocessorErrorNode.PepperCompileError as err:\n            exception_raised = True\n\n        assert(exception_raised)\n\n    def test_error_directive_not_raised(self, tmpdir):\n        in_contents = [\n            \"#ifdef __M1__\\n\",\n            '#error \"This constant shouldnt be present!\"\\n',\n            \"#endif\"\n        ]\n\n        expected = [\"// endif expression \", \"\"]\n\n        args = FakeArgs()\n        args.input_file = FakeFile(\"macro_error.cc\", in_contents)\n        expected_file = FakeFile(\"whatever\", expected)\n\n        preprocess_and_compare_functionally(None, expected_file, args)\n\n    def test_warning_directive_raised(self, tmpdir):\n        test_dir = tmpdir.mkdir('preprocessor')\n        source = \"warning.cpp\"\n        reference = source + \".preprocessed.cc\"\n        shutil.copy(SOURCE_FILE_DIRECTORY + source, test_dir.realpath())\n\n        call = [\"Pepper\"] + [f\"{test_dir.realpath()}/{source}\"]\n        process = subprocess.run(call, timeout=2, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        assert(process.returncode == 0)\n\n        with open(f'{EXAMPLE_OUTPUT_DIRECTORY}{reference}', 'r') as expected_file:\n            with open(f\"{test_dir.realpath()}/{source}.preprocessed.cc\") as outfile:\n                assert(outfile.read() == expected_file.read())\n\n        assert(process.stderr ==\n               b'\\nwarning.cpp:4 warning: \"WARN\"\\n\\nwarning.cpp:8 warning: \"WARN\"\\n')\n\n    def test_warning_directive_not_raised(self, tmpdir):\n        
preprocess_and_compare(\"no_warning.cpp\", \"no_warning.cpp.preprocessed.cc\",\n                               tmpdir)\n\n    def test_pragma_once_handler(self, tmpdir):\n        assert \"once\" in symtable.PRAGMA_HANDLERS\n\n        # should techincally be 'r' but then it'd fail\n        symtable.FILE_STACK.append(open('./SomeFile.h', 'w'))\n        symtable.PRAGMA_HANDLERS[\"once\"]()\n\n        assert \"./SomeFile.h\" in symtable.IGNORED_FILE_PATHS\n        symtable.FILE_STACK = [Bunch(name=\"BaseFile.h\")]\n\n        include_node = ast.PreprocessorIncludeNode([\"'SomeFile.h'\"], False)\n        include_node.preprocess()\n\n        assert len(symtable.FILE_STACK) == 1\n\n        # Teardown\n\n        os.remove('./SomeFile.h')\n\n    def test_pragma_with_arguments(self, tmpdir):\n        mock_handler = MagicMock()\n        symtable.PRAGMA_HANDLERS['test'] = mock_handler\n\n        in_contents = [\n            \"#pragma test ('ragnlebalrgle testing wooo')\"\n        ]\n\n        expected = [\"\", \"\"]\n\n        args = FakeArgs()\n        args.input_file = FakeFile(\"arged_pragma.cc\", in_contents)\n        expected_file = FakeFile(\"whatever\", expected)\n\n        preprocess_and_compare_functionally(None, expected_file, args)\n\n        assert len(mock_handler.mock_calls) == 1\n\n    def test_unknown_pragma(self, tmpdir):\n\n        in_contents = [\n            \"#pragma unknwon ('ragnlebalrgle testing wooo')\"\n        ]\n\n        expected = [\"\", \"\"]\n\n        args = FakeArgs()\n        args.input_file = FakeFile(\"arged_pragma.cc\", in_contents)\n        expected_file = FakeFile(\"whatever\", expected)\n\n        with pytest.raises(symtable.PepperInternalError):\n            preprocess_and_compare_functionally(None, expected_file, args)\n\n    def test_pragma_preprocessor(self, tmpdir):\n        preprocess_and_compare(\"pragma_base.cc\", \"pragma_base.cc.preprocessed.cc\",\n                               tmpdir,\n                               
['pragma_include.h'])\n\n\nclass TestSystem:\n    def test_basic_function(self, tmpdir):\n        preprocess_and_compare(\"file_include.cpp\",\n                               \"preprocessed_file_include.cpp\",\n                               tmpdir,\n                               ['SomeFile.h', 'SomeOtherFile.h'])\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":476135,"cells":{"repo_name":{"kind":"string","value":"knossos-project/knossos_python_tools"},"path":{"kind":"string","value":"setup.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"2581"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport setuptools\nfrom setuptools import find_packages, setup, Extension\nfrom pkg_resources import parse_version\n\n\n# Setuptools >=18.0 is needed for Cython to work correctly.\nif parse_version(setuptools.__version__) < parse_version('18.0'):\n    print('\\nYour installed Setuptools version is too old.')\n    print('Please upgrade it to at least 18.0, e.g. by running')\n    print('$ python{} -m pip install --upgrade setuptools'.format(sys.version_info[0]))\n    print('If this fails, try additionally passing the \"--user\" switch to the install command, or use Anaconda.')\n    sys.stdout.flush()\n    sys.exit(1)\n\ntry:\n    import numpy\nexcept ImportError:\n    print(\"Numpy not found. 
Please install Numpy manually: http://www.scipy.org/install.html\")\n    sys.stdout.flush()\n    sys.exit(1)\n\nextensions = [Extension(\n    \"knossos_utils.mergelist_tools\",\n    [\"knossos_utils/mergelist_tools.pyx\"],\n    include_dirs=[numpy.get_include()],\n    language=\"c++\",\n    extra_compile_args=[\"-std=c++0x\", \"-include\", \"cmath\"])\n]\n\ninstall_requires = [\n    \"cython>=0.23\",\n    \"h5py>=2.5\",\n    \"imageio\",\n    \"numpy>=1.10\",\n    \"scipy>=0.16\",\n    \"networkx>=1.11\",\n    \"requests>=2.12\",\n    \"matplotlib\",\n    \"Pillow\"\n]\n\n\ndef read(fname):\n    return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n    name=\"knossos_utils\",\n    version=\"0.1\",\n    description=\"Tools for generating or manipulating knossos datasets and annotation files\",\n    author=\"Sven Dorkenwald, KNOSSOS team\",\n    author_email=\"knossos-team@mpimf-heidelberg.mpg.de\",\n    url=\"https://github.com/knossos-project/knossos_utils\",\n    license=\"GPL\",\n    long_description=read(\"README.md\"),\n    packages=find_packages(),\n    data_files=[(\"\", [\"LICENSE\"])],\n    ext_modules=extensions,\n    setup_requires=[\n        \"cython>=0.23\",\n    ],\n    install_requires=install_requires,\n    extras_require={\n        \"snappy\": [\"python-snappy>=0.5\"],\n        # \"skeletopyze\": only needed for importing skeletopyze skeletons. 
See https://github.com/funkey/skeletopyze\n    },\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'Environment :: Console',\n        'Intended Audience :: Science/Research',\n        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n        'Operating System :: POSIX :: Linux',\n        'Programming Language :: Python :: 2.7',\n        'Programming Language :: Python :: 3',\n        'Topic :: Scientific/Engineering :: Information Analysis',\n        'Topic :: Scientific/Engineering :: Visualization',\n    ],\n)\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":476136,"cells":{"repo_name":{"kind":"string","value":"unsiloai/syntaxnet-ops-hack"},"path":{"kind":"string","value":"tensorflow/contrib/keras/python/keras/regularizers.py"},"copies":{"kind":"string","value":"58"},"size":{"kind":"string","value":"2778"},"content":{"kind":"string","value":"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras built-in regularizers.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow.contrib.keras.python.keras import backend as K\nfrom tensorflow.contrib.keras.python.keras.utils.generic_utils import deserialize_keras_object\nfrom 
tensorflow.contrib.keras.python.keras.utils.generic_utils import serialize_keras_object\n\n\nclass Regularizer(object):\n  \"\"\"Regularizer base class.\n  \"\"\"\n\n  def __call__(self, x):\n    return 0.\n\n  @classmethod\n  def from_config(cls, config):\n    return cls(**config)\n\n\nclass L1L2(Regularizer):\n  \"\"\"Regularizer for L1 and L2 regularization.\n\n  Arguments:\n      l1: Float; L1 regularization factor.\n      l2: Float; L2 regularization factor.\n  \"\"\"\n\n  def __init__(self, l1=0., l2=0.):  # pylint: disable=redefined-outer-name\n    self.l1 = K.cast_to_floatx(l1)\n    self.l2 = K.cast_to_floatx(l2)\n\n  def __call__(self, x):\n    regularization = 0.\n    if self.l1:\n      regularization += K.sum(self.l1 * K.abs(x))\n    if self.l2:\n      regularization += K.sum(self.l2 * K.square(x))\n    return regularization\n\n  def get_config(self):\n    return {'l1': float(self.l1), 'l2': float(self.l2)}\n\n\n# Aliases.\n\n\ndef l1(l=0.01):\n  return L1L2(l1=l)\n\n\ndef l2(l=0.01):\n  return L1L2(l2=l)\n\n\ndef l1_l2(l1=0.01, l2=0.01):  # pylint: disable=redefined-outer-name\n  return L1L2(l1=l1, l2=l2)\n\n\ndef serialize(regularizer):\n  return serialize_keras_object(regularizer)\n\n\ndef deserialize(config, custom_objects=None):\n  return deserialize_keras_object(\n      config,\n      module_objects=globals(),\n      custom_objects=custom_objects,\n      printable_module_name='regularizer')\n\n\ndef get(identifier):\n  if identifier is None:\n    return None\n  if isinstance(identifier, dict):\n    return deserialize(identifier)\n  elif isinstance(identifier, six.string_types):\n    config = {'class_name': str(identifier), 'config': {}}\n    return deserialize(config)\n  elif callable(identifier):\n    return identifier\n  else:\n    raise ValueError('Could not interpret regularizer identifier:', 
identifier)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476137,"cells":{"repo_name":{"kind":"string","value":"raw1z/ultisnips"},"path":{"kind":"string","value":"pythonx/UltiSnips/snippet/parsing/_base.py"},"copies":{"kind":"string","value":"21"},"size":{"kind":"string","value":"2505"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"Common functionality of the snippet parsing codes.\"\"\"\n\nfrom UltiSnips.position import Position\nfrom UltiSnips.snippet.parsing._lexer import tokenize, TabStopToken\nfrom UltiSnips.text_objects import TabStop\n\nfrom UltiSnips.text_objects import  Mirror\nfrom UltiSnips.snippet.parsing._lexer import MirrorToken\n\n\ndef resolve_ambiguity(all_tokens, seen_ts):\n    \"\"\"$1 could be a Mirror or a TabStop.\n\n    This figures this out.\n\n    \"\"\"\n    for parent, token in all_tokens:\n        if isinstance(token, MirrorToken):\n            if token.number not in seen_ts:\n                seen_ts[token.number] = TabStop(parent, token)\n            else:\n                Mirror(parent, seen_ts[token.number], token)\n\n\ndef tokenize_snippet_text(snippet_instance, text, indent,\n                          allowed_tokens_in_text, allowed_tokens_in_tabstops,\n                          token_to_textobject):\n    \"\"\"Turns 'text' into a stream of tokens and creates the text objects from\n    those tokens that are mentioned in 'token_to_textobject' assuming the\n    current 'indent'.\n\n    The 'allowed_tokens_in_text' define which tokens will be recognized\n    in 'text' while 'allowed_tokens_in_tabstops' are the tokens that\n    will be recognized in TabStop placeholder text.\n\n    \"\"\"\n    seen_ts = {}\n    all_tokens = []\n\n    def _do_parse(parent, text, allowed_tokens):\n        \"\"\"Recursive function that actually creates the objects.\"\"\"\n        tokens = list(tokenize(text, indent, parent.start, allowed_tokens))\n        for token in tokens:\n            
all_tokens.append((parent, token))\n            if isinstance(token, TabStopToken):\n                ts = TabStop(parent, token)\n                seen_ts[token.number] = ts\n                _do_parse(ts, token.initial_text,\n                          allowed_tokens_in_tabstops)\n            else:\n                klass = token_to_textobject.get(token.__class__, None)\n                if klass is not None:\n                    klass(parent, token)\n    _do_parse(snippet_instance, text, allowed_tokens_in_text)\n    return all_tokens, seen_ts\n\n\ndef finalize(all_tokens, seen_ts, snippet_instance):\n    \"\"\"Adds a tabstop 0 if non is in 'seen_ts' and brings the text of the\n    snippet instance into Vim.\"\"\"\n    if 0 not in seen_ts:\n        mark = all_tokens[-1][1].end  # Last token is always EndOfText\n        m1 = Position(mark.line, mark.col)\n        TabStop(snippet_instance, 0, mark, m1)\n    snippet_instance.replace_initial_text()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476138,"cells":{"repo_name":{"kind":"string","value":"pizzathief/scipy"},"path":{"kind":"string","value":"scipy/integrate/tests/test_quadrature.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"8426"},"content":{"kind":"string","value":"import numpy as np\nfrom numpy import cos, sin, pi\nfrom numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,\n                           assert_, suppress_warnings)\n\nfrom scipy.integrate import (quadrature, romberg, romb, newton_cotes,\n                             cumtrapz, quad, simps, fixed_quad,\n                             AccuracyWarning)\n\n\nclass TestFixedQuad(object):\n    def test_scalar(self):\n        n = 4\n        func = lambda x: x**(2*n - 1)\n        expected = 1/(2*n)\n        got, _ = fixed_quad(func, 0, 1, n=n)\n        # quadrature exact for this input\n        assert_allclose(got, expected, rtol=1e-12)\n\n    def test_vector(self):\n        n = 4\n        p = 
np.arange(1, 2*n)\n        func = lambda x: x**p[:,None]\n        expected = 1/(p + 1)\n        got, _ = fixed_quad(func, 0, 1, n=n)\n        assert_allclose(got, expected, rtol=1e-12)\n\n\nclass TestQuadrature(object):\n    def quad(self, x, a, b, args):\n        raise NotImplementedError\n\n    def test_quadrature(self):\n        # Typical function with two extra arguments:\n        def myfunc(x, n, z):       # Bessel function integrand\n            return cos(n*x-z*sin(x))/pi\n        val, err = quadrature(myfunc, 0, pi, (2, 1.8))\n        table_val = 0.30614353532540296487\n        assert_almost_equal(val, table_val, decimal=7)\n\n    def test_quadrature_rtol(self):\n        def myfunc(x, n, z):       # Bessel function integrand\n            return 1e90 * cos(n*x-z*sin(x))/pi\n        val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)\n        table_val = 1e90 * 0.30614353532540296487\n        assert_allclose(val, table_val, rtol=1e-10)\n\n    def test_quadrature_miniter(self):\n        # Typical function with two extra arguments:\n        def myfunc(x, n, z):       # Bessel function integrand\n            return cos(n*x-z*sin(x))/pi\n        table_val = 0.30614353532540296487\n        for miniter in [5, 52]:\n            val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)\n            assert_almost_equal(val, table_val, decimal=7)\n            assert_(err < 1.0)\n\n    def test_quadrature_single_args(self):\n        def myfunc(x, n):\n            return 1e90 * cos(n*x-1.8*sin(x))/pi\n        val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)\n        table_val = 1e90 * 0.30614353532540296487\n        assert_allclose(val, table_val, rtol=1e-10)\n\n    def test_romberg(self):\n        # Typical function with two extra arguments:\n        def myfunc(x, n, z):       # Bessel function integrand\n            return cos(n*x-z*sin(x))/pi\n        val = romberg(myfunc, 0, pi, args=(2, 1.8))\n        table_val = 0.30614353532540296487\n        
assert_almost_equal(val, table_val, decimal=7)\n\n    def test_romberg_rtol(self):\n        # Typical function with two extra arguments:\n        def myfunc(x, n, z):       # Bessel function integrand\n            return 1e19*cos(n*x-z*sin(x))/pi\n        val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)\n        table_val = 1e19*0.30614353532540296487\n        assert_allclose(val, table_val, rtol=1e-10)\n\n    def test_romb(self):\n        assert_equal(romb(np.arange(17)), 128)\n\n    def test_romb_gh_3731(self):\n        # Check that romb makes maximal use of data points\n        x = np.arange(2**4+1)\n        y = np.cos(0.2*x)\n        val = romb(y)\n        val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())\n        assert_allclose(val, val2, rtol=1e-8, atol=0)\n\n        # should be equal to romb with 2**k+1 samples\n        with suppress_warnings() as sup:\n            sup.filter(AccuracyWarning, \"divmax .4. exceeded\")\n            val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4)\n        assert_allclose(val, val3, rtol=1e-12, atol=0)\n\n    def test_non_dtype(self):\n        # Check that we work fine with functions returning float\n        import math\n        valmath = romberg(math.sin, 0, 1)\n        expected_val = 0.45969769413185085\n        assert_almost_equal(valmath, expected_val, decimal=7)\n\n    def test_newton_cotes(self):\n        \"\"\"Test the first few degrees, for evenly spaced points.\"\"\"\n        n = 1\n        wts, errcoff = newton_cotes(n, 1)\n        assert_equal(wts, n*np.array([0.5, 0.5]))\n        assert_almost_equal(errcoff, -n**3/12.0)\n\n        n = 2\n        wts, errcoff = newton_cotes(n, 1)\n        assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)\n        assert_almost_equal(errcoff, -n**5/2880.0)\n\n        n = 3\n        wts, errcoff = newton_cotes(n, 1)\n        assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)\n        assert_almost_equal(errcoff, -n**5/6480.0)\n\n 
       n = 4\n        wts, errcoff = newton_cotes(n, 1)\n        assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)\n        assert_almost_equal(errcoff, -n**7/1935360.0)\n\n    def test_newton_cotes2(self):\n        \"\"\"Test newton_cotes with points that are not evenly spaced.\"\"\"\n\n        x = np.array([0.0, 1.5, 2.0])\n        y = x**2\n        wts, errcoff = newton_cotes(x)\n        exact_integral = 8.0/3\n        numeric_integral = np.dot(wts, y)\n        assert_almost_equal(numeric_integral, exact_integral)\n\n        x = np.array([0.0, 1.4, 2.1, 3.0])\n        y = x**2\n        wts, errcoff = newton_cotes(x)\n        exact_integral = 9.0\n        numeric_integral = np.dot(wts, y)\n        assert_almost_equal(numeric_integral, exact_integral)\n\n    def test_simps(self):\n        y = np.arange(17)\n        assert_equal(simps(y), 128)\n        assert_equal(simps(y, dx=0.5), 64)\n        assert_equal(simps(y, x=np.linspace(0, 4, 17)), 32)\n\n        y = np.arange(4)\n        x = 2**y\n        assert_equal(simps(y, x=x, even='avg'), 13.875)\n        assert_equal(simps(y, x=x, even='first'), 13.75)\n        assert_equal(simps(y, x=x, even='last'), 14)\n\n\nclass TestCumtrapz(object):\n    def test_1d(self):\n        x = np.linspace(-2, 2, num=5)\n        y = x\n        y_int = cumtrapz(y, x, initial=0)\n        y_expected = [0., -1.5, -2., -1.5, 0.]\n        assert_allclose(y_int, y_expected)\n\n        y_int = cumtrapz(y, x, initial=None)\n        assert_allclose(y_int, y_expected[1:])\n\n    def test_y_nd_x_nd(self):\n        x = np.arange(3 * 2 * 4).reshape(3, 2, 4)\n        y = x\n        y_int = cumtrapz(y, x, initial=0)\n        y_expected = np.array([[[0., 0.5, 2., 4.5],\n                                [0., 4.5, 10., 16.5]],\n                               [[0., 8.5, 18., 28.5],\n                                [0., 12.5, 26., 40.5]],\n                               [[0., 16.5, 34., 52.5],\n                                [0., 
20.5, 42., 64.5]]])\n\n        assert_allclose(y_int, y_expected)\n\n        # Try with all axes\n        shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]\n        for axis, shape in zip([0, 1, 2], shapes):\n            y_int = cumtrapz(y, x, initial=3.45, axis=axis)\n            assert_equal(y_int.shape, (3, 2, 4))\n            y_int = cumtrapz(y, x, initial=None, axis=axis)\n            assert_equal(y_int.shape, shape)\n\n    def test_y_nd_x_1d(self):\n        y = np.arange(3 * 2 * 4).reshape(3, 2, 4)\n        x = np.arange(4)**2\n        # Try with all axes\n        ys_expected = (\n            np.array([[[4., 5., 6., 7.],\n                       [8., 9., 10., 11.]],\n                      [[40., 44., 48., 52.],\n                       [56., 60., 64., 68.]]]),\n            np.array([[[2., 3., 4., 5.]],\n                      [[10., 11., 12., 13.]],\n                      [[18., 19., 20., 21.]]]),\n            np.array([[[0.5, 5., 17.5],\n                       [4.5, 21., 53.5]],\n                      [[8.5, 37., 89.5],\n                       [12.5, 53., 125.5]],\n                      [[16.5, 69., 161.5],\n                       [20.5, 85., 197.5]]]))\n\n        for axis, y_expected in zip([0, 1, 2], ys_expected):\n            y_int = cumtrapz(y, x=x[:y.shape[axis]], axis=axis, initial=None)\n            assert_allclose(y_int, y_expected)\n\n    def test_x_none(self):\n        y = np.linspace(-2, 2, num=5)\n\n        y_int = cumtrapz(y)\n        y_expected = [-1.5, -2., -1.5, 0.]\n        assert_allclose(y_int, y_expected)\n\n        y_int = cumtrapz(y, initial=1.23)\n        y_expected = [1.23, -1.5, -2., -1.5, 0.]\n        assert_allclose(y_int, y_expected)\n\n        y_int = cumtrapz(y, dx=3)\n        y_expected = [-4.5, -6., -4.5, 0.]\n        assert_allclose(y_int, y_expected)\n\n        y_int = cumtrapz(y, dx=3, initial=1.23)\n        y_expected = [1.23, -4.5, -6., -4.5, 0.]\n        assert_allclose(y_int, 
y_expected)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476139,"cells":{"repo_name":{"kind":"string","value":"epssy/hue"},"path":{"kind":"string","value":"apps/jobbrowser/src/jobbrowser/models.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"22542"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Licensed to Cloudera, Inc. under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  Cloudera, Inc. licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport logging\nimport lxml.html\nimport re\nimport urllib2\n\nfrom urlparse import urlparse, urlunparse\n\nfrom django.core.urlresolvers import reverse\nfrom desktop.lib.view_util import format_duration_in_millis\nfrom desktop.lib import i18n\nfrom django.utils.html import escape\nfrom filebrowser.views import location_to_url\nfrom hadoop import job_tracker\nfrom hadoop import confparse\nfrom hadoop.api.jobtracker.ttypes import JobNotFoundException\n\nimport hadoop.api.jobtracker.ttypes as ttypes\nfrom desktop.lib.exceptions_renderable import PopupException\n\nfrom django.utils.translation import ugettext as _\nfrom jobbrowser.conf import DISABLE_KILLING_JOBS\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef can_view_job(username, job):\n  acl = get_acls(job).get('mapreduce.job.acl-view-job', '')\n  return acl == '*' or 
username in acl.split(',')\n\ndef can_modify_job(username, job):\n  acl = get_acls(job).get('mapreduce.job.acl-modify-job', '')\n  return acl == '*' or username in acl.split(',')\n\ndef get_acls(job):\n  if job.is_mr2:\n    return job.acls\n  else:\n    return job.full_job_conf\n\ndef can_kill_job(self, user):\n  if DISABLE_KILLING_JOBS.get():\n    return False\n\n  if self.status.lower() not in ('running', 'pending', 'accepted'):\n    return False\n\n  if user.is_superuser:\n    return True\n\n  if can_modify_job(user.username, self):\n    return True\n\n  return user.username == self.user\n\nclass JobLinkage(object):\n  \"\"\"\n  A thin representation of a job, without much of the details.\n  Its purpose is to wrap a JobID to allow us to get further\n  information from Hadoop, without instantiating a full Job object\n  (which requires talking to Hadoop).\n  \"\"\"\n  def __init__(self, jobtracker, jobid):\n    \"\"\"\n    JobLinkage(jobtracker, jobid) -> JobLinkage\n    The jobid is the jobid string (not the thrift jobid)\n    \"\"\"\n    self._jobtracker = jobtracker\n    self.jobId = jobid\n    self.jobId_short = \"_\".join(jobid.split(\"_\")[-2:])\n    self.is_mr2 = False\n\n  def get_task(self, task_id):\n    \"\"\"Retrieve a TaskInProgress from hadoop.\"\"\"\n    ttask = self._jobtracker.get_task(\n                    self._jobtracker.thriftjobid_from_string(self.jobId),\n                    self._jobtracker.thrifttaskid_from_string(task_id))\n    return Task(ttask, self._jobtracker)\n\nclass Job(JobLinkage):\n  \"\"\"\n  Creates a Job instance pulled from the job tracker Thrift interface.\n  \"\"\"\n\n  def __getitem__(self, item):\n    \"\"\"\n    For backwards-compatibility, resolve job[\"foo\"] as job.foo\n    \"\"\"\n    return getattr(self, item)\n\n  @staticmethod\n  def from_id(jt, jobid, is_finished=False):\n    \"\"\"\n      Returns a Job instance given a job tracker interface and an id. 
The job tracker interface is typically\n      located in request.jt.\n    \"\"\"\n    try:\n      thriftjob = jt.get_job(jt.thriftjobid_from_string(jobid))\n    except JobNotFoundException:\n      try:\n        thriftjob = jt.get_retired_job(jt.thriftjobid_from_string(jobid))\n      except JobNotFoundException, e:\n        raise PopupException(_(\"Could not find job with id %(jobid)s.\") % {'jobid': jobid}, detail=e)\n\n    return Job(jt, thriftjob)\n\n  @staticmethod\n  def from_thriftjob(jt, thriftjob):\n    \"\"\"\n      Returns a Job instance given a job tracker interface and a thriftjob object returned from that job tracker interface.\n      The job tracker interface is typically located in request.jt\n    \"\"\"\n    return Job(jt, thriftjob)\n\n  def __init__(self, jt, thriftJob):\n    \"\"\"\n    Returns a Job instance given a job tracker interface and a thriftjob object returned from that\n    job tracker interface.  The job tracker interface is typically located in request.jt\n    \"\"\"\n    JobLinkage.__init__(self, jt, thriftJob.jobID.asString)\n    self.jt = jt\n    self.job = thriftJob\n    self.tasks = []\n    if self.job.tasks is not None:\n      self.tasks = TaskList.from_thriftTaskList(self.job.tasks, jt)\n\n    self.task_map = dict( (task.taskId, task) for task in self.tasks )\n    self._counters = None\n    self._conf_keys = None\n    self._full_job_conf = None\n    self._init_attributes()\n    self.is_retired = hasattr(thriftJob, 'is_retired')\n    self.is_mr2 = False\n    self.applicationType = 'MR2'\n\n  @property\n  def counters(self):\n    if self.is_retired:\n      self._counters = {}\n    elif self._counters is None:\n      rollups = self.jt.get_job_counter_rollups(self.job.jobID)\n      # We get back a structure with counter lists for maps, reduces, and total\n      # and we need to invert this\n\n      def aggregate_counters(ctrs_from_jt, key, target):\n        for group in ctrs_from_jt.groups:\n          if group.name not in target:\n 
           target[group.name] = {\n              'name': group.name,\n              'displayName': group.displayName,\n              'counters': {}\n              }\n          agg_counters = target[group.name]['counters']\n          for counter in group.counters.itervalues():\n            if counter.name not in agg_counters:\n              agg_counters[counter.name] = {\n                'name': counter.name,\n                'displayName': counter.displayName,\n                }\n            agg_counters[counter.name][key] = counter.value\n\n      self._counters = {}\n      aggregate_counters(rollups.mapCounters, \"map\", self._counters)\n      aggregate_counters(rollups.reduceCounters, \"reduce\", self._counters)\n      aggregate_counters(rollups.jobCounters, \"total\", self._counters)\n    return self._counters\n\n  @property\n  def conf_keys(self):\n    if self._conf_keys is None:\n      self._initialize_conf_keys()\n    return self._conf_keys\n\n  @property\n  def full_job_conf(self):\n    if self._full_job_conf is None:\n      self._initialize_conf_keys()\n    return self._full_job_conf\n\n  def _init_attributes(self):\n    self.queueName = i18n.smart_unicode(self.job.profile.queueName)\n    self.jobName = i18n.smart_unicode(self.job.profile.name)\n    self.user = i18n.smart_unicode(self.job.profile.user)\n    self.mapProgress = self.job.status.mapProgress\n    self.reduceProgress = self.job.status.reduceProgress\n    self.setupProgress = self.job.status.setupProgress\n    self.cleanupProgress = self.job.status.cleanupProgress\n\n    if self.job.desiredMaps == 0:\n      maps_percent_complete = 0\n    else:\n      maps_percent_complete = int(round(float(self.job.finishedMaps) / self.job.desiredMaps * 100))\n\n    self.desiredMaps = self.job.desiredMaps\n\n    if self.job.desiredReduces == 0:\n      reduces_percent_complete = 0\n    else:\n      reduces_percent_complete = int(round(float(self.job.finishedReduces) / self.job.desiredReduces * 100))\n\n    
self.desiredReduces = self.job.desiredReduces\n    self.maps_percent_complete = maps_percent_complete\n    self.finishedMaps = self.job.finishedMaps\n    self.finishedReduces = self.job.finishedReduces\n    self.reduces_percent_complete = reduces_percent_complete\n    self.startTimeMs = self.job.startTime\n    self.startTimeFormatted = format_unixtime_ms(self.job.startTime)\n    self.launchTimeMs = self.job.launchTime\n    self.launchTimeFormatted = format_unixtime_ms(self.job.launchTime)\n\n    self.finishTimeMs = self.job.finishTime\n    self.finishTimeFormatted = format_unixtime_ms(self.job.finishTime)\n    self.status = self.job.status.runStateAsString\n    self.priority = self.job.priorityAsString\n    self.jobFile = self.job.profile.jobFile\n\n    finishTime = self.job.finishTime\n    if finishTime == 0:\n      finishTime = datetime.datetime.now()\n    else:\n      finishTime = datetime.datetime.fromtimestamp(finishTime / 1000)\n    self.duration = finishTime - datetime.datetime.fromtimestamp(self.job.startTime / 1000)\n\n    diff = int(finishTime.strftime(\"%s\")) * 1000 - self.startTimeMs\n    self.durationFormatted = format_duration_in_millis(diff)\n    self.durationInMillis = diff\n\n  def kill(self):\n    self.jt.kill_job(self.job.jobID)\n\n  def get_task(self, id):\n    try:\n      return self.task_map[id]\n    except KeyError:\n      return JobLinkage.get_task(self, id)\n\n  def filter_tasks(self, task_types=None, task_states=None, task_text=None):\n    \"\"\"\n    Filters the tasks of the job.\n    Pass in task_type and task_state as sets; None for \"all\".\n    task_text is used to search in the state, mostRecentState, and the ID.\n    \"\"\"\n    assert task_types is None or job_tracker.VALID_TASK_TYPES.issuperset(task_types)\n    assert task_states is None or job_tracker.VALID_TASK_STATES.issuperset(task_states)\n\n    def is_good_match(t):\n      if task_types is not None:\n        if t.task.taskID.taskTypeAsString.lower() not in task_types:\n     
     return False\n\n      if task_states is not None:\n        if t.state.lower() not in task_states:\n          return False\n\n      if task_text is not None:\n        tt_lower = task_text.lower()\n        if tt_lower not in t.state.lower() and tt_lower not in t.mostRecentState.lower() and tt_lower not in t.task.taskID.asString.lower():\n          return False\n\n      return True\n\n    return [ t for t in self.tasks if is_good_match(t) ]\n\n  def _initialize_conf_keys(self):\n    if self.is_retired:\n      self._conf_keys = {}\n      self._full_job_conf = {}\n    else:\n      conf_keys = [\n        'mapred.mapper.class',\n        'mapred.reducer.class',\n        'mapred.input.format.class',\n        'mapred.output.format.class',\n        'mapred.input.dir',\n        'mapred.output.dir',\n        ]\n      jobconf = get_jobconf(self.jt, self.jobId)\n      self._full_job_conf = jobconf\n      self._conf_keys = {}\n      for k, v in jobconf.iteritems():\n        if k in conf_keys:\n          self._conf_keys[dots_to_camel_case(k)] = v\n\n\nclass TaskList(object):\n  @staticmethod\n  def select(jt, jobid, task_types, task_states, text, count, offset):\n    \"\"\"\n    select(jt, jobid, task_types, task_states, text, count, offset) -> TaskList\n\n    Retrieve a TaskList from Hadoop according to the given criteria.\n    task_types is a set of job_tracker.VALID_TASK_TYPES. A value to None means everything.\n    task_states is a set of job_tracker.VALID_TASK_STATES. 
A value to None means everything.\n    \"\"\"\n    assert task_types is None or job_tracker.VALID_TASK_TYPES.issuperset(task_types)\n    assert task_states is None or job_tracker.VALID_TASK_STATES.issuperset(task_states)\n\n    if task_types is None:\n      task_types = job_tracker.VALID_TASK_TYPES\n    if task_states is None:\n      task_states = job_tracker.VALID_TASK_STATES\n\n    tjobid = jt.thriftjobid_from_string(jobid)\n    thrift_list = jt.get_task_list(tjobid, task_types, task_states, text, count, offset)\n    return TaskList.from_thriftTaskList(thrift_list, jt)\n\n  @staticmethod\n  def from_thriftTaskList(thrift_task_list, jobtracker):\n    \"\"\"TaskList.from_thriftTaskList(thrift_task_list, jobtracker) -> TaskList\n    \"\"\"\n    if thrift_task_list is None:\n      return None\n    return TaskList(thrift_task_list, jobtracker)\n\n  def __init__(self, tasklist, jobtracker):\n    self.__tasklist = tasklist                  # The thrift task list\n    self.__jt = jobtracker\n    self.__init_attributes()\n\n  def __init_attributes(self):\n    self.__tasksSoFar = [ Task(t, self.__jt) for t in self.__tasklist.tasks ]\n    self.__nTotalTasks = self.__tasklist.numTotalTasks\n\n  def __iter__(self):\n    return self.__tasksSoFar.__iter__()\n\n  def __len__(self):\n    return len(self.__tasksSoFar)\n\n  def __getitem__(self, key):\n    return self.__tasksSoFar[key]\n\n  @property\n  def tasks(self):\n    return self.__tasksSoFar\n\n  @property\n  def numTotalTasks(self):\n    return self.__nTotalTasks\n\n\nclass Task(object):\n\n  def __getitem__(self, item):\n    \"\"\"\n    For backwards-compatibility, resolve job[\"foo\"] as job.foo\n    \"\"\"\n    return getattr(self, item)\n\n  def __init__(self, task, jt):\n    self.task = task\n    self.jt = jt\n    self._init_attributes()\n\n    self.attempt_map = {}\n    for id, attempt in self.task.taskStatuses.iteritems():\n      ta = TaskAttempt(attempt, task=self)\n      self.attempt_map[id] = ta\n\n  @property\n  
def attempts(self):\n    return self.attempt_map.values()\n\n  def _init_attributes(self):\n    self.taskType = self.task.taskID.taskTypeAsString\n    self.taskId = self.task.taskID.asString\n    self.taskId_short = \"_\".join(self.taskId.split(\"_\")[-2:])\n    self.startTimeMs = self.task.startTime\n    self.startTimeFormatted = format_unixtime_ms(self.task.startTime)\n    self.execStartTimeMs = self.task.execStartTime\n    self.execStartTimeFormatted = format_unixtime_ms(self.task.execStartTime)\n    self.execFinishTimeMs = self.task.execFinishTime\n    self.execFinishTimeFormatted = format_unixtime_ms(self.task.execFinishTime)\n    self.state = self.task.state\n    assert self.state in job_tracker.VALID_TASK_STATES\n    self.progress = self.task.progress\n    self.taskId = self.task.taskID.asString\n    self.jobId = self.task.taskID.jobID.asString\n    self.taskAttemptIds = self.task.taskStatuses.keys()\n    self.mostRecentState = self.task.mostRecentState\n    self.diagnosticMap = self.task.taskDiagnosticData\n    self.counters = self.task.counters\n    self.failed = self.task.failed\n    self.complete = self.task.complete\n    self.is_mr2 = False\n\n  def get_attempt(self, id):\n    \"\"\"\n    Returns a TaskAttempt for a given id.\n    \"\"\"\n    return self.attempt_map[id]\n\n\nclass TaskAttempt(object):\n\n  def __getitem__(self, item):\n    \"\"\"\n    For backwards-compatibility, resolve task[\"foo\"] as task.foo.\n    \"\"\"\n    return getattr(self, item)\n\n  def __init__(self, task_attempt, task):\n    assert task_attempt is not None\n    self.task_attempt = task_attempt\n    self.task = task\n    self._init_attributes();\n\n  def _init_attributes(self):\n    self.taskType = self.task_attempt.taskID.taskID.taskTypeAsString\n    self.attemptId = self.task_attempt.taskID.asString\n    self.attemptId_short = \"_\".join(self.attemptId.split(\"_\")[-2:])\n    self.startTimeMs = self.task_attempt.startTime\n    self.startTimeFormatted = 
format_unixtime_ms(self.task_attempt.startTime)\n    self.finishTimeMs = self.task_attempt.finishTime\n    self.finishTimeFormatted = format_unixtime_ms(self.task_attempt.finishTime)\n    self.state = self.task_attempt.stateAsString.lower()\n    self.taskTrackerId = self.task_attempt.taskTracker\n    self.phase = self.task_attempt.phaseAsString\n    self.progress = self.task_attempt.progress\n    self.outputSize = self.task_attempt.outputSize\n    self.shuffleFinishTimeMs = self.task_attempt.shuffleFinishTime\n    self.shuffleFinishTimeFormatted = format_unixtime_ms(self.task_attempt.shuffleFinishTime)\n    self.sortFinishTimeMs = self.task_attempt.sortFinishTime\n    self.sortFinishTimeFormatted = format_unixtime_ms(self.task_attempt.sortFinishTime)\n    self.mapFinishTimeMs = self.task_attempt.mapFinishTime # DO NOT USE, NOT VALID IN 0.20\n    self.mapFinishTimeFormatted = format_unixtime_ms(self.task_attempt.mapFinishTime)\n    self.counters = self.task_attempt.counters\n    self.is_mr2 = False\n\n  def get_tracker(self):\n    try:\n      tracker = Tracker.from_name(self.task.jt, self.taskTrackerId)\n      return tracker\n    except ttypes.TaskTrackerNotFoundException, e:\n      LOGGER.warn(\"Tracker %s not found: %s\" % (self.taskTrackerId, e))\n      if LOGGER.isEnabledFor(logging.DEBUG):\n        all_trackers = self.task.jt.all_task_trackers()\n        for t in all_trackers.trackers:\n          LOGGER.debug(\"Available tracker: %s\" % (t.trackerName,))\n      raise ttypes.TaskTrackerNotFoundException(\n                          _(\"Cannot look up TaskTracker %(id)s.\") % {'id': self.taskTrackerId})\n\n  def get_task_log(self):\n    \"\"\"\n    get_task_log(task_id) -> (stdout_text, stderr_text, syslog_text)\n\n    Retrieve the task log from the TaskTracker, at this url:\n      http://:/tasklog?taskid=\n    Optional query string:\n      &filter=  : where  is 'syslog', 'stdout', or 'stderr'.\n      &start=   : specify the start offset of the log section, when 
using a filter.\n      &end=     : specify the end offset of the log section, when using a filter.\n    \"\"\"\n    tracker = self.get_tracker()\n    url = urlunparse(('http',\n                      '%s:%s' % (tracker.host, tracker.httpPort),\n                      'tasklog',\n                      None,\n                      'attemptid=%s' % (self.attemptId,),\n                      None))\n    LOGGER.info('Retrieving %s' % (url,))\n    try:\n      data = urllib2.urlopen(url)\n    except urllib2.URLError:\n      raise urllib2.URLError(_(\"Cannot retrieve logs from TaskTracker %(id)s.\") % {'id': self.taskTrackerId})\n\n    et = lxml.html.parse(data)\n    log_sections = et.findall('body/pre')\n    logs = [section.text or '' for section in log_sections]\n    if len(logs) < 3:\n      LOGGER.warn('Error parsing task attempt log for %s at \"%s\". Found %d (not 3) log sections' %\n                  (self.attemptId, url, len(log_sections)))\n      err = _(\"Hue encountered an error while retrieving logs from '%s'.\") % (url,)\n      logs += [err] * (3 - len(logs))\n    return logs\n\n\nclass Tracker(object):\n\n  def __getitem__(self, item):\n    \"\"\"\n    For backwards-compatibility, resolve job[\"foo\"] as job.foo.\n    \"\"\"\n    return getattr(self, item)\n\n  @staticmethod\n  def from_name(jt, trackername):\n    return Tracker(jt.task_tracker(trackername))\n\n  def __init__(self, thrifttracker):\n    self.tracker = thrifttracker\n    self._init_attributes();\n\n  def _init_attributes(self):\n    self.trackerId = self.tracker.trackerName\n    self.httpPort = self.tracker.httpPort\n    self.host = self.tracker.host\n    self.lastSeenMs = self.tracker.lastSeen\n    self.lastSeenFormatted = format_unixtime_ms(self.tracker.lastSeen)\n    self.totalVirtualMemory = self.tracker.totalVirtualMemory\n    self.totalPhysicalMemory = self.tracker.totalPhysicalMemory\n    self.availableSpace = self.tracker.availableSpace\n    self.failureCount = self.tracker.failureCount\n    
self.mapCount = self.tracker.mapCount\n    self.reduceCount = self.tracker.reduceCount\n    self.maxMapTasks = self.tracker.maxMapTasks\n    self.maxReduceTasks = self.tracker.maxReduceTasks\n    self.taskReports = self.tracker.taskReports\n    self.is_mr2 = False\n\n\nclass Cluster(object):\n\n  def __getitem__(self, item):\n    \"\"\"\n    For backwards-compatibility, resolve job[\"foo\"] as job.foo\n    \"\"\"\n    return getattr(self, item)\n\n  def __init__(self, jt):\n    self.status = jt.cluster_status()\n    self._init_attributes();\n\n  def _init_attributes(self):\n    self.mapTasksInProgress = self.status.mapTasks\n    self.reduceTasksInProgress = self.status.reduceTasks\n    self.maxMapTasks = self.status.maxMapTasks\n    self.maxReduceTasks = self.status.maxReduceTasks\n    self.usedHeapMemory = self.status.usedMemory\n    self.maxHeapMemory = self.status.maxMemory\n    self.clusterStartTimeMs = self.status.startTime\n    self.clusterStartTimeFormatted = format_unixtime_ms(self.status.startTime)\n    self.identifier = self.status.identifier\n    self.taskTrackerExpiryInterval = self.status.taskTrackerExpiryInterval\n    self.totalJobSubmissions = self.status.totalSubmissions\n    self.state = self.status.stateAsString\n    self.numActiveTrackers = self.status.numActiveTrackers\n    self.activeTrackerNames = self.status.activeTrackerNames\n    self.numBlackListedTrackers = self.status.numBlacklistedTrackers\n    self.blacklistedTrackerNames = self.status.blacklistedTrackerNames\n    self.hostname = self.status.hostname\n    self.httpPort = self.status.httpPort\n\n\nclass LinkJobLogs(object):\n\n  @classmethod\n  def _make_hdfs_links(cls, log):\n    escaped_logs = escape(log)\n    return re.sub('((?<= |;)/|hdfs://)[^ <&\\t;,\\n]+', LinkJobLogs._replace_hdfs_link, escaped_logs)\n\n  @classmethod\n  def _make_mr_links(cls, log):\n    escaped_logs = escape(log)\n    return re.sub('(job_[0-9_]+(/|\\.)?)', LinkJobLogs._replace_mr_link, escaped_logs)\n\n  
@classmethod\n  def _make_links(cls, log):\n    escaped_logs = escape(log)\n    hdfs_links = re.sub('((?<= |;)/|hdfs://)[^ <&\\t;,\\n]+', LinkJobLogs._replace_hdfs_link, escaped_logs)\n    return re.sub('(job_[0-9_]+(/|\\.)?)', LinkJobLogs._replace_mr_link, hdfs_links)\n\n  @classmethod\n  def _replace_hdfs_link(self, match):\n    try:\n      return '%s' % (location_to_url(match.group(0), strict=False), match.group(0))\n    except:\n      LOGGER.exception('failed to replace hdfs links: %s' % (match.groups(),))\n      return match.group(0)\n\n  @classmethod\n  def _replace_mr_link(self, match):\n    try:\n      return '%s' % (reverse('jobbrowser.views.single_job', kwargs={'job': match.group(0)}), match.group(0))\n    except:\n      LOGGER.exception('failed to replace mr links: %s' % (match.groups(),))\n      return match.group(0)\n\n\ndef get_jobconf(jt, jobid):\n  \"\"\"\n  Returns a dict representation of the jobconf for the job corresponding\n  to jobid. filter_keys is an optional list of configuration keys to filter on.\n  \"\"\"\n  jid = jt.thriftjobid_from_string(jobid)\n  # This will throw if the the jobconf can't be found\n  xml_data = jt.get_job_xml(jid)\n  return confparse.ConfParse(xml_data)\n\ndef format_unixtime_ms(unixtime):\n  \"\"\"\n  Format a unix timestamp in ms to a human readable string\n  \"\"\"\n  if unixtime:\n    return str(datetime.datetime.fromtimestamp(unixtime/1000).strftime(\"%x %X %Z\"))\n  else:\n    return \"\"\n\nDOTS = re.compile(\"\\.([a-z])\")\ndef dots_to_camel_case(dots):\n  \"\"\"\n  Takes a string delimited with periods and returns a camel-case string.\n  Example: dots_to_camel_case(\"foo.bar.baz\") //returns fooBarBaz\n  \"\"\"\n  def return_upper(match):\n    return match.groups()[0].upper()\n  return str(DOTS.sub(return_upper, dots))\n\ndef get_path(hdfs_url):\n  \"\"\"\n  Returns the path component of an HDFS url.\n  \"\"\"\n  # urlparse is lame, and only \"uses_netloc\" for a certain\n  # set of protocols.  
So we replace hdfs with gopher:\n  if hdfs_url.startswith(\"hdfs://\"):\n    gopher_url = \"gopher://\" + hdfs_url[7:]\n    path = urlparse(gopher_url)[2] # path\n    return path\n  else:\n    return hdfs_url\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476140,"cells":{"repo_name":{"kind":"string","value":"Havate/havate-openstack"},"path":{"kind":"string","value":"proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/database_backups/workflows/create_backup.py"},"copies":{"kind":"string","value":"11"},"size":{"kind":"string","value":"3194"},"content":{"kind":"string","value":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2013 Rackspace Hosting\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\nimport logging\n\nfrom django.utils.translation import ugettext_lazy as _  # noqa\n\nfrom horizon import exceptions\nfrom horizon import forms\nfrom horizon import workflows\n\nfrom openstack_dashboard import api\n\nLOG = logging.getLogger(__name__)\n\n\nclass BackupDetailsAction(workflows.Action):\n    name = forms.CharField(max_length=80, label=_(\"Name\"))\n    instance = forms.ChoiceField(label=_(\"Database Instance\"))\n    description = forms.CharField(max_length=512, label=_(\"Description\"),\n                                  widget=forms.TextInput(),\n                                  required=False,\n                                  help_text=_(\"Optional Backup Description\"))\n\n    class Meta:\n        name = _(\"Details\")\n        help_text_template = \\\n            \"project/database_backups/_backup_details_help.html\"\n\n    def populate_instance_choices(self, request, context):\n        LOG.info(\"Obtaining list of instances.\")\n        try:\n            instances = api.trove.instance_list(request)\n        except Exception:\n            instances = []\n            msg = _(\"Unable to list database instance to backup.\")\n            exceptions.handle(request, msg)\n        return [(i.id, i.name) for i in instances]\n\n\nclass SetBackupDetails(workflows.Step):\n    action_class = BackupDetailsAction\n    contributes = [\"name\", \"description\", \"instance\"]\n\n\nclass CreateBackup(workflows.Workflow):\n    slug = \"create_backup\"\n    name = _(\"Backup Database\")\n    finalize_button_name = _(\"Backup\")\n    success_message = _('Scheduled backup \"%(name)s\".')\n    failure_message = _('Unable to launch %(count)s named \"%(name)s\".')\n    success_url = \"horizon:project:database_backups:index\"\n    default_steps = [SetBackupDetails]\n\n    def get_initial(self):\n        initial = super(CreateBackup, self).get_initial()\n     
   initial['instance_id']\n\n    def format_status_message(self, message):\n        name = self.context.get('name', 'unknown instance')\n        return message % {\"count\": _(\"instance\"), \"name\": name}\n\n    def handle(self, request, context):\n        try:\n            LOG.info(\"Creating backup\")\n            api.trove.backup_create(request,\n                                    context['name'],\n                                    context['instance'],\n                                    context['description'])\n            return True\n        except Exception:\n            LOG.exception(\"Exception while creating backup\")\n            msg = _('Error creating database backup.')\n            exceptions.handle(request, msg)\n            return False\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476141,"cells":{"repo_name":{"kind":"string","value":"jcshen007/cloudstack"},"path":{"kind":"string","value":"plugins/hypervisors/baremetal/resources/security_group_agent/security_group_agent/sglib.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"7010"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n# Automatically generated by addcopyright.py at 01/29/2013\n\nimport sys, os, time, atexit\r\nimport traceback\r\nimport subprocess\nfrom signal import SIGTERM \r\nimport cherrypy\nimport copy\n\nclass Request(object):\n    def __init__(self):\n        self.headers = None\n        self.body = None\n        self.method = None\n        self.query_string = None\n\n    @staticmethod\n    def from_cherrypy_request(creq):\n        req = Request()\n        req.headers = copy.copy(creq.headers)\n        req.body = creq.body.fp.read() if creq.body else None\n        req.method = copy.copy(creq.method)\n        req.query_string = copy.copy(creq.query_string) if creq.query_string else None\n        return req\n\nclass ShellError(Exception):\n    '''shell error'''\n\nclass ShellCmd(object):\n    '''\n    classdocs\n    '''\n    def __init__(self, cmd, workdir=None, pipe=True):\n        '''\n        Constructor\n        '''\n        self.cmd = cmd\n        if pipe:\n            self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, executable='/bin/sh', cwd=workdir)\n        else:\n            self.process = subprocess.Popen(cmd, shell=True, executable='/bin/sh', cwd=workdir)\n\n        self.stdout = None\n        self.stderr = None\n        self.return_code = None\n\n    def __call__(self, is_exception=True):\n        (self.stdout, self.stderr) = self.process.communicate()\n        if is_exception and self.process.returncode != 0:\n            err = []\n            err.append('failed to execute shell command: %s' % self.cmd)\n            err.append('return code: %s' % self.process.returncode)\n            err.append('stdout: %s' % self.stdout)\n            err.append('stderr: %s' % self.stderr)\n            raise ShellError('\\n'.join(err))\n\n        self.return_code = self.process.returncode\n        
return self.stdout\n\nclass Daemon(object):\n    \"\"\"\n    A generic daemon class.\n\n    Usage: subclass the Daemon class and override the run() method\n    \"\"\"\r\n    atexit_hooks = []\r\n\n    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):\n        self.stdin = stdin\n        self.stdout = stdout\n        self.stderr = stderr\n        self.pidfile = pidfile\n    \r\n    @staticmethod\r\n    def register_atexit_hook(hook):\r\n        Daemon.atexit_hooks.append(hook)\r\n        \r\n    @staticmethod\n    def _atexit():\r\n        for hook in Daemon.atexit_hooks:\r\n            try:\r\n                hook()\n            except Exception:\r\n                content = traceback.format_exc()\n                err = 'Exception when calling atexit hook[%s]\\n%s' % (hook.__name__, content)\r\n                #logger.error(err)\r\n\n    def daemonize(self):\n        \"\"\"\n        do the UNIX double-fork magic, see Stevens' \"Advanced\n        Programming in the UNIX Environment\" for details (ISBN 0201563177)\n        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16\n        \"\"\"\n        try:\n            pid = os.fork()\n            if pid > 0:\n                # exit first parent\n                sys.exit(0)\n        except OSError, e:\n            sys.stderr.write(\"fork #1 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n            sys.exit(1)\n\n        # decouple from parent environment\n        os.chdir(\"/\")\n        os.setsid()\n        os.umask(0)\n\n        # do second fork\n        try:\n            pid = os.fork()\n            if pid > 0:\n                # exit from second parent\n                sys.exit(0)\n        except OSError, e:\n            sys.stderr.write(\"fork #2 failed: %d (%s)\\n\" % (e.errno, e.strerror))\n            sys.exit(1)\n\n        # redirect standard file descriptors\n        sys.stdout.flush()\n        sys.stderr.flush()\n        si = file(self.stdin, 'r')\n        so = 
file(self.stdout, 'a+')\n        se = file(self.stderr, 'a+', 0)\n        os.dup2(si.fileno(), sys.stdin.fileno())\n        os.dup2(so.fileno(), sys.stdout.fileno())\n        os.dup2(se.fileno(), sys.stderr.fileno())\n\n        # write pidfile\n        Daemon.register_atexit_hook(self.delpid)\n        atexit.register(Daemon._atexit)\r\n        pid = str(os.getpid())\n        file(self.pidfile,'w').write(\"%s\\n\" % pid)\n\n    def delpid(self):\n        os.remove(self.pidfile)\n\n    def start(self):\n        \"\"\"\n        Start the daemon\n        \"\"\"\n        # Check for a pidfile to see if the daemon already runs\n        try:\n            pf = file(self.pidfile,'r')\n            pid = int(pf.read().strip())\n            pf.close()\n        except IOError:\n            pid = None\n\n        if pid:\n            pscmd = ShellCmd('ps -p %s > /dev/null' % pid)\n            pscmd(is_exception=False)\n            if pscmd.return_code == 0:\n                message = \"Daemon already running, pid is %s\\n\"\n                sys.stderr.write(message % pid)\n                sys.exit(0)\n\n        # Start the daemon\n        self.daemonize()\r\n        try:\n            self.run()\r\n        except Exception:\r\n            content = traceback.format_exc()\r\n            #logger.error(content)\r\n            sys.exit(1)\n\n    def stop(self):\n        \"\"\"\n        Stop the daemon\n        \"\"\"\n        # Get the pid from the pidfile\n        try:\n            pf = file(self.pidfile,'r')\n            pid = int(pf.read().strip())\n            pf.close()\n        except IOError:\n            pid = None\n\n        if not pid:\n            message = \"pidfile %s does not exist. 
Daemon not running?\\n\"\n            sys.stderr.write(message % self.pidfile)\n            return # not an error in a restart\n\n        # Try killing the daemon process\n        try:\n            while 1:\n                os.kill(pid, SIGTERM)\n                time.sleep(0.1)\n        except OSError, err:\n            err = str(err)\n            if err.find(\"No such process\") > 0:\n                if os.path.exists(self.pidfile):\n                    os.remove(self.pidfile)\n            else:\n                print str(err)\n                sys.exit(1)\n\n    def restart(self):\n        \"\"\"\n        Restart the daemon\n        \"\"\"\n        self.stop()\n        self.start()\n\n    def run(self):\n        \"\"\"\n        You should override this method when you subclass Daemon. It will be called after the process has been\n        daemonized by start() or restart().\n        \"\"\"\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476142,"cells":{"repo_name":{"kind":"string","value":"sgarrity/bedrock"},"path":{"kind":"string","value":"bedrock/firefox/urls.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"7825"},"content":{"kind":"string","value":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django.conf.urls import url\n\nimport bedrock.releasenotes.views\nfrom bedrock.mozorg.util import page\nfrom bedrock.releasenotes import version_re\n\nfrom bedrock.firefox import views\n\nlatest_re = r'^firefox(?:/(?P%s))?/%s/$'\nfirstrun_re = latest_re % (version_re, 'firstrun')\nwhatsnew_re = latest_re % (version_re, 'whatsnew')\nwhatsnew_re_india = latest_re % (version_re, 'whatsnew/india')\nwhatsnew_re_all = latest_re % (version_re, 'whatsnew/all')\nplatform_re = '(?Pandroid|ios)'\nchannel_re = '(?Pbeta|aurora|developer|nightly|organizations)'\nreleasenotes_re = latest_re % (version_re, r'(aurora|release)notes')\nandroid_releasenotes_re = releasenotes_re.replace(r'firefox', 'firefox/android')\nios_releasenotes_re = releasenotes_re.replace(r'firefox', 'firefox/ios')\nsysreq_re = latest_re % (version_re, 'system-requirements')\nandroid_sysreq_re = sysreq_re.replace(r'firefox', 'firefox/android')\nios_sysreq_re = sysreq_re.replace(r'firefox', 'firefox/ios')\n\n\nurlpatterns = (\n    url(r'^firefox/$', views.firefox_home, name='firefox'),\n    url(r'^firefox/all/$', views.firefox_all, name='firefox.all'),\n    url(r'^firefox/accounts/$', views.firefox_accounts, name='firefox.accounts'),\n    url(r'^firefox/campaign/$', views.campaign, name='firefox.campaign'),\n    page('firefox/flashback', 'firefox/flashback/index.html', active_locales=['en-US', 'de', 'fr']),\n    page('firefox/channel/desktop', 'firefox/channel/desktop.html'),\n    page('firefox/channel/android', 'firefox/channel/android.html'),\n    page('firefox/channel/ios', 'firefox/channel/ios.html'),\n    url(r'^firefox/concerts/$', views.firefox_concerts, name='firefox.concerts'),\n    page('firefox/developer', 'firefox/developer/index.html'),\n    url('firefox/election/$', views.election_with_cards, name='firefox.election'),\n    page('firefox/enterprise', 
'firefox/enterprise/index.html'),\n    page('firefox/enterprise/signup', 'firefox/enterprise/signup.html'),\n    page('firefox/enterprise/signup/thanks', 'firefox/enterprise/signup-thanks.html'),\n    page('firefox/facebookcontainer', 'firefox/facebookcontainer/index.html'),\n    page('firefox/features', 'firefox/features/index.html'),\n    url('^firefox/features/bookmarks/$',\n        views.FeaturesBookmarksView.as_view(),\n        name='firefox.features.bookmarks'),\n    url('^firefox/features/fast/$',\n        views.FeaturesFastView.as_view(),\n        name='firefox.features.fast'),\n    url('^firefox/features/independent/$',\n        views.FeaturesIndependentView.as_view(),\n        name='firefox.features.independent'),\n    url('^firefox/features/memory/$',\n        views.FeaturesMemoryView.as_view(),\n        name='firefox.features.memory'),\n    url('^firefox/features/password-manager/$',\n        views.FeaturesPasswordManagerView.as_view(),\n        name='firefox.features.password-manager'),\n    url('^firefox/features/private-browsing/$',\n        views.FeaturesPrivateBrowsingView.as_view(),\n        name='firefox.features.private-browsing'),\n    url(r'^firefox/ios/testflight/$', views.ios_testflight, name='firefox.ios.testflight'),\n    page('firefox/mobile', 'firefox/mobile/index.html'),\n    page('firefox/mobile/get-app', 'firefox/mobile/get-app.html'),\n    url('^firefox/send-to-device-post/$', views.send_to_device_ajax,\n        name='firefox.send-to-device-post'),\n    page('firefox/unsupported-systems', 'firefox/unsupported-systems.html'),\n    url(r'^firefox/new/$', views.new, name='firefox.new'),\n    url(r'^firefox/download/thanks/$', views.download_thanks, name='firefox.download.thanks'),\n    page('firefox/nightly/firstrun', 'firefox/nightly_firstrun.html'),\n    url(r'^firefox/installer-help/$', views.installer_help,\n        name='firefox.installer-help'),\n    url(firstrun_re, views.FirstrunView.as_view(), name='firefox.firstrun'),\n    
url(whatsnew_re, views.WhatsNewRedirectorView.as_view(), name='firefox.whatsnew'),\n    url(whatsnew_re_india, views.WhatsNewIndiaView.as_view(), name='firefox.whatsnew.india'),\n    url(whatsnew_re_all, views.WhatsnewView.as_view(), name='firefox.whatsnew.all'),\n\n    page('firefox/features/adblocker', 'firefox/features/adblocker.html'),\n    page('firefox/concerts', 'firefox/concerts.html'),\n\n    # Release notes\n    url('^firefox/(?:%s/)?(?:%s/)?notes/$' % (platform_re, channel_re),\n        bedrock.releasenotes.views.latest_notes, name='firefox.notes'),\n    url('^firefox/nightly/notes/feed/$',\n        bedrock.releasenotes.views.nightly_feed, name='firefox.nightly.notes.feed'),\n    url('firefox/(?:latest/)?releasenotes/$', bedrock.releasenotes.views.latest_notes,\n        {'product': 'firefox'}),\n    url('^firefox/(?:%s/)?(?:%s/)?system-requirements/$' % (platform_re, channel_re),\n        bedrock.releasenotes.views.latest_sysreq,\n        {'product': 'firefox'}, name='firefox.sysreq'),\n    url(releasenotes_re, bedrock.releasenotes.views.release_notes, name='firefox.desktop.releasenotes'),\n    url(android_releasenotes_re, bedrock.releasenotes.views.release_notes,\n        {'product': 'Firefox for Android'}, name='firefox.android.releasenotes'),\n    url(ios_releasenotes_re, bedrock.releasenotes.views.release_notes,\n        {'product': 'Firefox for iOS'}, name='firefox.ios.releasenotes'),\n    url(sysreq_re, bedrock.releasenotes.views.system_requirements,\n        name='firefox.system_requirements'),\n    url(android_sysreq_re, bedrock.releasenotes.views.system_requirements,\n        {'product': 'Firefox for Android'}, name='firefox.android.system_requirements'),\n    url(ios_sysreq_re, bedrock.releasenotes.views.system_requirements,\n        {'product': 'Firefox for iOS'}, name='firefox.ios.system_requirements'),\n    url('^firefox/releases/$', bedrock.releasenotes.views.releases_index,\n        {'product': 'Firefox'}, 
name='firefox.releases.index'),\n\n    url('^firefox/stub_attribution_code/$', views.stub_attribution_code,\n        name='firefox.stub_attribution_code'),\n\n    url(r'^firefox/welcome/1/$', views.firefox_welcome_page1, name='firefox.welcome.page1'),\n    page('firefox/welcome/2', 'firefox/welcome/page2.html'),\n    page('firefox/welcome/3', 'firefox/welcome/page3.html'),\n    page('firefox/welcome/4', 'firefox/welcome/page4.html'),\n    page('firefox/welcome/5', 'firefox/welcome/page5.html'),\n\n    page('firefox/switch', 'firefox/switch.html'),\n    page('firefox/pocket', 'firefox/pocket.html'),\n\n    # Bug 1519084\n    page('firefox/dedicated-profiles', 'firefox/dedicated-profiles.html'),\n\n    # Issue 6178\n    page('firefox/this-browser-comes-highly-recommended', 'firefox/recommended.html'),\n\n    # Issue 6604, SEO firefox/new pages\n    page('firefox/windows', 'firefox/new/scene1_windows.html'),\n    page('firefox/mac', 'firefox/new/scene1_mac.html'),\n    page('firefox/linux', 'firefox/new/scene1_linux.html'),\n\n    page('firefox/windows-64-bit', 'firefox/windows-64-bit.html'),\n    page('firefox/enterprise/sla', 'firefox/enterprise/sla.html'),\n\n    page('firefox/features/safebrowser', 'firefox/features/safebrowser.html'),\n    page('firefox/best-browser', 'firefox/best-browser.html'),\n\n    page('firefox/browsers/compare', 'firefox/compare/index.html'),\n    page('firefox/browsers/compare/chrome', 'firefox/compare/chrome.html'),\n\n    # Lockwise\n    page('firefox/lockwise', 'firefox/lockwise/lockwise.html'),\n\n    # Issue 7765, 7709\n    page('firefox/privacy', 'firefox/privacy/index.html'),\n    page('firefox/privacy/products', 'firefox/privacy/products.html'),\n\n    # Issue 8432\n    page('firefox/set-as-default/thanks', 'firefox/set-as-default/thanks.html'),\n    # Default browser campaign\n    page('firefox/set-as-default', 
'firefox/set-as-default/landing-page.html')\n)\n"},"license":{"kind":"string","value":"mpl-2.0"}}},{"rowIdx":476143,"cells":{"repo_name":{"kind":"string","value":"emonty/ansible"},"path":{"kind":"string","value":"hacking/fix_test_syntax.py"},"copies":{"kind":"string","value":"135"},"size":{"kind":"string","value":"3563"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# (c) 2017, Matt Martz \n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible.  If not, see .\n\n# Purpose:\n# The purpose of this script is to convert uses of tests as filters to proper jinja test syntax\n# as part of https://github.com/ansible/proposals/issues/83\n\n# Notes:\n# This script is imperfect, but was close enough to \"fix\" all integration tests\n# with the exception of:\n#\n# 1. One file needed manual remediation, where \\\\\\\\ was ultimately replace with \\\\ in 8 locations.\n# 2. Multiple filter pipeline is unsupported. 
Example:\n#        var|string|search('foo')\n#    Which should be converted to:\n#        var|string is search('foo')\n\nimport argparse\nimport os\nimport re\n\nfrom ansible.plugins.test import core, files, mathstuff\n\n\nTESTS = list(core.TestModule().tests().keys()) + list(files.TestModule().tests().keys()) + list(mathstuff.TestModule().tests().keys())\n\n\nTEST_MAP = {\n    'version_compare': 'version',\n    'is_dir': 'directory',\n    'is_file': 'file',\n    'is_link': 'link',\n    'is_abs': 'abs',\n    'is_same_file': 'same_file',\n    'is_mount': 'mount',\n    'issubset': 'subset',\n    'issuperset': 'superset',\n    'isnan': 'nan',\n    'succeeded': 'successful',\n    'success': 'successful',\n    'change': 'changed',\n    'skip': 'skipped',\n}\n\n\nFILTER_RE = re.compile(r'((.+?)\\s*([\\w \\.\\'\"]+)(\\s*)\\|(\\s*)(\\w+))')\nNOT_RE = re.compile(r'( ?)not ')\nASSERT_SPACE_RE = re.compile(r'- ([\\'\"])\\s+')\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    'path',\n    help='Path to a directory that will be recursively walked. 
All .yml and .yaml files will be evaluated '\n         'and uses of tests as filters will be conveted to proper jinja test syntax files to have test syntax '\n         'fixed'\n)\nargs = parser.parse_args()\n\nfor root, dirs, filenames in os.walk(args.path):\n    for name in filenames:\n        if os.path.splitext(name)[1] not in ('.yml', '.yaml'):\n            continue\n        path = os.path.join(root, name)\n\n        print(path)\n        with open(path) as f:\n            text = f.read()\n\n        for match in FILTER_RE.findall(text):\n            filter_name = match[5]\n\n            is_not = match[2].strip(' \"\\'').startswith('not ')\n\n            try:\n                test_name = TEST_MAP[filter_name]\n            except KeyError:\n                test_name = filter_name\n\n            if test_name not in TESTS:\n                continue\n\n            if is_not:\n                before = NOT_RE.sub(r'\\1', match[2]).rstrip()\n                text = re.sub(\n                    re.escape(match[0]),\n                    '%s %s is not %s' % (match[1], before, test_name,),\n                    text\n                )\n            else:\n                text = re.sub(\n                    re.escape(match[0]),\n                    '%s %s is %s' % (match[1], match[2].rstrip(), test_name,),\n                    text\n                )\n\n        with open(path, 'w+') as f:\n            f.write(text)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476144,"cells":{"repo_name":{"kind":"string","value":"open-pli/enigma2"},"path":{"kind":"string","value":"lib/python/Components/ActionMap.py"},"copies":{"kind":"string","value":"45"},"size":{"kind":"string","value":"2560"},"content":{"kind":"string","value":"from enigma import eActionMap\n\nclass ActionMap:\n\tdef __init__(self, contexts = [ ], actions = { }, prio=0):\n\t\tself.actions = actions\n\t\tself.contexts = contexts\n\t\tself.prio = prio\n\t\tself.p = eActionMap.getInstance()\n\t\tself.bound = 
False\n\t\tself.exec_active = False\n\t\tself.enabled = True\n\n\tdef setEnabled(self, enabled):\n\t\tself.enabled = enabled\n\t\tself.checkBind()\n\n\tdef doBind(self):\n\t\tif not self.bound:\n\t\t\tfor ctx in self.contexts:\n\t\t\t\tself.p.bindAction(ctx, self.prio, self.action)\n\t\t\tself.bound = True\n\n\tdef doUnbind(self):\n\t\tif self.bound:\n\t\t\tfor ctx in self.contexts:\n\t\t\t\tself.p.unbindAction(ctx, self.action)\n\t\t\tself.bound = False\n\n\tdef checkBind(self):\n\t\tif self.exec_active and self.enabled:\n\t\t\tself.doBind()\n\t\telse:\n\t\t\tself.doUnbind()\n\n\tdef execBegin(self):\n\t\tself.exec_active = True\n\t\tself.checkBind()\n\n\tdef execEnd(self):\n\t\tself.exec_active = False\n\t\tself.checkBind()\n\n\tdef action(self, context, action):\n\t\tprint \" \".join((\"action -> \", context, action))\n\t\tif self.actions.has_key(action):\n\t\t\tres = self.actions[action]()\n\t\t\tif res is not None:\n\t\t\t\treturn res\n\t\t\treturn 1\n\t\telse:\n\t\t\tprint \"unknown action %s/%s! 
typo in keymap?\" % (context, action)\n\t\t\treturn 0\n\n\tdef destroy(self):\n\t\tpass\n\nclass NumberActionMap(ActionMap):\n\tdef action(self, contexts, action):\n\t\tnumbers = (\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\")\n\t\tif (action in numbers and self.actions.has_key(action)):\n\t\t\tres = self.actions[action](int(action))\n\t\t\tif res is not None:\n\t\t\t\treturn res\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn ActionMap.action(self, contexts, action)\n\nclass HelpableActionMap(ActionMap):\n\t\"\"\"An Actionmap which automatically puts the actions into the helpList.\n\n\tNote that you can only use ONE context here!\"\"\"\n\n\t# sorry for this complicated code.\n\t# it's not more than converting a \"documented\" actionmap\n\t# (where the values are possibly (function, help)-tuples)\n\t# into a \"classic\" actionmap, where values are just functions.\n\t# the classic actionmap is then passed to the ActionMap constructor,\n\t# the collected helpstrings (with correct context, action) is\n\t# added to the screen's \"helpList\", which will be picked up by\n\t# the \"HelpableScreen\".\n\tdef __init__(self, parent, context, actions = { }, prio=0):\n\t\talist = [ ]\n\t\tadict = { }\n\t\tfor (action, funchelp) in actions.iteritems():\n\t\t\t# check if this is a tuple\n\t\t\tif isinstance(funchelp, tuple):\n\t\t\t\talist.append((action, funchelp[1]))\n\t\t\t\tadict[action] = funchelp[0]\n\t\t\telse:\n\t\t\t\tadict[action] = funchelp\n\n\t\tActionMap.__init__(self, [context], adict, prio)\n\n\t\tparent.helpList.append((self, context, alist))\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":476145,"cells":{"repo_name":{"kind":"string","value":"gurneyalex/OpenUpgrade"},"path":{"kind":"string","value":"addons/mail/tests/test_mail_message.py"},"copies":{"kind":"string","value":"38"},"size":{"kind":"string","value":"27445"},"content":{"kind":"string","value":"# -*- coding: utf-8 
-*-\n##############################################################################\n#\n#    OpenERP, Open Source Business Applications\n#    Copyright (c) 2012-TODAY OpenERP S.A. \n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU Affero General Public License as\n#    published by the Free Software Foundation, either version 3 of the\n#    License, or (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n#    GNU Affero General Public License for more details.\n#\n#    You should have received a copy of the GNU Affero General Public License\n#    along with this program.  If not, see .\n#\n##############################################################################\n\nfrom openerp.addons.mail.tests.common import TestMail\nfrom openerp.exceptions import AccessError\nfrom openerp.osv.orm import except_orm\nfrom openerp.tools import mute_logger\n\n\nclass TestMailMail(TestMail):\n\n    def test_00_partner_find_from_email(self):\n        \"\"\" Tests designed for partner fetch based on emails. 
\"\"\"\n        cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs\n\n        # --------------------------------------------------\n        # Data creation\n        # --------------------------------------------------\n        # 1 - Partner ARaoul\n        p_a_id = self.res_partner.create(cr, uid, {'name': 'ARaoul', 'email': 'test@test.fr'})\n\n        # --------------------------------------------------\n        # CASE1: without object\n        # --------------------------------------------------\n\n        # Do: find partner with email -> first partner should be found\n        partner_info = self.mail_thread.message_partner_info_from_emails(cr, uid, None, ['Maybe Raoul '], link_mail=False)[0]\n        self.assertEqual(partner_info['full_name'], 'Maybe Raoul ',\n                         'mail_thread: message_partner_info_from_emails did not handle email')\n        self.assertEqual(partner_info['partner_id'], p_a_id,\n                         'mail_thread: message_partner_info_from_emails wrong partner found')\n\n        # Data: add some data about partners\n        # 2 - User BRaoul\n        p_b_id = self.res_partner.create(cr, uid, {'name': 'BRaoul', 'email': 'test@test.fr', 'user_ids': [(4, user_raoul.id)]})\n\n        # Do: find partner with email -> first user should be found\n        partner_info = self.mail_thread.message_partner_info_from_emails(cr, uid, None, ['Maybe Raoul '], link_mail=False)[0]\n        self.assertEqual(partner_info['partner_id'], p_b_id,\n                         'mail_thread: message_partner_info_from_emails wrong partner found')\n\n        # --------------------------------------------------\n        # CASE1: with object\n        # --------------------------------------------------\n\n        # Do: find partner in group where there is a follower with the email -> should be taken\n        self.mail_group.message_subscribe(cr, uid, [group_pigs.id], [p_b_id])\n        partner_info = 
self.mail_group.message_partner_info_from_emails(cr, uid, group_pigs.id, ['Maybe Raoul '], link_mail=False)[0]\n        self.assertEqual(partner_info['partner_id'], p_b_id,\n                         'mail_thread: message_partner_info_from_emails wrong partner found')\n\n\nclass TestMailMessage(TestMail):\n\n    def test_00_mail_message_values(self):\n        \"\"\" Tests designed for testing email values based on mail.message, aliases, ... \"\"\"\n        cr, uid, user_raoul_id = self.cr, self.uid, self.user_raoul_id\n\n        # Data: update + generic variables\n        reply_to1 = '_reply_to1@example.com'\n        reply_to2 = '_reply_to2@example.com'\n        email_from1 = 'from@example.com'\n        alias_domain = 'schlouby.fr'\n        raoul_from = 'Raoul Grosbedon '\n        raoul_from_alias = 'Raoul Grosbedon '\n        raoul_reply = '\"Followers of Pigs\" '\n        raoul_reply_alias = '\"Followers of Pigs\" '\n\n        # --------------------------------------------------\n        # Case1: without alias_domain\n        # --------------------------------------------------\n        param_ids = self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.domain')])\n        self.registry('ir.config_parameter').unlink(cr, uid, param_ids)\n\n        # Do: free message; specified values > default values\n        msg_id = self.mail_message.create(cr, user_raoul_id, {'reply_to': reply_to1, 'email_from': email_from1})\n        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)\n        # Test: message content\n        self.assertIn('reply_to', msg.message_id,\n                      'mail_message: message_id should be specific to a mail_message with a given reply_to')\n        self.assertEqual(msg.reply_to, reply_to1,\n                         'mail_message: incorrect reply_to: should come from values')\n        self.assertEqual(msg.email_from, email_from1,\n                         'mail_message: incorrect email_from: should come from 
values')\n\n        # Do: create a mail_mail with the previous mail_message + specified reply_to\n        mail_id = self.mail_mail.create(cr, user_raoul_id, {'mail_message_id': msg_id, 'state': 'cancel', 'reply_to': reply_to2})\n        mail = self.mail_mail.browse(cr, user_raoul_id, mail_id)\n        # Test: mail_mail content\n        self.assertEqual(mail.reply_to, reply_to2,\n                         'mail_mail: incorrect reply_to: should come from values')\n        self.assertEqual(mail.email_from, email_from1,\n                         'mail_mail: incorrect email_from: should come from mail.message')\n\n        # Do: mail_message attached to a document\n        msg_id = self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_pigs_id})\n        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)\n        # Test: message content\n        self.assertIn('mail.group', msg.message_id,\n                      'mail_message: message_id should contain model')\n        self.assertIn('%s' % self.group_pigs_id, msg.message_id,\n                      'mail_message: message_id should contain res_id')\n        self.assertEqual(msg.reply_to, raoul_reply,\n                         'mail_message: incorrect reply_to: should be Raoul')\n        self.assertEqual(msg.email_from, raoul_from,\n                         'mail_message: incorrect email_from: should be Raoul')\n\n        # --------------------------------------------------\n        # Case2: with alias_domain, without catchall alias\n        # --------------------------------------------------\n        self.registry('ir.config_parameter').set_param(cr, uid, 'mail.catchall.domain', alias_domain)\n        self.registry('ir.config_parameter').unlink(cr, uid, self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.alias')]))\n\n        # Update message\n        msg_id = self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': 
self.group_pigs_id})\n        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)\n        # Test: generated reply_to\n        self.assertEqual(msg.reply_to, raoul_reply_alias,\n                         'mail_mail: incorrect reply_to: should be Pigs alias')\n\n        # Update message: test alias on email_from\n        msg_id = self.mail_message.create(cr, user_raoul_id, {})\n        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)\n        # Test: generated reply_to\n        self.assertEqual(msg.reply_to, raoul_from_alias,\n                         'mail_mail: incorrect reply_to: should be message email_from using Raoul alias')\n\n        # --------------------------------------------------\n        # Case2: with alias_domain and  catchall alias\n        # --------------------------------------------------\n        self.registry('ir.config_parameter').set_param(self.cr, self.uid, 'mail.catchall.alias', 'gateway')\n\n        # Update message\n        msg_id = self.mail_message.create(cr, user_raoul_id, {})\n        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)\n        # Test: generated reply_to\n        self.assertEqual(msg.reply_to, 'gateway@schlouby.fr',\n                         'mail_mail: reply_to should equal the catchall email alias')\n\n        # Do: create a mail_mail\n        mail_id = self.mail_mail.create(cr, uid, {'state': 'cancel', 'reply_to': 'someone@example.com'})\n        mail = self.mail_mail.browse(cr, uid, mail_id)\n        # Test: mail_mail content\n        self.assertEqual(mail.reply_to, 'someone@example.com',\n                         'mail_mail: reply_to should equal the rpely_to given to create')\n\n    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')\n    def test_10_mail_message_search_access_rights(self):\n        \"\"\" Testing mail_message.search() using specific _search implementation \"\"\"\n        cr, uid, group_pigs_id = self.cr, self.uid, self.group_pigs_id\n        # Data: comment 
subtype for mail.message creation\n        ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'mail', 'mt_comment')\n        subtype_id = ref and ref[1] or False\n\n        # Data: Birds group, private\n        group_birds_id = self.mail_group.create(self.cr, self.uid, {'name': 'Birds', 'public': 'private'})\n        # Data: Raoul is member of Pigs\n        self.mail_group.message_subscribe(cr, uid, [group_pigs_id], [self.partner_raoul_id])\n        # Data: various author_ids, partner_ids, documents\n        msg_id1 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A', 'subtype_id': subtype_id})\n        msg_id2 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B', 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})\n        msg_id3 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'subtype_id': subtype_id})\n        msg_id4 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})\n        msg_id5 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+R Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})\n        msg_id6 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Birds', 'model': 'mail.group', 'res_id': group_birds_id, 'subtype_id': subtype_id})\n        msg_id7 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B', 'subtype_id': subtype_id})\n        msg_id8 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B+R', 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})\n\n        # Test: Bert: 2 messages that have Bert in partner_ids\n        msg_ids = self.mail_message.search(cr, 
self.user_bert_id, [('subject', 'like', '_Test')])\n        self.assertEqual(set([msg_id2, msg_id4]), set(msg_ids), 'mail_message search failed')\n        # Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group)\n        msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test'), ('body', 'like', 'A')])\n        self.assertEqual(set([msg_id3, msg_id4, msg_id5]), set(msg_ids), 'mail_message search failed')\n        # Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group) + 2 messages as author\n        msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test')])\n        self.assertEqual(set([msg_id3, msg_id4, msg_id5, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')\n        # Test: Admin: all messages\n        msg_ids = self.mail_message.search(cr, uid, [('subject', 'like', '_Test')])\n        self.assertEqual(set([msg_id1, msg_id2, msg_id3, msg_id4, msg_id5, msg_id6, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')\n\n    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')\n    def test_15_mail_message_check_access_rule(self):\n        \"\"\" Testing mail_message.check_access_rule() \"\"\"\n        cr, uid = self.cr, self.uid\n        partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id\n        user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id\n\n        # Prepare groups: Pigs (employee), Jobs (public)\n        pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message')\n        priv_msg_id = self.mail_group.message_post(cr, uid, self.group_priv_id, body='Message')\n\n        # prepare an attachment\n        attachment_id = self.ir_attachment.create(cr, uid, {'datas': 'My attachment'.encode('base64'), 'name': 'doc.txt', 'datas_fname': 'doc.txt'})\n\n        
# ----------------------------------------\n        # CASE1: read\n        # ----------------------------------------\n\n        # Do: create a new mail.message\n        message_id = self.mail_message.create(cr, uid, {'body': 'My Body', 'attachment_ids': [(4, attachment_id)]})\n\n        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc\n        self.assertRaises(except_orm, self.mail_message.read,\n            cr, user_bert_id, message_id)\n        # Do: message is pushed to Bert\n        notif_id = self.mail_notification.create(cr, uid, {'message_id': message_id, 'partner_id': partner_bert_id})\n        # Test: Bert reads the message, ok because notification pushed\n        self.mail_message.read(cr, user_bert_id, message_id)\n        # Test: Bert downloads attachment, ok because he can read message\n        self.mail_message.download_attachment(cr, user_bert_id, message_id, attachment_id)\n        # Do: remove notification\n        self.mail_notification.unlink(cr, uid, notif_id)\n        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc\n        self.assertRaises(except_orm, self.mail_message.read,\n            cr, self.user_bert_id, message_id)\n        # Test: Bert downloads attachment, crash because he can't read message\n        self.assertRaises(except_orm, self.mail_message.download_attachment,\n            cr, user_bert_id, message_id, attachment_id)\n        # Do: Bert is now the author\n        self.mail_message.write(cr, uid, [message_id], {'author_id': partner_bert_id})\n        # Test: Bert reads the message, ok because Bert is the author\n        self.mail_message.read(cr, user_bert_id, message_id)\n        # Do: Bert is not the author anymore\n        self.mail_message.write(cr, uid, [message_id], {'author_id': partner_raoul_id})\n        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc\n        
self.assertRaises(except_orm, self.mail_message.read,\n            cr, user_bert_id, message_id)\n        # Do: message is attached to a document Bert can read, Jobs\n        self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_jobs_id})\n        # Test: Bert reads the message, ok because linked to a doc he is allowed to read\n        self.mail_message.read(cr, user_bert_id, message_id)\n        # Do: message is attached to a document Bert cannot read, Pigs\n        self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_pigs_id})\n        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc\n        self.assertRaises(except_orm, self.mail_message.read,\n            cr, user_bert_id, message_id)\n\n        # ----------------------------------------\n        # CASE2: create\n        # ----------------------------------------\n\n        # Do: Bert creates a message on Pigs -> ko, no creation rights\n        self.assertRaises(AccessError, self.mail_message.create,\n            cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_pigs_id, 'body': 'Test'})\n        # Do: Bert create a message on Jobs -> ko, no creation rights\n        self.assertRaises(AccessError, self.mail_message.create,\n            cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})\n        # Do: Bert create a private message -> ko, no creation rights\n        self.assertRaises(AccessError, self.mail_message.create,\n            cr, user_bert_id, {'body': 'Test'})\n\n        # Do: Raoul creates a message on Jobs -> ok, write access to the related document\n        self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})\n        # Do: Raoul creates a message on Priv -> ko, no write access to the related document\n        self.assertRaises(except_orm, 
self.mail_message.create,\n            cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test'})\n        # Do: Raoul creates a private message -> ok\n        self.mail_message.create(cr, user_raoul_id, {'body': 'Test'})\n        # Do: Raoul creates a reply to a message on Priv -> ko\n        self.assertRaises(except_orm, self.mail_message.create,\n            cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})\n        # Do: Raoul creates a reply to a message on Priv-> ok if has received parent\n        self.mail_notification.create(cr, uid, {'message_id': priv_msg_id, 'partner_id': self.partner_raoul_id})\n        self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})\n\n    def test_20_message_set_star(self):\n        \"\"\" Tests for starring messages and its related access rights \"\"\"\n        cr, uid = self.cr, self.uid\n        # Data: post a message on Pigs\n        msg_id = self.group_pigs.message_post(body='My Body', subject='1')\n        msg = self.mail_message.browse(cr, uid, msg_id)\n        msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)\n\n        # Do: Admin stars msg\n        self.mail_message.set_message_starred(cr, uid, [msg.id], True)\n        msg.refresh()\n        # Test: notification exists\n        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])\n        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')\n        # Test: notification starred\n        notif = self.mail_notification.browse(cr, uid, notif_ids[0])\n        self.assertTrue(notif.starred, 'mail_notification starred failed')\n        self.assertTrue(msg.starred, 'mail_message starred failed')\n\n        # Do: Raoul stars msg\n        
self.mail_message.set_message_starred(cr, self.user_raoul_id, [msg.id], True)\n        msg_raoul.refresh()\n        # Test: notification exists\n        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])\n        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')\n        # Test: notification starred\n        notif = self.mail_notification.browse(cr, uid, notif_ids[0])\n        self.assertTrue(notif.starred, 'mail_notification starred failed')\n        self.assertTrue(msg_raoul.starred, 'mail_message starred failed')\n\n        # Do: Admin unstars msg\n        self.mail_message.set_message_starred(cr, uid, [msg.id], False)\n        msg.refresh()\n        msg_raoul.refresh()\n        # Test: msg unstarred for Admin, starred for Raoul\n        self.assertFalse(msg.starred, 'mail_message starred failed')\n        self.assertTrue(msg_raoul.starred, 'mail_message starred failed')\n\n    def test_30_message_set_read(self):\n        \"\"\" Tests for reading messages and its related access rights \"\"\"\n        cr, uid = self.cr, self.uid\n        # Data: post a message on Pigs\n        msg_id = self.group_pigs.message_post(body='My Body', subject='1')\n        msg = self.mail_message.browse(cr, uid, msg_id)\n        msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)\n\n        # Do: Admin reads msg\n        self.mail_message.set_message_read(cr, uid, [msg.id], True)\n        msg.refresh()\n        # Test: notification exists\n        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])\n        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')\n        # Test: notification read\n        notif = self.mail_notification.browse(cr, uid, notif_ids[0])\n        self.assertTrue(notif.read, 
'mail_notification read failed')\n        self.assertFalse(msg.to_read, 'mail_message read failed')\n\n        # Do: Raoul reads msg\n        self.mail_message.set_message_read(cr, self.user_raoul_id, [msg.id], True)\n        msg_raoul.refresh()\n        # Test: notification exists\n        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])\n        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')\n        # Test: notification read\n        notif = self.mail_notification.browse(cr, uid, notif_ids[0])\n        self.assertTrue(notif.read, 'mail_notification starred failed')\n        self.assertFalse(msg_raoul.to_read, 'mail_message starred failed')\n\n        # Do: Admin unreads msg\n        self.mail_message.set_message_read(cr, uid, [msg.id], False)\n        msg.refresh()\n        msg_raoul.refresh()\n        # Test: msg unread for Admin, read for Raoul\n        self.assertTrue(msg.to_read, 'mail_message read failed')\n        self.assertFalse(msg_raoul.to_read, 'mail_message read failed')\n\n    def test_40_message_vote(self):\n        \"\"\" Test designed for the vote/unvote feature. 
\"\"\"\n        cr, uid = self.cr, self.uid\n        # Data: post a message on Pigs\n        msg_id = self.group_pigs.message_post(body='My Body', subject='1')\n        msg = self.mail_message.browse(cr, uid, msg_id)\n        msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)\n\n        # Do: Admin vote for msg\n        self.mail_message.vote_toggle(cr, uid, [msg.id])\n        msg.refresh()\n        # Test: msg has Admin as voter\n        self.assertEqual(set(msg.vote_user_ids), set([self.user_admin]), 'mail_message vote: after voting, Admin should be in the voter')\n        # Do: Bert vote for msg\n        self.mail_message.vote_toggle(cr, self.user_raoul_id, [msg.id])\n        msg_raoul.refresh()\n        # Test: msg has Admin and Bert as voters\n        self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_admin, self.user_raoul]), 'mail_message vote: after voting, Admin and Bert should be in the voters')\n        # Do: Admin unvote for msg\n        self.mail_message.vote_toggle(cr, uid, [msg.id])\n        msg.refresh()\n        msg_raoul.refresh()\n        # Test: msg has Bert as voter\n        self.assertEqual(set(msg.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Bert should be in the voter')\n        self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Bert should be in the voter')\n\n    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')\n    def test_50_mail_flow_access_rights(self):\n        \"\"\" Test a Chatter-looks alike flow to test access rights \"\"\"\n        cr, uid = self.cr, self.uid\n        mail_compose = self.registry('mail.compose.message')\n        partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id\n        user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id\n\n        # Prepare groups: Pigs (employee), Jobs (public)\n        pigs_msg_id = self.mail_group.message_post(cr, 
uid, self.group_pigs_id, body='Message', partner_ids=[self.partner_admin_id])\n        jobs_msg_id = self.mail_group.message_post(cr, uid, self.group_jobs_id, body='Message', partner_ids=[self.partner_admin_id])\n\n        # ----------------------------------------\n        # CASE1: Bert, without groups\n        # ----------------------------------------\n\n        # Do: Bert reads Jobs basic fields, ok because public = read access on the group\n        self.mail_group.read(cr, user_bert_id, self.group_jobs_id, ['name', 'description'])\n        # Do: Bert reads Jobs messages, ok because read access on the group => read access on its messages\n        jobs_message_ids = self.mail_group.read(cr, user_bert_id, self.group_jobs_id, ['message_ids'])['message_ids']\n        self.mail_message.read(cr, user_bert_id, jobs_message_ids)\n        # Do: Bert browses Jobs, ok (no direct browse of partners), ok for messages, ko for followers (accessible to employees or partner manager)\n        bert_jobs = self.mail_group.browse(cr, user_bert_id, self.group_jobs_id)\n        trigger_read = bert_jobs.name\n        for message in bert_jobs.message_ids:\n            trigger_read = message.subject\n        for partner in bert_jobs.message_follower_ids:\n            with self.assertRaises(AccessError):\n                trigger_read = partner.name\n        # Do: Bert comments Jobs, ko because no creation right\n        self.assertRaises(AccessError,\n                          self.mail_group.message_post,\n                          cr, user_bert_id, self.group_jobs_id, body='I love Pigs')\n\n        # Do: Bert writes on its own profile, ko because no message create access\n        with self.assertRaises(AccessError):\n            self.res_users.message_post(cr, user_bert_id, user_bert_id, body='I love Bert')\n            self.res_partner.message_post(cr, user_bert_id, partner_bert_id, body='I love Bert')\n\n        # ----------------------------------------\n        # CASE2: Raoul, 
employee\n        # ----------------------------------------\n\n        # Do: Raoul browses Jobs -> ok, ok for message_ids, of for message_follower_ids\n        raoul_jobs = self.mail_group.browse(cr, user_raoul_id, self.group_jobs_id)\n        trigger_read = raoul_jobs.name\n        for message in raoul_jobs.message_ids:\n            trigger_read = message.subject\n        for partner in raoul_jobs.message_follower_ids:\n            trigger_read = partner.name\n\n        # Do: Raoul comments Jobs, ok\n        self.mail_group.message_post(cr, user_raoul_id, self.group_jobs_id, body='I love Pigs')\n        # Do: Raoul create a mail.compose.message record on Jobs, because he uses the wizard\n        compose_id = mail_compose.create(cr, user_raoul_id,\n            {'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},\n            {'default_composition_mode': 'comment', 'default_model': 'mail.group', 'default_res_id': self.group_jobs_id})\n        mail_compose.send_mail(cr, user_raoul_id, [compose_id])\n        # Do: Raoul replies to a Jobs message using the composer\n        compose_id = mail_compose.create(cr, user_raoul_id,\n            {'subject': 'Subject', 'body': 'Body text'},\n            {'default_composition_mode': 'reply', 'default_parent_id': pigs_msg_id})\n        mail_compose.send_mail(cr, user_raoul_id, [compose_id])\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":476146,"cells":{"repo_name":{"kind":"string","value":"tseaver/google-cloud-python"},"path":{"kind":"string","value":"logging/tests/unit/handlers/test_handlers.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"4970"},"content":{"kind":"string","value":"# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by 
applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport unittest\n\n\nclass TestCloudLoggingHandler(unittest.TestCase):\n\n    PROJECT = \"PROJECT\"\n\n    @staticmethod\n    def _get_target_class():\n        from google.cloud.logging.handlers.handlers import CloudLoggingHandler\n\n        return CloudLoggingHandler\n\n    def _make_one(self, *args, **kw):\n        return self._get_target_class()(*args, **kw)\n\n    def test_ctor_defaults(self):\n        import sys\n        from google.cloud.logging.logger import _GLOBAL_RESOURCE\n        from google.cloud.logging.handlers.handlers import DEFAULT_LOGGER_NAME\n\n        client = _Client(self.PROJECT)\n        handler = self._make_one(client, transport=_Transport)\n        self.assertEqual(handler.name, DEFAULT_LOGGER_NAME)\n        self.assertIs(handler.client, client)\n        self.assertIsInstance(handler.transport, _Transport)\n        self.assertIs(handler.transport.client, client)\n        self.assertEqual(handler.transport.name, DEFAULT_LOGGER_NAME)\n        self.assertIs(handler.resource, _GLOBAL_RESOURCE)\n        self.assertIsNone(handler.labels)\n        self.assertIs(handler.stream, sys.stderr)\n\n    def test_ctor_explicit(self):\n        import io\n        from google.cloud.logging.resource import Resource\n\n        resource = Resource(\"resource_type\", {\"resource_label\": \"value\"})\n        labels = {\"handler_lable\": \"value\"}\n        name = \"test-logger\"\n        client = _Client(self.PROJECT)\n        stream = io.BytesIO()\n        handler = self._make_one(\n            client,\n            name=name,\n            transport=_Transport,\n            resource=resource,\n            labels=labels,\n            stream=stream,\n 
       )\n        self.assertEqual(handler.name, name)\n        self.assertIs(handler.client, client)\n        self.assertIsInstance(handler.transport, _Transport)\n        self.assertIs(handler.transport.client, client)\n        self.assertEqual(handler.transport.name, name)\n        self.assertIs(handler.resource, resource)\n        self.assertEqual(handler.labels, labels)\n        self.assertIs(handler.stream, stream)\n\n    def test_emit(self):\n        from google.cloud.logging.logger import _GLOBAL_RESOURCE\n\n        client = _Client(self.PROJECT)\n        handler = self._make_one(\n            client, transport=_Transport, resource=_GLOBAL_RESOURCE\n        )\n        logname = \"loggername\"\n        message = \"hello world\"\n        record = logging.LogRecord(logname, logging, None, None, message, None, None)\n        handler.emit(record)\n\n        self.assertEqual(\n            handler.transport.send_called_with,\n            (record, message, _GLOBAL_RESOURCE, None),\n        )\n\n\nclass TestSetupLogging(unittest.TestCase):\n    def _call_fut(self, handler, excludes=None):\n        from google.cloud.logging.handlers.handlers import setup_logging\n\n        if excludes:\n            return setup_logging(handler, excluded_loggers=excludes)\n        else:\n            return setup_logging(handler)\n\n    def test_setup_logging(self):\n        handler = _Handler(logging.INFO)\n        self._call_fut(handler)\n\n        root_handlers = logging.getLogger().handlers\n        self.assertIn(handler, root_handlers)\n\n    def test_setup_logging_excludes(self):\n        INCLUDED_LOGGER_NAME = \"includeme\"\n        EXCLUDED_LOGGER_NAME = \"excludeme\"\n\n        handler = _Handler(logging.INFO)\n        self._call_fut(handler, (EXCLUDED_LOGGER_NAME,))\n\n        included_logger = logging.getLogger(INCLUDED_LOGGER_NAME)\n        self.assertTrue(included_logger.propagate)\n\n        excluded_logger = logging.getLogger(EXCLUDED_LOGGER_NAME)\n        
self.assertNotIn(handler, excluded_logger.handlers)\n        self.assertFalse(excluded_logger.propagate)\n\n    def setUp(self):\n        self._handlers_cache = logging.getLogger().handlers[:]\n\n    def tearDown(self):\n        # cleanup handlers\n        logging.getLogger().handlers = self._handlers_cache[:]\n\n\nclass _Handler(object):\n    def __init__(self, level):\n        self.level = level\n\n    def acquire(self):\n        pass  # pragma: NO COVER\n\n    def release(self):\n        pass  # pragma: NO COVER\n\n\nclass _Client(object):\n    def __init__(self, project):\n        self.project = project\n\n\nclass _Transport(object):\n    def __init__(self, client, name):\n        self.client = client\n        self.name = name\n\n    def send(self, record, message, resource, labels=None):\n        self.send_called_with = (record, message, resource, labels)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476147,"cells":{"repo_name":{"kind":"string","value":"timthelion/FreeCAD"},"path":{"kind":"string","value":"src/Mod/Ship/shipCreateTank/TaskPanel.py"},"copies":{"kind":"string","value":"8"},"size":{"kind":"string","value":"6620"},"content":{"kind":"string","value":"#***************************************************************************\r\n#*                                                                         *\r\n#*   Copyright (c) 2011, 2016                                              *\r\n#*   Jose Luis Cercos Pita                             *\r\n#*                                                                         *\r\n#*   This program is free software; you can redistribute it and/or modify  *\r\n#*   it under the terms of the GNU Lesser General Public License (LGPL)    *\r\n#*   as published by the Free Software Foundation; either version 2 of     *\r\n#*   the License, or (at your option) any later version.                   *\r\n#*   for detail see the LICENCE text file.                                 
*\r\n#*                                                                         *\r\n#*   This program is distributed in the hope that it will be useful,       *\r\n#*   but WITHOUT ANY WARRANTY; without even the implied warranty of        *\r\n#*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *\r\n#*   GNU Library General Public License for more details.                  *\r\n#*                                                                         *\r\n#*   You should have received a copy of the GNU Library General Public     *\r\n#*   License along with this program; if not, write to the Free Software   *\r\n#*   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  *\r\n#*   USA                                                                   *\r\n#*                                                                         *\r\n#***************************************************************************\r\n\r\nimport FreeCAD as App\r\nimport FreeCADGui as Gui\r\nimport Units\r\nfrom PySide import QtGui, QtCore\r\nimport Tools\r\nimport TankInstance as Instance\r\nfrom shipUtils import Paths\r\nimport shipUtils.Units as USys\r\n\r\nclass TaskPanel:\r\n    def __init__(self):\r\n        \"\"\"Constructor\"\"\"\r\n        self.ui = Paths.modulePath() + \"/shipCreateTank/TaskPanel.ui\"\r\n\r\n    def accept(self):\r\n        \"\"\"Create the ship instance\"\"\"\r\n        mw = self.getMainWindow()\r\n        form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\r\n        form.ship = self.widget(QtGui.QComboBox, \"Ship\")\r\n\r\n        ship = self.ships[form.ship.currentIndex()]\r\n        Tools.createTank(self.solids, ship)\r\n\r\n        return True\r\n\r\n    def reject(self):\r\n        \"\"\"Cancel the job\"\"\"\r\n        return True\r\n\r\n    def clicked(self, index):\r\n        pass\r\n\r\n    def open(self):\r\n        pass\r\n\r\n    def needsFullSpace(self):\r\n        return True\r\n\r\n    def 
isAllowedAlterSelection(self):\r\n        return False\r\n\r\n    def isAllowedAlterView(self):\r\n        return True\r\n\r\n    def isAllowedAlterDocument(self):\r\n        return False\r\n\r\n    def helpRequested(self):\r\n        pass\r\n\r\n    def setupUi(self):\r\n        \"\"\"Create and configurate the user interface\"\"\"\r\n        mw = self.getMainWindow()\r\n        form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\r\n        form.ship = self.widget(QtGui.QComboBox, \"Ship\")\r\n        self.form = form\r\n        if self.initValues():\r\n            return True\r\n        self.retranslateUi()\r\n\r\n    def getMainWindow(self):\r\n        toplevel = QtGui.qApp.topLevelWidgets()\r\n        for i in toplevel:\r\n            if i.metaObject().className() == \"Gui::MainWindow\":\r\n                return i\r\n        raise Exception(\"No main window found\")\r\n\r\n    def widget(self, class_id, name):\r\n        \"\"\"Return the selected widget.\r\n\r\n        Keyword arguments:\r\n        class_id -- Class identifier\r\n        name -- Name of the widget\r\n        \"\"\"\r\n        mw = self.getMainWindow()\r\n        form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\r\n        return form.findChild(class_id, name)\r\n\r\n    def initValues(self):\r\n        \"\"\"Setup the initial values\"\"\"\r\n        # Ensure that there are at least one valid object to generate the\r\n        # tank\r\n        selObjs = Gui.Selection.getSelection()\r\n        self.solids = []\r\n        if not selObjs:\r\n            msg = QtGui.QApplication.translate(\r\n                \"ship_tank\",\r\n                \"Tanks objects can only be created on top of its geometry\"\r\n                \" (no objects selected)\",\r\n                None,\r\n                QtGui.QApplication.UnicodeUTF8)\r\n            App.Console.PrintError(msg + '\\n')\r\n            return True\r\n        for obj in selObjs:\r\n            try:\r\n                
self.solids.extend(obj.Shape.Solids)\r\n            except:\r\n                continue\r\n        if not len(self.solids):\r\n            msg = QtGui.QApplication.translate(\r\n                \"ship_tank\",\r\n                \"No solids found in the selected objects\",\r\n                None,\r\n                QtGui.QApplication.UnicodeUTF8)\r\n            App.Console.PrintError(msg + '\\n')\r\n            return True\r\n\r\n        # Ensure as well that exist at least one valid ship to create the\r\n        # entity inside it\r\n        self.ships = []\r\n        for obj in App.ActiveDocument.Objects:\r\n            try:\r\n                if obj.IsShip:\r\n                    self.ships.append(obj)\r\n            except:\r\n                continue\r\n        if not len(self.ships):\r\n            msg = QtGui.QApplication.translate(\r\n                \"ship_tank\",\r\n                \"There are not ship objects to create weights into them\",\r\n                None,\r\n                QtGui.QApplication.UnicodeUTF8)\r\n            App.Console.PrintError(msg + '\\n')\r\n            return True\r\n\r\n        # Fill the ships combo box\r\n        mw = self.getMainWindow()\r\n        form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\r\n        form.ship = self.widget(QtGui.QComboBox, \"Ship\")\r\n        icon = QtGui.QIcon(QtGui.QPixmap(\":/icons/Ship_Instance.svg\"))\r\n        form.ship.clear()\r\n        for ship in self.ships:\r\n            form.ship.addItem(icon, ship.Label)\r\n        form.ship.setCurrentIndex(0)\r\n\r\n        return False\r\n\r\n    def retranslateUi(self):\r\n        \"\"\"Set the user interface locale strings.\"\"\"\r\n        self.form.setWindowTitle(QtGui.QApplication.translate(\r\n            \"ship_tank\",\r\n            \"Create a new tank\",\r\n            None,\r\n            QtGui.QApplication.UnicodeUTF8))\r\n        self.widget(QtGui.QLabel, \"ShipLabel\").setText(\r\n            QtGui.QApplication.translate(\r\n          
      \"ship_tank\",\r\n                \"Ship\",\r\n                None,\r\n                QtGui.QApplication.UnicodeUTF8))\r\n\r\n\r\ndef createTask():\r\n    panel = TaskPanel()\r\n    Gui.Control.showDialog(panel)\r\n    if panel.setupUi():\r\n        Gui.Control.closeDialog(panel)\r\n        return None\r\n    return panel\r\n"},"license":{"kind":"string","value":"lgpl-2.1"}}},{"rowIdx":476148,"cells":{"repo_name":{"kind":"string","value":"gilneidp/FinalProject"},"path":{"kind":"string","value":"ALL_FILES/pox/misc/mac_blocker.py"},"copies":{"kind":"string","value":"46"},"size":{"kind":"string","value":"3794"},"content":{"kind":"string","value":"# Copyright 2012 James McCauley\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nGives a GUI for blocking individual MAC addresses.\n\nMeant to work with reactive components like l2_learning or l2_pairs.\n\nStart with --no-clear-tables if you don't want to clear tables on changes.\n\"\"\"\n\nfrom pox.core import core\nfrom pox.lib.revent import EventHalt\nfrom pox.lib.addresses import EthAddr\nimport pox.openflow.libopenflow_01 as of\n\nfrom Tkinter import *\n\n# Sets of blocked and unblocked MACs\nblocked = set()\nunblocked = set()\n\n# Listbox widgets\nunblocked_list = None\nblocked_list = None\n\n# If True, clear tables on every block/unblock\nclear_tables_on_change = True\n\ndef add_mac (mac):\n  if mac.is_multicast: return\n  if mac.is_bridge_filtered: return\n  if mac in blocked: return\n  if mac 
in unblocked: return\n  unblocked.add(mac)\n  core.tk.do(unblocked_list.insert, None, END, str(mac))\n\ndef packet_handler (event):\n  # Note the two MACs\n  add_mac(event.parsed.src)\n  add_mac(event.parsed.dst)\n\n  # Check for blocked MACs\n  if event.parsed.src in blocked:\n    return EventHalt\n  if event.parsed.dst in blocked:\n    return EventHalt\n\ndef get (l):\n  \"\"\" Get an element from a listbox \"\"\"\n  try:\n    i = l.curselection()[0]\n    mac = l.get(i)\n    return i,mac\n  except:\n    pass\n  return None,None\n\ndef clear_flows ():\n  \"\"\" Clear flows on all switches \"\"\"\n  for c in core.openflow.connections:\n    d = of.ofp_flow_mod(command = of.OFPFC_DELETE)\n    c.send(d)\n\ndef move_entry (from_list, from_set, to_list, to_set):\n  \"\"\" Move entry from one list to another \"\"\"\n  i,mac = get(from_list)\n  if mac is None: return\n  from_list.delete(i)\n  to_list.insert(END, mac)\n  mac = EthAddr(mac)\n  to_set.add(mac)\n  from_set.remove(mac)\n\n  if clear_tables_on_change:\n    # This is coming from another thread, so don't just send -- use\n    # callLater so that it happens from the coop thread.\n    core.callLater(clear_flows)\n\ndef do_block ():\n  \"\"\" Handle clicks on block button \"\"\"\n  move_entry(unblocked_list, unblocked, blocked_list, blocked)\n\ndef do_unblock ():\n  \"\"\" Handle clicks on unblock button \"\"\"\n  move_entry(blocked_list, blocked, unblocked_list, unblocked)\n\ndef setup ():\n  \"\"\" Set up GUI \"\"\"\n  global unblocked_list, blocked_list\n  top = Toplevel()\n  top.title(\"MAC Blocker\")\n\n  # Shut down POX when window is closed\n  top.protocol(\"WM_DELETE_WINDOW\", core.quit)\n\n  box1 = Frame(top)\n  box2 = Frame(top)\n  l1 = Label(box1, text=\"Allowed\")\n  l2 = Label(box2, text=\"Blocked\")\n  unblocked_list = Listbox(box1)\n  blocked_list = Listbox(box2)\n  l1.pack()\n  l2.pack()\n  unblocked_list.pack(expand=True,fill=BOTH)\n  blocked_list.pack(expand=True,fill=BOTH)\n\n  buttons = 
Frame(top)\n  block_button = Button(buttons, text=\"Block >>\", command=do_block)\n  unblock_button = Button(buttons, text=\"<< Unblock\", command=do_unblock)\n  block_button.pack()\n  unblock_button.pack()\n\n  opts = {\"side\":LEFT,\"fill\":BOTH,\"expand\":True}\n  box1.pack(**opts)\n  buttons.pack(**{\"side\":LEFT})\n  box2.pack(**opts)\n\n  core.getLogger().debug(\"Ready\")\n\ndef launch (no_clear_tables = False):\n  global clear_tables_on_change\n  clear_tables_on_change = not no_clear_tables\n\n  def start ():\n    core.openflow.addListenerByName(\"PacketIn\",packet_handler,priority=1)\n    core.tk.do(setup)\n\n  core.call_when_ready(start, ['openflow','tk'])\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":476149,"cells":{"repo_name":{"kind":"string","value":"magicrub/MissionPlanner"},"path":{"kind":"string","value":"Lib/encodings/mbcs.py"},"copies":{"kind":"string","value":"103"},"size":{"kind":"string","value":"1258"},"content":{"kind":"string","value":"\"\"\" Python 'mbcs' Codec for Windows\r\n\r\n\r\nCloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py,\r\nwhich was written by Marc-Andre Lemburg (mal@lemburg.com).\r\n\r\n(c) Copyright CNRI, All Rights Reserved. 
NO WARRANTY.\r\n\r\n\"\"\"\r\n# Import them explicitly to cause an ImportError\r\n# on non-Windows systems\r\nfrom codecs import mbcs_encode, mbcs_decode\r\n# for IncrementalDecoder, IncrementalEncoder, ...\r\nimport codecs\r\n\r\n### Codec APIs\r\n\r\nencode = mbcs_encode\r\n\r\ndef decode(input, errors='strict'):\r\n    return mbcs_decode(input, errors, True)\r\n\r\nclass IncrementalEncoder(codecs.IncrementalEncoder):\r\n    def encode(self, input, final=False):\r\n        return mbcs_encode(input, self.errors)[0]\r\n\r\nclass IncrementalDecoder(codecs.BufferedIncrementalDecoder):\r\n    _buffer_decode = mbcs_decode\r\n\r\nclass StreamWriter(codecs.StreamWriter):\r\n    encode = mbcs_encode\r\n\r\nclass StreamReader(codecs.StreamReader):\r\n    decode = mbcs_decode\r\n\r\n### encodings module API\r\n\r\ndef getregentry():\r\n    return codecs.CodecInfo(\r\n        name='mbcs',\r\n        encode=encode,\r\n        decode=decode,\r\n        incrementalencoder=IncrementalEncoder,\r\n        incrementaldecoder=IncrementalDecoder,\r\n        streamreader=StreamReader,\r\n        streamwriter=StreamWriter,\r\n    )\r\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476150,"cells":{"repo_name":{"kind":"string","value":"neumerance/deploy"},"path":{"kind":"string","value":".venv/lib/python2.7/site-packages/requests/packages/urllib3/contrib/ntlmpool.py"},"copies":{"kind":"string","value":"59"},"size":{"kind":"string","value":"4740"},"content":{"kind":"string","value":"# urllib3/contrib/ntlmpool.py\n# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)\n#\n# This module is part of urllib3 and is released under\n# the MIT License: http://www.opensource.org/licenses/mit-license.php\n\n\"\"\"\nNTLM authenticating pool, contributed by erikcederstran\n\nIssue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10\n\"\"\"\n\ntry:\n    from http.client import HTTPSConnection\nexcept ImportError:\n    from httplib import 
HTTPSConnection\nfrom logging import getLogger\nfrom ntlm import ntlm\n\nfrom urllib3 import HTTPSConnectionPool\n\n\nlog = getLogger(__name__)\n\n\nclass NTLMConnectionPool(HTTPSConnectionPool):\n    \"\"\"\n    Implements an NTLM authentication version of an urllib3 connection pool\n    \"\"\"\n\n    scheme = 'https'\n\n    def __init__(self, user, pw, authurl, *args, **kwargs):\n        \"\"\"\n        authurl is a random URL on the server that is protected by NTLM.\n        user is the Windows user, probably in the DOMAIN\\username format.\n        pw is the password for the user.\n        \"\"\"\n        super(NTLMConnectionPool, self).__init__(*args, **kwargs)\n        self.authurl = authurl\n        self.rawuser = user\n        user_parts = user.split('\\\\', 1)\n        self.domain = user_parts[0].upper()\n        self.user = user_parts[1]\n        self.pw = pw\n\n    def _new_conn(self):\n        # Performs the NTLM handshake that secures the connection. The socket\n        # must be kept open while requests are performed.\n        self.num_connections += 1\n        log.debug('Starting NTLM HTTPS connection no. 
%d: https://%s%s' %\n                  (self.num_connections, self.host, self.authurl))\n\n        headers = {}\n        headers['Connection'] = 'Keep-Alive'\n        req_header = 'Authorization'\n        resp_header = 'www-authenticate'\n\n        conn = HTTPSConnection(host=self.host, port=self.port)\n\n        # Send negotiation message\n        headers[req_header] = (\n            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))\n        log.debug('Request headers: %s' % headers)\n        conn.request('GET', self.authurl, None, headers)\n        res = conn.getresponse()\n        reshdr = dict(res.getheaders())\n        log.debug('Response status: %s %s' % (res.status, res.reason))\n        log.debug('Response headers: %s' % reshdr)\n        log.debug('Response data: %s [...]' % res.read(100))\n\n        # Remove the reference to the socket, so that it can not be closed by\n        # the response object (we want to keep the socket open)\n        res.fp = None\n\n        # Server should respond with a challenge message\n        auth_header_values = reshdr[resp_header].split(', ')\n        auth_header_value = None\n        for s in auth_header_values:\n            if s[:5] == 'NTLM ':\n                auth_header_value = s[5:]\n        if auth_header_value is None:\n            raise Exception('Unexpected %s response header: %s' %\n                            (resp_header, reshdr[resp_header]))\n\n        # Send authentication message\n        ServerChallenge, NegotiateFlags = \\\n            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)\n        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,\n                                                         self.user,\n                                                         self.domain,\n                                                         self.pw,\n                                                         NegotiateFlags)\n        headers[req_header] = 'NTLM %s' % auth_msg\n      
  log.debug('Request headers: %s' % headers)\n        conn.request('GET', self.authurl, None, headers)\n        res = conn.getresponse()\n        log.debug('Response status: %s %s' % (res.status, res.reason))\n        log.debug('Response headers: %s' % dict(res.getheaders()))\n        log.debug('Response data: %s [...]' % res.read()[:100])\n        if res.status != 200:\n            if res.status == 401:\n                raise Exception('Server rejected request: wrong '\n                                'username or password')\n            raise Exception('Wrong server response: %s %s' %\n                            (res.status, res.reason))\n\n        res.fp = None\n        log.debug('Connection established')\n        return conn\n\n    def urlopen(self, method, url, body=None, headers=None, retries=3,\n                redirect=True, assert_same_host=True):\n        if headers is None:\n            headers = {}\n        headers['Connection'] = 'Keep-Alive'\n        return super(NTLMConnectionPool, self).urlopen(method, url, body,\n                                                       headers, retries,\n                                                       redirect,\n                                                       assert_same_host)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476151,"cells":{"repo_name":{"kind":"string","value":"drdelta/notenso"},"path":{"kind":"string","value":"src/platform/win32/SConsTools/SwigTool.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"5611"},"content":{"kind":"string","value":"import os\r\nimport re\r\nimport SCons.Builder\r\nimport SCons.Scanner\r\n\r\nfrom Helpers import addInstanceMethodToEnv\r\n\r\ndef buildSwigExtension( env,\r\n                        swigInterfaceFile,\r\n                        source = None,\r\n                        isCpp = True,\r\n                        **kwargs ):\r\n    \"\"\"\r\n    Builds a SWIG extension by calling a SwigC/SwigCpp builder\r\n   
 method and then a SharedLibrary builder method.\r\n    \"\"\"\r\n\r\n    if isCpp:\r\n        # We need to dynamically determine swigWrapper and pyFile\r\n        # because the returned targets may contain a variable\r\n        # number of files--if directors are enabled.\r\n        files = env.SwigCpp( source=swigInterfaceFile )\r\n        swigWrapper = [ f for f in files\r\n                        if f.path.endswith( \".cxx\" ) ][0]\r\n        pyFile = [ f for f in files\r\n                   if f.path.endswith( \".py\" ) ][0]\r\n    else:\r\n        swigWrapper, pyFile = env.SwigC( source=swigInterfaceFile )\r\n\r\n    sourceList = [swigWrapper]\r\n\r\n    if source:\r\n        sourceList.append( source )\r\n\r\n    # If our SWIG interface file is \"foo.i\", our target file will\r\n    # be \"_foo\".\r\n    fileName = os.path.basename( swigInterfaceFile )\r\n    targetFileName = \"_%s\" % os.path.splitext( fileName )[0]\r\n\r\n    pydFile, libFile, expFile = env.SharedLibrary(\r\n        target=targetFileName,\r\n        source=sourceList,\r\n        **kwargs\r\n        )\r\n\r\n    return [pydFile, pyFile]\r\n\r\n\r\n# ----------------------------------------------------------------------------\r\n# SWIG Builders and Scanner\r\n# ----------------------------------------------------------------------------\r\n\r\n# SWIG Builders\r\n\r\ndef swigBuilderModifyTargets( target, source, env ):\r\n    \"\"\"\r\n    Emitter for the Swig Builder.\r\n    \"\"\"\r\n\r\n    # Assign param to dummy variable to ensure that pychecker\r\n    # doesn't complain.\r\n    _ = env\r\n\r\n    for i in source:\r\n        name = str( i )[:-2]\r\n\r\n        # If directors are enabled, then add the \"*_wrap.h\" file as a\r\n        # target.\r\n        text = i.get_contents()\r\n        if text.find( \"\\\"director\\\"\" ) != -1:\r\n            target.append( \"%s_wrap.h\" % name )\r\n\r\n        # Add the \"*.py\" file as a target.\r\n        target.append( \"%s.py\" % name )\r\n    
return target, source\r\n\r\ndef swigBuilderGenerator( source, target, env, for_signature ):\r\n    \"\"\"\r\n    Generator for the Swig Builder.\r\n    \"\"\"\r\n\r\n    # Assign param to dummy variable to ensure that pychecker\r\n    # doesn't complain.\r\n    _ = for_signature\r\n    \r\n    import os.path\r\n    sourceFile = str(source[0])\r\n    targetFile = str(target[0])\r\n    dirName = os.path.dirname( sourceFile )\r\n    if len( dirName ) == 0:\r\n        dirName = \".\"\r\n    if targetFile.endswith( \".cxx\" ):\r\n        cmdStr = \"${SWIG} -c++\"\r\n    else:\r\n        cmdStr = \"${SWIG}\"\r\n\r\n    # Read the environment's CPPPATH and turn that into the Swig\r\n    # include path.\r\n\r\n    if env.has_key( \"CPPPATH\" ):\r\n        for includeDirName in env[\"CPPPATH\"]:\r\n            # Expand out those variables and \"#\" characters.\r\n            includeDirName = env.Dir( env.subst(includeDirName) ).path\r\n            cmdStr += ' \"-I%s\"' % includeDirName\r\n\r\n    cmdStr += \" -Werror -outdir %s -python %s\"\r\n    finalCmd = cmdStr % ( dirName, sourceFile )\r\n    return finalCmd\r\n\r\nswigCBuilder = SCons.Builder.Builder(\r\n    generator = swigBuilderGenerator,\r\n    suffix = \"_wrap.c\",\r\n    src_suffix = \".i\",\r\n    emitter = swigBuilderModifyTargets\r\n    )\r\n\r\nswigCppBuilder = SCons.Builder.Builder(\r\n    generator = swigBuilderGenerator,\r\n    suffix = \"_wrap.cxx\",\r\n    src_suffix = \".i\",\r\n    emitter = swigBuilderModifyTargets\r\n    )\r\n\r\n# SWIG Scanner\r\n\r\nswigInterfaceFileRe = re.compile( r'%include\\s+\"(.*)\"' )\r\n\r\ndef swigInterfaceFileScan( node, env, path, arg = None ):\r\n    \"\"\"\r\n    Main function for Swig interface (.i) file Scanner.\r\n    \"\"\"\r\n    \r\n    # Assign param to dummy variable to ensure that pychecker\r\n    # doesn't complain.\r\n    _ = arg\r\n\r\n    contents = node.get_contents()\r\n    includedFiles = swigInterfaceFileRe.findall( contents )\r\n    
implicitDependencies = [ fileName for fileName in includedFiles\r\n                             if fileName.endswith( \".h\" ) ]\r\n\r\n    theFiles = []\r\n\r\n    for fileName in implicitDependencies:\r\n        pathFound = False\r\n        for dirName in path:\r\n            relPath = env.Dir( dirName ).abspath\r\n            filePath = os.path.join( relPath, fileName )\r\n            if os.path.exists( filePath ):\r\n                theFiles.append( filePath )\r\n                pathFound = True\r\n                break\r\n        if not pathFound:\r\n            raise Exception( \"Dependency '%s' mentioned in '%s' not found.\" %\r\n                             (fileName, node.path) )\r\n\r\n    return theFiles\r\n\r\ndef swigInterfaceFilePath( env, node, unknown1, unknown2 ):\r\n    \"\"\"\r\n    Path function for Swig interface (.i) file Scanner.\r\n    \"\"\"\r\n\r\n    # Assign params to dummy variables to ensure that pychecker\r\n    # doesn't complain.\r\n    _, _ = unknown1, unknown2\r\n\r\n    return tuple( [node.path] + env[\"CPPPATH\"] )\r\n\r\nswigInterfaceFileScanner = SCons.Scanner.Scanner(\r\n    function = swigInterfaceFileScan,\r\n    path_function = swigInterfaceFilePath,\r\n    skeys = [\".i\"]\r\n    )\r\n\r\ndef generate( env ):\r\n    # Add the Builders and Scanner to the environment.\r\n\r\n    env.Append(\r\n        BUILDERS = { \"SwigC\" : swigCBuilder,\r\n                     \"SwigCpp\" : swigCppBuilder, },\r\n        SCANNERS = swigInterfaceFileScanner,\r\n        )\r\n    addInstanceMethodToEnv( env, buildSwigExtension )\r\n\r\ndef exists( env ):\r\n    if env.has_key( \"SWIG\" ):\r\n        return 1\r\n    else:\r\n        return 
0\r\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476152,"cells":{"repo_name":{"kind":"string","value":"Mohamed711/Quiz-Program"},"path":{"kind":"string","value":"vendor/bundle/ruby/2.2.0/gems/libv8-3.16.14.7/vendor/v8/tools/testrunner/server/work_handler.py"},"copies":{"kind":"string","value":"123"},"size":{"kind":"string","value":"5569"},"content":{"kind":"string","value":"# Copyright 2012 the V8 project authors. All rights reserved.\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n#     * Redistributions of source code must retain the above copyright\n#       notice, this list of conditions and the following disclaimer.\n#     * Redistributions in binary form must reproduce the above\n#       copyright notice, this list of conditions and the following\n#       disclaimer in the documentation and/or other materials provided\n#       with the distribution.\n#     * Neither the name of Google Inc. nor the names of its\n#       contributors may be used to endorse or promote products derived\n#       from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nimport os\nimport SocketServer\nimport stat\nimport subprocess\nimport threading\n\nfrom . import compression\nfrom . import constants\nfrom . import signatures\nfrom ..network import endpoint\nfrom ..objects import workpacket\n\n\nclass WorkHandler(SocketServer.BaseRequestHandler):\n\n  def handle(self):\n    rec = compression.Receiver(self.request)\n    while not rec.IsDone():\n      data = rec.Current()\n      with self.server.job_lock:\n        self._WorkOnWorkPacket(data)\n      rec.Advance()\n\n  def _WorkOnWorkPacket(self, data):\n    server_root = self.server.daemon.root\n    v8_root = os.path.join(server_root, \"v8\")\n    os.chdir(v8_root)\n    packet = workpacket.WorkPacket.Unpack(data)\n    self.ctx = packet.context\n    self.ctx.shell_dir = os.path.join(\"out\",\n                                      \"%s.%s\" % (self.ctx.arch, self.ctx.mode))\n    if not os.path.isdir(self.ctx.shell_dir):\n      os.makedirs(self.ctx.shell_dir)\n    for binary in packet.binaries:\n      if not self._UnpackBinary(binary, packet.pubkey_fingerprint):\n        return\n\n    if not self._CheckoutRevision(packet.base_revision):\n      return\n\n    if not self._ApplyPatch(packet.patch):\n      return\n\n    tests = packet.tests\n    endpoint.Execute(v8_root, self.ctx, tests, self.request, self.server.daemon)\n    self._SendResponse()\n\n  def _SendResponse(self, error_message=None):\n    try:\n      if error_message:\n        
compression.Send([[-1, error_message]], self.request)\n      compression.Send(constants.END_OF_STREAM, self.request)\n      return\n    except Exception, e:\n      pass  # Peer is gone. There's nothing we can do.\n    # Clean up.\n    self._Call(\"git checkout -f\")\n    self._Call(\"git clean -f -d\")\n    self._Call(\"rm -rf %s\" % self.ctx.shell_dir)\n\n  def _UnpackBinary(self, binary, pubkey_fingerprint):\n    binary_name = binary[\"name\"]\n    if binary_name == \"libv8.so\":\n      libdir = os.path.join(self.ctx.shell_dir, \"lib.target\")\n      if not os.path.exists(libdir): os.makedirs(libdir)\n      target = os.path.join(libdir, binary_name)\n    else:\n      target = os.path.join(self.ctx.shell_dir, binary_name)\n    pubkeyfile = \"../trusted/%s.pem\" % pubkey_fingerprint\n    if not signatures.VerifySignature(target, binary[\"blob\"],\n                                      binary[\"sign\"], pubkeyfile):\n      self._SendResponse(\"Signature verification failed\")\n      return False\n    os.chmod(target, stat.S_IRWXU)\n    return True\n\n  def _CheckoutRevision(self, base_svn_revision):\n    get_hash_cmd = (\n        \"git log -1 --format=%%H --remotes --grep='^git-svn-id:.*@%s'\" %\n        base_svn_revision)\n    try:\n      base_revision = subprocess.check_output(get_hash_cmd, shell=True)\n      if not base_revision: raise ValueError\n    except:\n      self._Call(\"git fetch\")\n      try:\n        base_revision = subprocess.check_output(get_hash_cmd, shell=True)\n        if not base_revision: raise ValueError\n      except:\n        self._SendResponse(\"Base revision not found.\")\n        return False\n    code = self._Call(\"git checkout -f %s\" % base_revision)\n    if code != 0:\n      self._SendResponse(\"Error trying to check out base revision.\")\n      return False\n    code = self._Call(\"git clean -f -d\")\n    if code != 0:\n      self._SendResponse(\"Failed to reset checkout\")\n      return False\n    return True\n\n  def 
_ApplyPatch(self, patch):\n    if not patch: return True  # Just skip if the patch is empty.\n    patchfilename = \"_dtest_incoming_patch.patch\"\n    with open(patchfilename, \"w\") as f:\n      f.write(patch)\n    code = self._Call(\"git apply %s\" % patchfilename)\n    if code != 0:\n      self._SendResponse(\"Error applying patch.\")\n      return False\n    return True\n\n  def _Call(self, cmd):\n    return subprocess.call(cmd, shell=True)\n\n\nclass WorkSocketServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):\n  def __init__(self, daemon):\n    address = (daemon.ip, constants.PEER_PORT)\n    SocketServer.TCPServer.__init__(self, address, WorkHandler)\n    self.job_lock = threading.Lock()\n    self.daemon = daemon\n"},"license":{"kind":"string","value":"cc0-1.0"}}},{"rowIdx":476153,"cells":{"repo_name":{"kind":"string","value":"jmartinm/invenio-master"},"path":{"kind":"string","value":"modules/miscutil/lib/plotextractor.py"},"copies":{"kind":"string","value":"13"},"size":{"kind":"string","value":"53628"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##\n## This file is part of Invenio.\n## Copyright (C) 2010, 2011 CERN.\n##\n## Invenio is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License as\n## published by the Free Software Foundation; either version 2 of the\n## License, or (at your option) any later version.\n##\n## Invenio is distributed in the hope that it will be useful, but\n## WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU\n## General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License\n## along with Invenio; if not, write to the Free Software Foundation, Inc.,\n## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\nimport sys\nimport os\nimport getopt\nimport re\nimport time\n\nfrom invenio.shellutils import run_shell_command, Timeout, run_process_with_timeout\nfrom invenio.invenio_connector import InvenioConnector\nfrom invenio.textutils import wrap_text_in_a_box, \\\n                              wait_for_user\nfrom invenio.config import CFG_TMPSHAREDDIR, CFG_SITE_URL, \\\n                           CFG_PLOTEXTRACTOR_DISALLOWED_TEX, \\\n                           CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT, \\\n                           CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT, \\\n                           CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT\nfrom invenio.bibtask import task_low_level_submission\nfrom invenio.plotextractor_getter import get_list_of_all_matching_files, \\\n                                         parse_and_download, \\\n                                         make_single_directory, \\\n                                         tarballs_by_recids, \\\n                                         tarballs_by_arXiv_id\nfrom invenio.plotextractor_converter import untar, extract_text, \\\n                                            convert_images\nfrom invenio.plotextractor_output_utils import assemble_caption, \\\n                                               find_open_and_close_braces, \\\n                                               create_MARC, get_tex_location, \\\n                                               get_image_location, \\\n                                               create_contextfiles, \\\n                                               prepare_image_data, \\\n                                               write_message, remove_dups\nfrom tempfile import 
mkstemp\n\n\n\"\"\"\nThis programme will take a tarball from arXiv, untar it, convert all its\nassociated images to PNG, find the captions to the images detailed in the\nincluded TeX document, and write MARCXML that reflects these associations.\n\"\"\"\n\nARXIV_HEADER = 'arXiv:'\nPLOTS_DIR = 'plots'\n\nMAIN_CAPTION_OR_IMAGE = 0\nSUB_CAPTION_OR_IMAGE = 1\n\ndef main():\n    \"\"\"\n    The main program loop.\n    \"\"\"\n    help_param = 'help'\n    verbose_param = 'verbose'\n    tarball_param = 'tarball'\n    tardir_param = 'tdir'\n    infile_param = 'input'\n    sdir_param = 'sdir'\n    extract_text_param = 'extract-text'\n    force_param = 'force'\n    upload_param = 'call-bibupload'\n    upload_mode_param = 'upload-mode'\n    yes_i_know_param = 'yes-i-know'\n    recid_param = 'recid'\n    with_docname_param = 'with-docname'\n    with_doctype_param = 'with-doctype'\n    with_docformat_param = 'with-docformat'\n    arXiv_param = 'arXiv'\n    squash_param = 'squash'\n    refno_url_param = 'refno-url'\n    refno_param = 'skip-refno'\n    clean_param = 'clean'\n    param_abbrs = 'h:t:d:s:i:a:l:xfuyr:qck'\n    params = [help_param, tarball_param + '=', tardir_param + '=', \\\n              sdir_param + '=', infile_param + '=', arXiv_param + '=', refno_url_param + '=', \\\n              extract_text_param, force_param, upload_param, yes_i_know_param, recid_param + '=', \\\n              squash_param, clean_param, refno_param, with_docname_param + '=', \\\n              with_doctype_param + '=', with_docformat_param + '=', upload_mode_param + '=']\n    try:\n        opts, args = getopt.getopt(sys.argv[1:], param_abbrs, params)\n    except getopt.GetoptError, err:\n        write_message(str(err))\n        usage()\n        sys.exit(2)\n\n    tarball = None\n    sdir = None\n    infile = None\n    tdir = None\n    xtract_text = False\n    upload_plots = False\n    force = False\n    squash = False\n    squash_path = \"\"\n    yes_i_know = False\n    recids = None\n    
with_docname = None\n    with_doctype = None\n    with_docformat = None\n    arXiv = None\n    clean = False\n    refno_url = CFG_SITE_URL\n    skip_refno = False\n    upload_mode = 'append'\n\n    for opt, arg in opts:\n        if opt in ['-h', '--' + help_param]:\n            usage()\n            sys.exit()\n        elif opt in ['-t', '--' + tarball_param]:\n            tarball = arg\n        elif opt in ['-d', '--' + tardir_param]:\n            tdir = arg\n        elif opt in ['-i', '--' + infile_param]:\n            infile = arg\n        elif opt in ['-r', '--' + recid_param]:\n            recids = arg\n        elif opt in ['-a', '--' + arXiv_param]:\n            arXiv = arg\n        elif opt in ['--' + with_docname_param]:\n            with_docname = arg\n        elif opt in ['--' + with_doctype_param]:\n            with_doctype = arg\n        elif opt in ['--' + with_docformat_param]:\n            with_docformat = arg\n        elif opt in ['-s', '--' + sdir_param]:\n            sdir = arg\n        elif opt in ['-x', '--' + extract_text_param]:\n            xtract_text = True\n        elif opt in ['-f', '--' + force_param]:\n            force = True\n        elif opt in ['-u', '--' + upload_param]:\n            upload_plots = True\n        elif opt in ['--' + upload_mode_param]:\n            upload_mode = arg\n        elif opt in ['-q', '--' + squash_param]:\n            squash = True\n        elif opt in ['-y', '--' + yes_i_know_param]:\n            yes_i_know = True\n        elif opt in ['-c', '--' + clean_param]:\n            clean = True\n        elif opt in ['-l', '--' + refno_url_param]:\n            refno_url = arg\n        elif opt in ['-k', '--' + refno_param]:\n            skip_refno = True\n        else:\n            usage()\n            sys.exit()\n\n    allowed_upload_modes = ('insert', 'append', 'correct', 'replace')\n    if not upload_mode in allowed_upload_modes:\n        write_message('Specified upload mode %s is not valid. 
Must be in %s' % \\\n                      (upload_mode, ', '.join(allowed_upload_modes)))\n        usage()\n        sys.exit()\n\n    if sdir == None:\n        sdir = CFG_TMPSHAREDDIR\n    elif not os.path.isdir(sdir):\n        try:\n            os.makedirs(sdir)\n        except:\n            write_message('Error: We can\\'t use this sdir.  using ' + \\\n                      'CFG_TMPSHAREDDIR')\n            sdir = CFG_TMPSHAREDDIR\n\n    if skip_refno:\n        refno_url = \"\"\n\n    tars_and_gzips = []\n\n    if tarball != None:\n        tars_and_gzips.append(tarball)\n    if tdir != None:\n        filetypes = ['gzip compressed', 'tar archive', 'Tar archive'] # FIXME\n        write_message('Currently processing any tarballs in ' + tdir)\n        tars_and_gzips.extend(get_list_of_all_matching_files(tdir, filetypes))\n    if infile != None:\n        tars_and_gzips.extend(parse_and_download(infile, sdir))\n    if recids != None:\n        tars_and_gzips.extend(tarballs_by_recids(recids, sdir, with_docname, with_doctype, with_docformat))\n    if arXiv != None:\n        tars_and_gzips.extend(tarballs_by_arXiv_id([arXiv], sdir))\n    if tars_and_gzips == []:\n        write_message('Error: no tarballs to process!')\n        sys.exit(1)\n\n    if squash:\n        squash_fd, squash_path = mkstemp(suffix=\"_\" + time.strftime(\"%Y%m%d%H%M%S\") + \".xml\", \\\n                                  prefix=\"plotextractor_\", dir=sdir)\n        os.write(squash_fd, '\\n\\n')\n        os.close(squash_fd)\n\n    for tarball in tars_and_gzips:\n        recid = None\n        if isinstance(tarball, tuple):\n            tarball, recid = tarball\n        process_single(tarball, sdir=sdir, xtract_text=xtract_text, \\\n                       upload_plots=upload_plots, force=force, squash=squash_path, \\\n                       yes_i_know=yes_i_know, refno_url=refno_url, \\\n                       clean=clean, recid=recid, upload_mode=upload_mode)\n    if squash:\n        squash_fd = 
open(squash_path, \"a\")\n        squash_fd.write(\"\\n\")\n        squash_fd.close()\n        write_message(\"generated %s\" % (squash_path,))\n        if upload_plots:\n            upload_to_site(squash_path, yes_i_know, upload_mode)\n\ndef process_single(tarball, sdir=CFG_TMPSHAREDDIR, xtract_text=False, \\\n                   upload_plots=False, force=False, squash=\"\", \\\n                   yes_i_know=False, refno_url=\"\", \\\n                   clean=False, recid=None, upload_mode='append'):\n    \"\"\"\n    Processes one tarball end-to-end.\n\n    @param: tarball (string): the absolute location of the tarball we wish\n        to process\n    @param: sdir (string): where we should put all the intermediate files for\n        the processing.  if you're uploading, this directory should be one\n        of the ones specified in CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS, else\n        the upload won't work\n    @param: xtract_text (boolean): true iff you want to run pdftotext on the\n        pdf versions of the tarfiles.  this programme assumes that the pdfs\n        are named the same as the tarballs but with a .pdf extension.\n    @param: upload_plots (boolean): true iff you want to bibupload the plots\n        extracted by this process\n    @param: force (boolean): force creation of new xml file\n    @param: squash: write MARCXML output into a specified 'squash' file\n        instead of single files.\n    @param: yes_i_know: if True, no user interaction if upload_plots is True\n    @param: refno_url: URL to the invenio-instance to query for refno.\n    @param: clean: if True, everything except the original tarball, plots and\n            context- files will be removed\n    @param recid: the record ID linked to this tarball. 
Overrides C{refno_url}\n    @param upload_mode: the mode in which to call bibupload (when C{upload_plots}\n                        is set to True.\n    @return: marc_name(string): path to generated marcxml file\n    \"\"\"\n    sub_dir, refno = get_defaults(tarball, sdir, refno_url, recid)\n    if not squash:\n        marc_name = os.path.join(sub_dir, '%s.xml' % (refno,))\n        if (force or not os.path.exists(marc_name)):\n            marc_fd = open(marc_name, 'w')\n            marc_fd.write('\\n\\n')\n            marc_fd.close()\n    else:\n        marc_name = squash\n    if xtract_text:\n        extract_text(tarball)\n    try:\n        extracted_files_list, image_list, tex_files = untar(tarball, sub_dir)\n    except Timeout:\n        write_message('Timeout during tarball extraction on %s' % (tarball,))\n        return\n    if tex_files == [] or tex_files == None:\n        write_message('%s is not a tarball' % (os.path.split(tarball)[-1],))\n        run_shell_command('rm -r %s', (sub_dir,))\n        return\n\n    converted_image_list = convert_images(image_list)\n    write_message('converted %d of %d images found for %s' % (len(converted_image_list), \\\n                                                              len(image_list), \\\n                                                              os.path.basename(tarball)))\n    extracted_image_data = []\n\n    for tex_file in tex_files:\n        # Extract images, captions and labels\n        partly_extracted_image_data = extract_captions(tex_file, sub_dir, \\\n                                                converted_image_list)\n        if partly_extracted_image_data != []:\n            # Add proper filepaths and do various cleaning\n            cleaned_image_data = prepare_image_data(partly_extracted_image_data, \\\n                                                  tex_file, converted_image_list)\n            # Using prev. 
extracted info, get contexts for each image found\n            extracted_image_data.extend((extract_context(tex_file, cleaned_image_data)))\n    extracted_image_data = remove_dups(extracted_image_data)\n    if extracted_image_data == []:\n        write_message('No plots detected in %s' % (refno,))\n    else:\n        if refno_url == \"\":\n            refno = None\n        create_contextfiles(extracted_image_data)\n        marc_xml = create_MARC(extracted_image_data, tarball, refno)\n        if not squash:\n            marc_xml += \"\\n\"\n        if marc_name != None:\n            marc_fd = open(marc_name, 'a')\n            marc_fd.write('%s\\n' % (marc_xml,))\n            marc_fd.close()\n            if not squash:\n                write_message('generated %s' % (marc_name,))\n                if upload_plots:\n                    upload_to_site(marc_name, yes_i_know, upload_mode)\n    if clean:\n        clean_up(extracted_files_list, image_list)\n    write_message('work complete on %s' % (os.path.split(tarball)[-1],))\n    return marc_name\n\ndef clean_up(extracted_files_list, image_list):\n    \"\"\"\n    Removes all the intermediate stuff.\n\n    @param: extracted_files_list ([string, string, ...]): list of all extracted files\n    @param: image_list ([string, string, ...]): list of the images to keep\n\n    \"\"\"\n    for extracted_file in extracted_files_list:\n        # Remove everything that is not in the image_list or is not a directory\n        if extracted_file not in image_list and extracted_file[-1] != os.sep:\n            run_shell_command('rm %s', (extracted_file,))\n\ndef get_defaults(tarball, sdir, refno_url, recid=None):\n    \"\"\"\n    A function for parameter-checking.\n\n    @param: tarball (string): the location of the tarball to be extracted\n    @param: sdir (string): the location of the scratch directory for untarring,\n        conversions, and the ultimate destination of the MARCXML\n    @param: refno_url (string): server location on 
where to look for refno\n\n    @param recid: (int) if set, overrides C{refno_url} and consider this record\n    @return sdir, refno (string, string): the same\n        arguments it was sent as is appropriate.\n    \"\"\"\n\n    if not sdir or recid:\n        # Missing sdir: using default directory: CFG_TMPDIR\n        sdir = CFG_TMPSHAREDDIR\n    else:\n        sdir = os.path.split(tarball)[0]\n\n    # make a subdir in the scratch directory for each tarball\n    sdir = make_single_directory(sdir, \\\n                                 os.path.split(tarball)[-1] + '_' + PLOTS_DIR)\n    if recid:\n        refno = str(recid)\n    elif refno_url != \"\":\n        refno = get_reference_number(tarball, refno_url)\n        if refno == None:\n            refno = os.path.basename(tarball)\n            write_message('Error: can\\'t find record id for %s' % (refno,))\n    else:\n        refno = os.path.basename(tarball)\n        write_message(\"Skipping ref-no check\")\n    return sdir, refno\n\ndef get_reference_number(tarball, refno_url):\n    \"\"\"\n    Attempts to determine the reference number of the file by searching.\n\n    @param: tarball (string): the name of the tarball as downloaded from\n        arXiv\n    @param: refno_url (string): url of repository to check for a\n        reference number for this record. 
If not set; returns None\n\n    @return: refno (string): the reference number of the paper\n    \"\"\"\n    if refno_url:\n        server = InvenioConnector(refno_url)\n        # we just need the name of the file\n        tarball = os.path.split(tarball)[1]\n        prefix = '037__a:'\n        # the name right now looks like arXiv:hep-ph_9703009\n        # or arXiv:0910.0476\n        if tarball.startswith(ARXIV_HEADER):\n            if len(tarball.split('_')) > 1:\n                tarball = tarball.split(':')[1]\n                arXiv_record = tarball.replace('_', '/')\n            else:\n                arXiv_record = tarball\n\n            result = server.search(p=prefix + arXiv_record, of='id')\n\n            if len(result) == 0:\n                return None\n\n            return str(result[0])\n\n        arXiv_record = re.findall('(([a-zA-Z\\\\-]+/\\\\d+)|(\\\\d+\\\\.\\\\d+))', tarball)\n        if len(arXiv_record) > 1:\n            arXiv_record = arXiv_record[0]\n            result = server.search(p=prefix + arXiv_record, of='id')\n\n            if len(result) > 0:\n                return str(result[0])\n\n        tarball_mod = tarball.replace('_', '/')\n        arXiv_record = re.findall('(([a-zA-Z\\\\-]+/\\\\d+)|(\\\\d+\\\\.\\\\d+))', \\\n                                  tarball_mod)\n        if len(arXiv_record) > 1:\n            arXiv_record = arXiv_record[0]\n            result = server.search(p=prefix + arXiv_record, of='id')\n\n            if len(result) > 0:\n                return str(result[0])\n    return None\n\ndef rotate_image(filename, line, sdir, image_list):\n    \"\"\"\n    Given a filename and a line, figure out what it is that the author\n    wanted to do wrt changing the rotation of the image and convert the\n    file so that this rotation is reflected in its presentation.\n\n    @param: filename (string): the name of the file as specified in the TeX\n    @param: line (string): the line where the rotate command was found\n\n    @output: 
the image file rotated in accordance with the rotate command\n    @return: True if something was rotated\n    \"\"\"\n\n    file_loc = get_image_location(filename, sdir, image_list)\n    degrees = re.findall('(angle=[-\\\\d]+|rotate=[-\\\\d]+)', line)\n\n    if len(degrees) < 1:\n        return False\n\n    degrees = degrees[0].split('=')[-1].strip()\n\n    if file_loc == None or file_loc == 'ERROR' or\\\n            not re.match('-*\\\\d+', degrees):\n        return False\n\n    degrees = str(0 - int(degrees))\n    cmd_list = ['mogrify', '-rotate', degrees, file_loc]\n    dummy, dummy, cmd_err = run_process_with_timeout(cmd_list)\n    if cmd_err != '':\n        return True\n    else:\n        return True\n\ndef get_context(lines, backwards=False):\n    \"\"\"\n    Given a relevant string from a TeX file, this function will extract text\n    from it as far as it is deemed contextually relevant, either backwards or forwards\n    in the text. The level of relevance allowed is configurable. When it reaches some\n    point in the text that is determined to be out of scope from the current context,\n    like text that is identified as a new paragraph, a complex TeX structure\n    ('/begin', '/end', etc.) etc., it will return the previously allocated text.\n\n    For use when extracting text with contextual value for an figure or plot.\n\n    @param lines (string): string to examine\n    @param reversed (bool): are we searching backwards?\n\n    @return context (string): extracted context\n    \"\"\"\n    tex_tag = re.compile(r\".*\\\\(\\w+).*\")\n    sentence = re.compile(r\"(?<=[.?!])[\\s]+(?=[A-Z])\")\n    context = []\n\n    word_list = lines.split()\n    if backwards:\n        word_list.reverse()\n\n    # For each word we do the following:\n    #   1. Check if we have reached word limit\n    #   2. If not, see if this is a TeX tag and see if its 'illegal'\n    #   3. 
Otherwise, add word to context\n    for word in word_list:\n        if len(context) >= CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT:\n            break\n        match = tex_tag.match(word)\n        if (match and match.group(1) in CFG_PLOTEXTRACTOR_DISALLOWED_TEX):\n            # TeX Construct matched, return\n            if backwards:\n                # When reversed we need to go back and\n                # remove unwanted data within brackets\n                temp_word = \"\"\n                while len(context):\n                    temp_word = context.pop()\n                    if '}' in temp_word:\n                        break\n            break\n        context.append(word)\n\n    if backwards:\n        context.reverse()\n    text = \" \".join(context)\n    sentence_list = sentence.split(text)\n\n    if backwards:\n        sentence_list.reverse()\n\n    if len(sentence_list) > CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT:\n        return \" \".join(sentence_list[:CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT])\n    else:\n        return \" \".join(sentence_list)\n\ndef extract_context(tex_file, extracted_image_data):\n    \"\"\"\n    Given a .tex file and a label name, this function will extract the text before\n    and after for all the references made to this label in the text. The number\n    of characters to extract before and after is configurable.\n\n    @param tex_file (list): path to .tex file\n    @param extracted_image_data ([(string, string, list), ...]):\n        a list of tuples of images matched to labels and captions from\n        this document.\n\n    @return extracted_image_data ([(string, string, list, list),\n        (string, string, list, list),...)]: the same list, but now containing\n        extracted contexts\n    \"\"\"\n    if os.path.isdir(tex_file) or not os.path.exists(tex_file):\n        return []\n    fd = open(tex_file)\n    lines = fd.read()\n    fd.close()\n\n    # Generate context for each image and its assoc. 
labels\n    new_image_data = []\n    for image, caption, label in extracted_image_data:\n        context_list = []\n\n        # Generate a list of index tuples for all matches\n        indicies = [match.span() \\\n                    for match in re.finditer(r\"(\\\\(?:fig|ref)\\{%s\\})\" % (re.escape(label),), \\\n                                                          lines)]\n        for startindex, endindex in indicies:\n            # Retrive all lines before label until beginning of file\n            i = startindex - CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT\n            if i < 0:\n                text_before = lines[:startindex]\n            else:\n                text_before = lines[i:startindex]\n            context_before = get_context(text_before, backwards=True)\n\n            # Retrive all lines from label until end of file and get context\n            i = endindex + CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT\n            text_after = lines[endindex:i]\n            context_after = get_context(text_after)\n            context_list.append(context_before + ' \\\\ref{' + label + '} ' + context_after)\n        new_image_data.append((image, caption, label, context_list))\n    return new_image_data\n\ndef extract_captions(tex_file, sdir, image_list, primary=True):\n    \"\"\"\n    Take the TeX file and the list of images in the tarball (which all,\n    presumably, are used in the TeX file) and figure out which captions\n    in the text are associated with which images\n    @param: lines (list): list of lines of the TeX file\n\n    @param: tex_file (string): the name of the TeX file which mentions\n        the images\n    @param: sdir (string): path to current sub-directory\n    @param: image_list (list): list of images in tarball\n    @param: primary (bool): is this the primary call to extract_caption?\n\n    @return: images_and_captions_and_labels ([(string, string, list),\n        (string, string, list), ...]):\n        a list of tuples representing the names 
of images and their\n        corresponding figure labels from the TeX file\n    \"\"\"\n    if os.path.isdir(tex_file) or not os.path.exists(tex_file):\n        return []\n    fd = open(tex_file)\n    lines = fd.readlines()\n    fd.close()\n\n    # possible figure lead-ins\n    figure_head = '\\\\begin{figure'  # also matches figure*\n    figure_tail = '\\\\end{figure'  # also matches figure*\n    picture_head = '\\\\begin{picture}'\n    displaymath_head = '\\\\begin{displaymath}'\n    subfloat_head = '\\\\subfloat'\n    subfig_head = '\\\\subfigure'\n    includegraphics_head = '\\\\includegraphics'\n\n    epsfig_head = '\\\\epsfig'\n    input_head = '\\\\input'\n    # possible caption lead-ins\n    caption_head = '\\\\caption'\n    figcaption_head = '\\\\figcaption'\n\n    label_head = '\\\\label'\n\n    rotate = 'rotate='\n    angle = 'angle='\n\n    eps_tail = '.eps'\n    ps_tail = '.ps'\n\n    doc_head = '\\\\begin{document}'\n    doc_tail = '\\\\end{document}'\n\n    extracted_image_data = []\n    cur_image = ''\n    caption = ''\n    labels = []\n    active_label = \"\"\n\n    # cut out shit before the doc head\n    if primary:\n        for line_index in range(len(lines)):\n            if lines[line_index].find(doc_head) < 0:\n                lines[line_index] = ''\n            else:\n                break\n\n    # are we using commas in filenames here?\n    commas_okay = False\n    for dummy1, dummy2, filenames in \\\n            os.walk(os.path.split(os.path.split(tex_file)[0])[0]):\n        for filename in filenames:\n            if filename.find(',') > -1:\n                commas_okay = True\n                break\n\n    # a comment is a % not preceded by a \\\n    comment = re.compile(\"(? 
-1:\n            return extracted_image_data\n\n        \"\"\"\n        FIGURE -\n        structure of a figure:\n        \\begin{figure}\n        \\formatting...\n        \\includegraphics[someoptions]{FILENAME}\n        \\caption{CAPTION}  %caption and includegraphics may be switched!\n        \\end{figure}\n        \"\"\"\n\n        index = line.find(figure_head)\n        if index > -1:\n            in_figure_tag = 1\n            # some punks don't like to put things in the figure tag.  so we\n            # just want to see if there is anything that is sitting outside\n            # of it when we find it\n            cur_image, caption, extracted_image_data = \\\n                    put_it_together(cur_image, caption, active_label, extracted_image_data, \\\n                                    line_index, lines)\n\n        # here, you jerks, just make it so that it's fecking impossible to\n        # figure out your damn inclusion types\n\n        index = max([line.find(eps_tail), line.find(ps_tail), \\\n                     line.find(epsfig_head)])\n        if index > -1:\n            if line.find(eps_tail) > -1 or line.find(ps_tail) > -1:\n                ext = True\n            else:\n                ext = False\n            filenames = intelligently_find_filenames(line, ext=ext,\n                                                     commas_okay=commas_okay)\n\n            # try to look ahead!  
sometimes there are better matches after\n            if line_index < len(lines) - 1:\n                filenames.extend(\\\n                          intelligently_find_filenames(lines[line_index + 1],\n                                                      commas_okay=commas_okay))\n            if line_index < len(lines) - 2:\n                filenames.extend(\\\n                          intelligently_find_filenames(lines[line_index + 2],\n                                                      commas_okay=commas_okay))\n\n            for filename in filenames:\n                filename = str(filename)\n                if cur_image == '':\n                    cur_image = filename\n                elif type(cur_image) == list:\n                    if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:\n                        cur_image[SUB_CAPTION_OR_IMAGE].append(filename)\n                    else:\n                        cur_image[SUB_CAPTION_OR_IMAGE] = [filename]\n                else:\n                    cur_image = ['', [cur_image, filename]]\n\n        \"\"\"\n        Rotate and angle\n        \"\"\"\n        index = max(line.find(rotate), line.find(angle))\n        if index > -1:\n            # which is the image associated to it?\n            filenames = intelligently_find_filenames(line,\n                                                     commas_okay=commas_okay)\n            # try the line after and the line before\n            if line_index + 1 < len(lines):\n                filenames.extend(intelligently_find_filenames(lines[line_index + 1],\n                                                      commas_okay=commas_okay))\n            if line_index > 1:\n                filenames.extend(intelligently_find_filenames(lines[line_index - 1],\n                                                      commas_okay=commas_okay))\n\n            already_tried = []\n            for filename in filenames:\n                if filename != 'ERROR' and not filename in 
already_tried:\n                    if rotate_image(filename, line, sdir, image_list):\n                        break\n                    already_tried.append(filename)\n\n        \"\"\"\n        INCLUDEGRAPHICS -\n        structure of includegraphics:\n        \\includegraphics[someoptions]{FILENAME}\n        \"\"\"\n        index = line.find(includegraphics_head)\n        if index > -1:\n            open_curly, open_curly_line, close_curly, dummy = \\\n                    find_open_and_close_braces(line_index, index, '{', lines)\n\n            filename = lines[open_curly_line][open_curly + 1:close_curly]\n\n            if cur_image == '':\n                cur_image = filename\n            elif type(cur_image) == list:\n                if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:\n                    cur_image[SUB_CAPTION_OR_IMAGE].append(filename)\n                else:\n                    cur_image[SUB_CAPTION_OR_IMAGE] = [filename]\n            else:\n                cur_image = ['', [cur_image, filename]]\n\n        \"\"\"\n        {\\input{FILENAME}}\n        \\caption{CAPTION}\n\n        This input is ambiguous, since input is also used for things like\n        inclusion of data from other LaTeX files directly.\n        \"\"\"\n        index = line.find(input_head)\n        if index > -1:\n            new_tex_names = intelligently_find_filenames(line, TeX=True, \\\n                                                         commas_okay=commas_okay)\n\n            for new_tex_name in new_tex_names:\n                if new_tex_name != 'ERROR':\n                    new_tex_file = get_tex_location(new_tex_name, tex_file)\n                    if new_tex_file != None and primary: #to kill recursion\n                        extracted_image_data.extend(extract_captions(\\\n                                                      new_tex_file, sdir, \\\n                                                      image_list,\n                                                
      primary=False))\n\n        \"\"\"PICTURE\"\"\"\n\n        index = line.find(picture_head)\n        if index > -1:\n            # structure of a picture:\n            # \\begin{picture}\n            # ....not worrying about this now\n            #write_message('found picture tag')\n            #FIXME\n            pass\n\n\n\n        \"\"\"DISPLAYMATH\"\"\"\n\n        index = line.find(displaymath_head)\n        if index > -1:\n            # structure of a displaymath:\n            # \\begin{displaymath}\n            # ....not worrying about this now\n            #write_message('found displaymath tag')\n            #FIXME\n            pass\n\n        \"\"\"\n        CAPTIONS -\n        structure of a caption:\n        \\caption[someoptions]{CAPTION}\n        or\n        \\caption{CAPTION}\n        or\n        \\caption{{options}{CAPTION}}\n        \"\"\"\n\n        index = max([line.find(caption_head), line.find(figcaption_head)])\n        if index > -1:\n            open_curly, open_curly_line, close_curly, close_curly_line = \\\n                    find_open_and_close_braces(line_index, index, '{', lines)\n\n            cap_begin = open_curly + 1\n\n            cur_caption = assemble_caption(open_curly_line, cap_begin, \\\n                        close_curly_line, close_curly, lines)\n\n            if caption == '':\n                caption = cur_caption\n            elif type(caption) == list:\n                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:\n                    caption[SUB_CAPTION_OR_IMAGE].append(cur_caption)\n                else:\n                    caption[SUB_CAPTION_OR_IMAGE] = [cur_caption]\n            elif caption != cur_caption:\n                caption = ['', [caption, cur_caption]]\n\n        \"\"\"\n        SUBFLOATS -\n        structure of a subfloat (inside of a figure tag):\n        \\subfloat[CAPTION]{options{FILENAME}}\n\n        also associated with the overall caption of the enclosing figure\n        \"\"\"\n\n        
index = line.find(subfloat_head)\n        if index > -1:\n            # if we are dealing with subfloats, we need a different\n            # sort of structure to keep track of captions and subcaptions\n            if type(cur_image) != list:\n                cur_image = [cur_image, []]\n            if type(caption) != list:\n                caption = [caption, []]\n\n            open_square, open_square_line, close_square, close_square_line = \\\n                    find_open_and_close_braces(line_index, index, '[', lines)\n            cap_begin = open_square + 1\n\n            sub_caption = assemble_caption(open_square_line, \\\n                    cap_begin, close_square_line, close_square, lines)\n            caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)\n\n            open_curly, open_curly_line, close_curly, dummy = \\\n                    find_open_and_close_braces(close_square_line, \\\n                    close_square, '{', lines)\n            sub_image = lines[open_curly_line][open_curly + 1:close_curly]\n\n            cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)\n\n        \"\"\"\n        SUBFIGURES -\n        structure of a subfigure (inside a figure tag):\n        \\subfigure[CAPTION]{\n        \\includegraphics[options]{FILENAME}}\n\n        also associated with the overall caption of the enclosing figure\n        \"\"\"\n\n        index = line.find(subfig_head)\n        if index > -1:\n            # like with subfloats, we need a different structure for keepin\n            # track of this stuff\n            if type(cur_image) != list:\n                cur_image = [cur_image, []]\n            if type(caption) != list:\n                caption = [caption, []]\n\n            open_square, open_square_line, close_square, close_square_line = \\\n                    find_open_and_close_braces(line_index, index, '[', lines)\n            cap_begin = open_square + 1\n\n            sub_caption = assemble_caption(open_square_line, \\\n                    
cap_begin, close_square_line, close_square, lines)\n            caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)\n\n            index_cpy = index\n\n            # find the graphics tag to get the filename\n            # it is okay if we eat lines here\n            index = line.find(includegraphics_head)\n            while index == -1 and (line_index + 1) < len(lines):\n                line_index = line_index + 1\n                line = lines[line_index]\n                index = line.find(includegraphics_head)\n            if line_index == len(lines):\n                # didn't find the image name on line\n                line_index = index_cpy\n\n            open_curly, open_curly_line, close_curly, dummy = \\\n                    find_open_and_close_braces(line_index, \\\n                    index, '{', lines)\n            sub_image = lines[open_curly_line][open_curly + 1:close_curly]\n\n            cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)\n\n        \"\"\"\n        LABELS -\n        structure of a label:\n        \\label{somelabelnamewhichprobablyincludesacolon}\n\n        Labels are used to tag images and will later be used in ref tags\n        to reference them.  This is interesting because in effect the refs\n        to a plot are additional caption for it.\n\n        Notes: labels can be used for many more things than just plots.\n        We'll have to experiment with how to best associate a label with an\n        image.. if it's in the caption, it's easy.  If it's in a figure, it's\n        still okay... 
but the images that aren't in figure tags are numerous.\n        \"\"\"\n        index = line.find(label_head)\n        if index > -1 and in_figure_tag:\n            open_curly, open_curly_line, close_curly, dummy = \\\n                    find_open_and_close_braces(line_index, \\\n                    index, '{', lines)\n            label = lines[open_curly_line][open_curly + 1:close_curly]\n            if label not in labels:\n                active_label = label\n            labels.append(label)\n\n        \"\"\"\n        FIGURE\n\n        important: we put the check for the end of the figure at the end\n        of the loop in case some pathological person puts everything in one\n        line\n        \"\"\"\n\n        index = max([line.find(figure_tail), line.find(doc_tail)])\n        if index > -1:\n            in_figure_tag = 0\n\n            cur_image, caption, extracted_image_data = \\\n                    put_it_together(cur_image, caption, active_label, extracted_image_data, \\\n                                    line_index, lines)\n\n        \"\"\"\n        END DOCUMENT\n\n        we shouldn't look at anything after the end document tag is found\n        \"\"\"\n\n        index = line.find(doc_tail)\n        if index > -1:\n            break\n\n    return extracted_image_data\n\ndef put_it_together(cur_image, caption, context, extracted_image_data, line_index, \\\n                    lines):\n    \"\"\"\n    Takes the current image(s) and caption(s) and assembles them into\n    something useful in the extracted_image_data list.\n\n    @param: cur_image (string || list): the image currently being dealt with, or\n        the list of images, in the case of subimages\n    @param: caption (string || list): the caption or captions currently in scope\n    @param: extracted_image_data ([(string, string), (string, string), ...]):\n        a list of tuples of images matched to captions from this document.\n    @param: line_index (int): the index where we are in 
the lines (for\n        searchback and searchforward purposes)\n    @param: lines ([string, string, ...]): the lines in the TeX\n\n    @return: (cur_image, caption, extracted_image_data): the same arguments it\n        was sent, processed appropriately\n    \"\"\"\n\n    if type(cur_image) == list:\n        if cur_image[MAIN_CAPTION_OR_IMAGE] == 'ERROR':\n            cur_image[MAIN_CAPTION_OR_IMAGE] = ''\n        for image in cur_image[SUB_CAPTION_OR_IMAGE]:\n            if image == 'ERROR':\n                cur_image[SUB_CAPTION_OR_IMAGE].remove(image)\n\n    if cur_image != '' and caption != '':\n\n        if type(cur_image) == list and type(caption) == list:\n\n            if cur_image[MAIN_CAPTION_OR_IMAGE] != '' and\\\n                    caption[MAIN_CAPTION_OR_IMAGE] != '':\n                extracted_image_data.append(\n                    (cur_image[MAIN_CAPTION_OR_IMAGE],\n                     caption[MAIN_CAPTION_OR_IMAGE],\n                     context))\n            if type(cur_image[MAIN_CAPTION_OR_IMAGE]) == list:\n                # why is the main image a list?\n                # it's a good idea to attach the main caption to other\n                # things, but the main image can only be used once\n                cur_image[MAIN_CAPTION_OR_IMAGE] = ''\n\n            if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:\n                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:\n                    for index in \\\n                            range(len(cur_image[SUB_CAPTION_OR_IMAGE])):\n                        if index < len(caption[SUB_CAPTION_OR_IMAGE]):\n                            long_caption = \\\n                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \\\n                                caption[SUB_CAPTION_OR_IMAGE][index]\n                        else:\n                            long_caption = \\\n                                caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \\\n                                'Caption not 
extracted'\n                        extracted_image_data.append(\n                            (cur_image[SUB_CAPTION_OR_IMAGE][index],\n                             long_caption, context))\n\n                else:\n                    long_caption = caption[MAIN_CAPTION_OR_IMAGE] + \\\n                        ' : ' + caption[SUB_CAPTION_OR_IMAGE]\n                    for sub_image in cur_image[SUB_CAPTION_OR_IMAGE]:\n                        extracted_image_data.append(\n                            (sub_image, long_caption, context))\n\n            else:\n                if type(caption[SUB_CAPTION_OR_IMAGE]) == list:\n                    long_caption = caption[MAIN_CAPTION_OR_IMAGE]\n                    for sub_cap in caption[SUB_CAPTION_OR_IMAGE]:\n                        long_caption = long_caption + ' : ' + sub_cap\n                    extracted_image_data.append(\n                       (cur_image[SUB_CAPTION_OR_IMAGE], long_caption, context))\n                else:\n                    #wtf are they lists for?\n                    extracted_image_data.append(\n                        (cur_image[SUB_CAPTION_OR_IMAGE],\n                         caption[SUB_CAPTION_OR_IMAGE], context))\n\n        elif type(cur_image) == list:\n            if cur_image[MAIN_CAPTION_OR_IMAGE] != '':\n                extracted_image_data.append(\n                    (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))\n            if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:\n                for image in cur_image[SUB_CAPTION_OR_IMAGE]:\n                    extracted_image_data.append((image, caption, context))\n            else:\n                extracted_image_data.append(\n                    (cur_image[SUB_CAPTION_OR_IMAGE], caption, context))\n\n        elif type(caption) == list:\n            if caption[MAIN_CAPTION_OR_IMAGE] != '':\n                extracted_image_data.append(\n                    (cur_image, caption[MAIN_CAPTION_OR_IMAGE], context))\n            if 
type(caption[SUB_CAPTION_OR_IMAGE]) == list:\n                # multiple caps for one image:\n                long_caption = caption[MAIN_CAPTION_OR_IMAGE]\n                for subcap in caption[SUB_CAPTION_OR_IMAGE]:\n                    if long_caption != '':\n                        long_caption += ' : '\n                    long_caption += subcap\n                extracted_image_data.append((cur_image, long_caption, context))\n            else:\n                extracted_image_data.append(\n                    (cur_image, caption[SUB_CAPTION_OR_IMAGE]. context))\n\n        else:\n            extracted_image_data.append((cur_image, caption, context))\n\n    elif cur_image != '' and caption == '':\n        # we may have missed the caption somewhere.\n        REASONABLE_SEARCHBACK = 25\n        REASONABLE_SEARCHFORWARD = 5\n        curly_no_tag_preceding = '(?= len(lines):\n                    break\n\n                fwd_line = lines[line_index + searchforward]\n                m = re.search(curly_no_tag_preceding, fwd_line)\n\n                if m != None:\n                    open_curly = m.start()\n                    open_curly, open_curly_line, close_curly, \\\n                    close_curly_line = find_open_and_close_braces(\\\n                    line_index + searchforward, open_curly, '{', lines)\n\n                    cap_begin = open_curly + 1\n\n                    caption = assemble_caption(open_curly_line, \\\n                              cap_begin, close_curly_line, close_curly, lines)\n\n                    if type(cur_image) == list:\n                        extracted_image_data.append(\n                            (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))\n                        for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:\n                            extracted_image_data.append((sub_img, caption, context))\n                    else:\n                        extracted_image_data.append((cur_image, caption, context))\n            
        break\n\n        if caption == '':\n            if type(cur_image) == list:\n                extracted_image_data.append(\n                    (cur_image[MAIN_CAPTION_OR_IMAGE], 'No caption found', context))\n                for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:\n                    extracted_image_data.append((sub_img, 'No caption', context))\n            else:\n                extracted_image_data.append(\n\t               (cur_image, 'No caption found', context))\n\n\n    elif caption != '' and cur_image == '':\n        if type(caption) == list:\n            long_caption = caption[MAIN_CAPTION_OR_IMAGE]\n            for subcap in caption[SUB_CAPTION_OR_IMAGE]:\n                long_caption = long_caption + ': ' + subcap\n        else:\n            long_caption = caption\n        extracted_image_data.append(('', 'noimg' + long_caption, context))\n\n\n    # if we're leaving the figure, no sense keeping the data\n    cur_image = ''\n    caption = ''\n\n    return (cur_image, caption, extracted_image_data)\n\ndef intelligently_find_filenames(line, TeX=False, ext=False, commas_okay=False):\n    \"\"\"\n    Find the filename in the line.  We don't support all filenames!  
Just eps\n    and ps for now.\n\n    @param: line (string): the line we want to get a filename out of\n\n    @return: filename ([string, ...]): what is probably the name of the file(s)\n    \"\"\"\n\n    files_included = ['ERROR']\n\n    if commas_okay:\n        valid_for_filename = '\\\\s*[A-Za-z0-9\\\\-\\\\=\\\\+/\\\\\\\\_\\\\.,%#]+'\n    else:\n        valid_for_filename = '\\\\s*[A-Za-z0-9\\\\-\\\\=\\\\+/\\\\\\\\_\\\\.%#]+'\n\n    if ext:\n        valid_for_filename = valid_for_filename + '\\.e*ps[texfi2]*'\n\n    if TeX:\n        valid_for_filename = valid_for_filename + '[\\.latex]*'\n\n    file_inclusion = re.findall('=' + valid_for_filename + '[ ,]', line)\n\n    if len(file_inclusion) > 0:\n        # right now it looks like '=FILENAME,' or '=FILENAME '\n        for file_included in file_inclusion:\n            files_included.append(file_included[1:-1])\n\n    file_inclusion = re.findall('(?:[ps]*file=|figure=)' + \\\n                                valid_for_filename + '[,\\\\]} ]*', line)\n\n    if len(file_inclusion) > 0:\n        # still has the =\n        for file_included in file_inclusion:\n            part_before_equals = file_included.split('=')[0]\n            if len(part_before_equals) != file_included:\n                file_included = file_included[len(part_before_equals) + 1:].strip()\n            if not file_included in files_included:\n                files_included.append(file_included)\n\n    file_inclusion = re.findall('[\"\\'{\\\\[]' + valid_for_filename + '[}\\\\],\"\\']', \\\n                line)\n\n    if len(file_inclusion) > 0:\n        # right now it's got the {} or [] or \"\" or '' around it still\n        for file_included in file_inclusion:\n            file_included = file_included[1:-1]\n            file_included = file_included.strip()\n            if not file_included in files_included:\n                files_included.append(file_included)\n\n    file_inclusion = re.findall('^' + valid_for_filename + '$', line)\n\n    if 
len(file_inclusion) > 0:\n        for file_included in file_inclusion:\n            file_included = file_included.strip()\n            if not file_included in files_included:\n                files_included.append(file_included)\n\n    file_inclusion = re.findall('^' + valid_for_filename + '[,\\\\} $]', line)\n\n    if len(file_inclusion) > 0:\n        for file_included in file_inclusion:\n            file_included = file_included.strip()\n            if not file_included in files_included:\n                files_included.append(file_included)\n\n    file_inclusion = re.findall('\\\\s*' + valid_for_filename + '\\\\s*$', line)\n\n    if len(file_inclusion) > 0:\n        for file_included in file_inclusion:\n            file_included = file_included.strip()\n            if not file_included in files_included:\n                files_included.append(file_included)\n\n    if files_included != ['ERROR']:\n        files_included = files_included[1:] # cut off the dummy\n\n    for file_included in files_included:\n        if file_included == '':\n            files_included.remove(file_included)\n        if ' ' in file_included:\n            for subfile in file_included.split(' '):\n                if not subfile in files_included:\n                    files_included.append(subfile)\n        if ',' in file_included:\n            for subfile in file_included.split(' '):\n                if not subfile in files_included:\n                    files_included.append(subfile)\n\n    return files_included\n\ndef upload_to_site(marcxml, yes_i_know, upload_mode=\"append\"):\n    \"\"\"\n    makes the appropriate calls to bibupload to get the MARCXML record onto\n    the site. Uploads in \"correct\" mode.\n\n    @param: marcxml (string): the absolute location of the MARCXML that was\n        generated by this programme\n    @param: yes_i_know (boolean): if true, no confirmation.  
if false, prompt.\n\n    @output: a new record on the invenio site\n\n    @return: None\n    \"\"\"\n    if not yes_i_know:\n        wait_for_user(wrap_text_in_a_box('You are going to upload new ' + \\\n                                         'plots to the server.'))\n    task_low_level_submission('bibupload', 'admin', upload_mode and '--' + upload_mode or '', marcxml)\n\nhelp_string = \"\"\"\n    name: plotextractor\n    usage:\n            python plotextractor.py -d tar/dir -s scratch/dir\n            python plotextractor.py -i inputfile -u\n            python plotextractor.py --arXiv=arXiv_id\n            python plotextractor.py --recid=recids\n\n    example:\n            python plotextractor.py -d /some/path/with/tarballs\n            python plotextractor.py -i input.txt --no-sdir --extract-text\n            python plotextractor.py --arXiv=hep-ex/0101001\n            python plotextractor.py --recid=13-20,29\n\n    options:\n        -d, --tardir=\n            if you wish to do a batch of tarballs, search the tree\n            rooted at this directory for them\n\n        -s, --scratchdir=\n            the directory for scratchwork (untarring, conversion, etc.).\n            make sure that this directory is one of the allowed dirs in\n            CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS to avoid errors.  with an\n            sdir selected, one xml file will be generated for the whole\n            batch of files processed, and it will live in this sdir.\n\n        -i, --input=\n            if you wish to give an input file for downloading files from\n            arXiv (or wherever), this is the pointer to that file, which\n            should contain urls to download, no more than 1 per line.  
each\n            line should be the url of a tarball or gzipped tarball, and\n            each downloaded item will then be processed.\n\n        -x, --extract-text\n            if there is a pdf with the same base name as the tarball for each\n            tarball this is being run on, running with the -x parameter will\n            run pdftotext on each of these pdfs and store the result in the\n            folder\n\n        -f, --force\n            if you want to overwrite everything that was done before, just\n            force the script to overwrite it.  otherwise it will only run on\n            things that haven't been run on yet (for use with tardir).\n\n        -c, --clean\n            if you wish to do delete all non-essential files that were extracted.\n\n        -u, --call-bibupload, --yes-i-know\n            if you want to upload the plots, ask to call bibupload.  appending\n            the --yes-i-know flag bypasses bibupload's prompt to upload\n\n        --upload-mode=\n            if you use --call-bibupload option, allows to specify in which\n            mode BibUpload should process the input. Can take values:\n            'insert', 'append', 'correct' or 'replace'\n\n        -l, --refno-url\n            Specify an URL to the invenio-instance to query for refno.\n            Defaults to CFG_SITE_URL.\n\n        -k, --skip-refno\n            allows you to skip any refno check\n\n        -r, --recid=\n            if you want to process the tarball of one recid, use this tag.  it\n            will also accept ranges (i.e. 
--recid=13-20)\n\n        --with-docname=\n            allow to choose files to process on the basis of their docname,\n            when used with --recid option\n\n        --with-doctype=\n            allow to choose files to process on the basis of their doctype,\n            when used with --recid option\n\n        --with-docformat=\n            allow to choose files to process on the basis of their format,\n            when used with --recid option\n\n        -a, --arXiv=\n            if you want to process the tarball of one arXiv id, use this tag.\n\n        -t, --tarball=\n            for processing one tarball.\n\n        -q, --squash\n            if you want to squash all MARC into a single MARC file (for easier\n            and faster bibuploading)\n\n        -h, --help\n            Print this help and exit.\n\n    description: extracts plots from a tarfile from arXiv and generates\n        MARCXML that links figures and their captions.  converts all\n        images to PNG format.\n\"\"\"\n\ndef usage():\n    write_message(help_string)\n\nif __name__ == '__main__':\n    main()\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":476154,"cells":{"repo_name":{"kind":"string","value":"twiest/openshift-tools"},"path":{"kind":"string","value":"openshift/installer/vendored/openshift-ansible-3.6.173.0.59/roles/lib_openshift/library/oc_version.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"49326"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# pylint: disable=missing-docstring\n# flake8: noqa: T001\n#     ___ ___ _  _ ___ ___    _ _____ ___ ___\n#    / __| __| \\| | __| _ \\  /_\\_   _| __|   \\\n#   | (_ | _|| .` | _||   / / _ \\| | | _|| |) |\n#    \\___|___|_|\\_|___|_|_\\/_/_\\_\\_|_|___|___/_ _____\n#   |   \\ / _ \\  | \\| |/ _ \\_   _| | __|   \\_ _|_   _|\n#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | |\n#   |___/ \\___/  |_|\\_|\\___/ |_|   |___|___/___| |_|\n#\n# Copyright 2016 Red Hat, Inc. 
and/or its affiliates\n# and other contributors as indicated by the @author tags.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-\n'''\n   OpenShiftCLI class that wraps the oc commands in a subprocess\n'''\n# pylint: disable=too-many-lines\n\nfrom __future__ import print_function\nimport atexit\nimport copy\nimport json\nimport os\nimport re\nimport shutil\nimport subprocess\nimport tempfile\n# pylint: disable=import-error\ntry:\n    import ruamel.yaml as yaml\nexcept ImportError:\n    import yaml\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-\n\n# -*- -*- -*- Begin included fragment: doc/version -*- -*- -*-\n\nDOCUMENTATION = '''\n---\nmodule: oc_version\nshort_description: Return the current openshift version\ndescription:\n  - Return the openshift installed version.  
`oc version`\noptions:\n  state:\n    description:\n    - Currently list is only supported state.\n    required: true\n    default: list\n    choices: [\"list\"]\n    aliases: []\n  kubeconfig:\n    description:\n    - The path for the kubeconfig file to use for authentication\n    required: false\n    default: /etc/origin/master/admin.kubeconfig\n    aliases: []\n  debug:\n    description:\n    - Turn on debug output.\n    required: false\n    default: False\n    aliases: []\nauthor:\n- \"Kenny Woodson \"\nextends_documentation_fragment: []\n'''\n\nEXAMPLES = '''\noc_version:\n- name: get oc version\n  oc_version:\n  register: oc_version\n'''\n\n# -*- -*- -*- End included fragment: doc/version -*- -*- -*-\n\n# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-\n\n\nclass YeditException(Exception):  # pragma: no cover\n    ''' Exception class for Yedit '''\n    pass\n\n\n# pylint: disable=too-many-public-methods\nclass Yedit(object):  # pragma: no cover\n    ''' Class to modify yaml files '''\n    re_valid_key = r\"(((\\[-?\\d+\\])|([0-9a-zA-Z%s/_-]+)).?)+$\"\n    re_key = r\"(?:\\[(-?\\d+)\\])|([0-9a-zA-Z{}/_-]+)\"\n    com_sep = set(['.', '#', '|', ':'])\n\n    # pylint: disable=too-many-arguments\n    def __init__(self,\n                 filename=None,\n                 content=None,\n                 content_type='yaml',\n                 separator='.',\n                 backup=False):\n        self.content = content\n        self._separator = separator\n        self.filename = filename\n        self.__yaml_dict = content\n        self.content_type = content_type\n        self.backup = backup\n        self.load(content_type=self.content_type)\n        if self.__yaml_dict is None:\n            self.__yaml_dict = {}\n\n    @property\n    def separator(self):\n        ''' getter method for separator '''\n        return self._separator\n\n    @separator.setter\n    def separator(self, inc_sep):\n        ''' setter method for 
separator '''\n        self._separator = inc_sep\n\n    @property\n    def yaml_dict(self):\n        ''' getter method for yaml_dict '''\n        return self.__yaml_dict\n\n    @yaml_dict.setter\n    def yaml_dict(self, value):\n        ''' setter method for yaml_dict '''\n        self.__yaml_dict = value\n\n    @staticmethod\n    def parse_key(key, sep='.'):\n        '''parse the key allowing the appropriate separator'''\n        common_separators = list(Yedit.com_sep - set([sep]))\n        return re.findall(Yedit.re_key.format(''.join(common_separators)), key)\n\n    @staticmethod\n    def valid_key(key, sep='.'):\n        '''validate the incoming key'''\n        common_separators = list(Yedit.com_sep - set([sep]))\n        if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):\n            return False\n\n        return True\n\n    @staticmethod\n    def remove_entry(data, key, sep='.'):\n        ''' remove data at location key '''\n        if key == '' and isinstance(data, dict):\n            data.clear()\n            return True\n        elif key == '' and isinstance(data, list):\n            del data[:]\n            return True\n\n        if not (key and Yedit.valid_key(key, sep)) and \\\n           isinstance(data, (list, dict)):\n            return None\n\n        key_indexes = Yedit.parse_key(key, sep)\n        for arr_ind, dict_key in key_indexes[:-1]:\n            if dict_key and isinstance(data, dict):\n                data = data.get(dict_key)\n            elif (arr_ind and isinstance(data, list) and\n                  int(arr_ind) <= len(data) - 1):\n                data = data[int(arr_ind)]\n            else:\n                return None\n\n        # process last index for remove\n        # expected list entry\n        if key_indexes[-1][0]:\n            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501\n                del data[int(key_indexes[-1][0])]\n                return True\n\n        
# expected dict entry\n        elif key_indexes[-1][1]:\n            if isinstance(data, dict):\n                del data[key_indexes[-1][1]]\n                return True\n\n    @staticmethod\n    def add_entry(data, key, item=None, sep='.'):\n        ''' Get an item from a dictionary with key notation a.b.c\n            d = {'a': {'b': 'c'}}}\n            key = a#b\n            return c\n        '''\n        if key == '':\n            pass\n        elif (not (key and Yedit.valid_key(key, sep)) and\n              isinstance(data, (list, dict))):\n            return None\n\n        key_indexes = Yedit.parse_key(key, sep)\n        for arr_ind, dict_key in key_indexes[:-1]:\n            if dict_key:\n                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501\n                    data = data[dict_key]\n                    continue\n\n                elif data and not isinstance(data, dict):\n                    raise YeditException(\"Unexpected item type found while going through key \" +\n                                         \"path: {} (at key: {})\".format(key, dict_key))\n\n                data[dict_key] = {}\n                data = data[dict_key]\n\n            elif (arr_ind and isinstance(data, list) and\n                  int(arr_ind) <= len(data) - 1):\n                data = data[int(arr_ind)]\n            else:\n                raise YeditException(\"Unexpected item type found while going through key path: {}\".format(key))\n\n        if key == '':\n            data = item\n\n        # process last index for add\n        # expected list entry\n        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501\n            data[int(key_indexes[-1][0])] = item\n\n        # expected dict entry\n        elif key_indexes[-1][1] and isinstance(data, dict):\n            data[key_indexes[-1][1]] = item\n\n        # didn't add/update to an existing list, nor add/update key to 
a dict\n        # so we must have been provided some syntax like a.b.c[] = \"data\" for a\n        # non-existent array\n        else:\n            raise YeditException(\"Error adding to object at path: {}\".format(key))\n\n        return data\n\n    @staticmethod\n    def get_entry(data, key, sep='.'):\n        ''' Get an item from a dictionary with key notation a.b.c\n            d = {'a': {'b': 'c'}}}\n            key = a.b\n            return c\n        '''\n        if key == '':\n            pass\n        elif (not (key and Yedit.valid_key(key, sep)) and\n              isinstance(data, (list, dict))):\n            return None\n\n        key_indexes = Yedit.parse_key(key, sep)\n        for arr_ind, dict_key in key_indexes:\n            if dict_key and isinstance(data, dict):\n                data = data.get(dict_key)\n            elif (arr_ind and isinstance(data, list) and\n                  int(arr_ind) <= len(data) - 1):\n                data = data[int(arr_ind)]\n            else:\n                return None\n\n        return data\n\n    @staticmethod\n    def _write(filename, contents):\n        ''' Actually write the file contents to disk. This helps with mocking. 
'''\n\n        tmp_filename = filename + '.yedit'\n\n        with open(tmp_filename, 'w') as yfd:\n            yfd.write(contents)\n\n        os.rename(tmp_filename, filename)\n\n    def write(self):\n        ''' write to file '''\n        if not self.filename:\n            raise YeditException('Please specify a filename.')\n\n        if self.backup and self.file_exists():\n            shutil.copy(self.filename, self.filename + '.orig')\n\n        # Try to set format attributes if supported\n        try:\n            self.yaml_dict.fa.set_block_style()\n        except AttributeError:\n            pass\n\n        # Try to use RoundTripDumper if supported.\n        try:\n            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))\n        except AttributeError:\n            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))\n\n        return (True, self.yaml_dict)\n\n    def read(self):\n        ''' read from file '''\n        # check if it exists\n        if self.filename is None or not self.file_exists():\n            return None\n\n        contents = None\n        with open(self.filename) as yfd:\n            contents = yfd.read()\n\n        return contents\n\n    def file_exists(self):\n        ''' return whether file exists '''\n        if os.path.exists(self.filename):\n            return True\n\n        return False\n\n    def load(self, content_type='yaml'):\n        ''' return yaml file '''\n        contents = self.read()\n\n        if not contents and not self.content:\n            return None\n\n        if self.content:\n            if isinstance(self.content, dict):\n                self.yaml_dict = self.content\n                return self.yaml_dict\n            elif isinstance(self.content, str):\n                contents = self.content\n\n        # check if it is yaml\n        try:\n            if content_type == 'yaml' and contents:\n                # Try to set format attributes 
if supported\n                try:\n                    self.yaml_dict.fa.set_block_style()\n                except AttributeError:\n                    pass\n\n                # Try to use RoundTripLoader if supported.\n                try:\n                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)\n                except AttributeError:\n                    self.yaml_dict = yaml.safe_load(contents)\n\n                # Try to set format attributes if supported\n                try:\n                    self.yaml_dict.fa.set_block_style()\n                except AttributeError:\n                    pass\n\n            elif content_type == 'json' and contents:\n                self.yaml_dict = json.loads(contents)\n        except yaml.YAMLError as err:\n            # Error loading yaml or json\n            raise YeditException('Problem with loading yaml file. {}'.format(err))\n\n        return self.yaml_dict\n\n    def get(self, key):\n        ''' get a specified key'''\n        try:\n            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)\n        except KeyError:\n            entry = None\n\n        return entry\n\n    def pop(self, path, key_or_item):\n        ''' remove a key, value pair from a dict or an item for a list'''\n        try:\n            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)\n        except KeyError:\n            entry = None\n\n        if entry is None:\n            return (False, self.yaml_dict)\n\n        if isinstance(entry, dict):\n            # AUDIT:maybe-no-member makes sense due to fuzzy types\n            # pylint: disable=maybe-no-member\n            if key_or_item in entry:\n                entry.pop(key_or_item)\n                return (True, self.yaml_dict)\n            return (False, self.yaml_dict)\n\n        elif isinstance(entry, list):\n            # AUDIT:maybe-no-member makes sense due to fuzzy types\n            # pylint: disable=maybe-no-member\n            
ind = None\n            try:\n                ind = entry.index(key_or_item)\n            except ValueError:\n                return (False, self.yaml_dict)\n\n            entry.pop(ind)\n            return (True, self.yaml_dict)\n\n        return (False, self.yaml_dict)\n\n    def delete(self, path):\n        ''' remove path from a dict'''\n        try:\n            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)\n        except KeyError:\n            entry = None\n\n        if entry is None:\n            return (False, self.yaml_dict)\n\n        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)\n        if not result:\n            return (False, self.yaml_dict)\n\n        return (True, self.yaml_dict)\n\n    def exists(self, path, value):\n        ''' check if value exists at path'''\n        try:\n            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)\n        except KeyError:\n            entry = None\n\n        if isinstance(entry, list):\n            if value in entry:\n                return True\n            return False\n\n        elif isinstance(entry, dict):\n            if isinstance(value, dict):\n                rval = False\n                for key, val in value.items():\n                    if entry[key] != val:\n                        rval = False\n                        break\n                else:\n                    rval = True\n                return rval\n\n            return value in entry\n\n        return entry == value\n\n    def append(self, path, value):\n        '''append value to a list'''\n        try:\n            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)\n        except KeyError:\n            entry = None\n\n        if entry is None:\n            self.put(path, [])\n            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)\n        if not isinstance(entry, list):\n            return (False, self.yaml_dict)\n\n        # AUDIT:maybe-no-member makes 
sense due to loading data from\n        # a serialized format.\n        # pylint: disable=maybe-no-member\n        entry.append(value)\n        return (True, self.yaml_dict)\n\n    # pylint: disable=too-many-arguments\n    def update(self, path, value, index=None, curr_value=None):\n        ''' put path, value into a dict '''\n        try:\n            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)\n        except KeyError:\n            entry = None\n\n        if isinstance(entry, dict):\n            # AUDIT:maybe-no-member makes sense due to fuzzy types\n            # pylint: disable=maybe-no-member\n            if not isinstance(value, dict):\n                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +\n                                     'value=[{}] type=[{}]'.format(value, type(value)))\n\n            entry.update(value)\n            return (True, self.yaml_dict)\n\n        elif isinstance(entry, list):\n            # AUDIT:maybe-no-member makes sense due to fuzzy types\n            # pylint: disable=maybe-no-member\n            ind = None\n            if curr_value:\n                try:\n                    ind = entry.index(curr_value)\n                except ValueError:\n                    return (False, self.yaml_dict)\n\n            elif index is not None:\n                ind = index\n\n            if ind is not None and entry[ind] != value:\n                entry[ind] = value\n                return (True, self.yaml_dict)\n\n            # see if it exists in the list\n            try:\n                ind = entry.index(value)\n            except ValueError:\n                # doesn't exist, append it\n                entry.append(value)\n                return (True, self.yaml_dict)\n\n            # already exists, return\n            if ind is not None:\n                return (False, self.yaml_dict)\n        return (False, self.yaml_dict)\n\n    def put(self, path, value):\n        ''' put 
path, value into a dict '''\n        try:\n            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)\n        except KeyError:\n            entry = None\n\n        if entry == value:\n            return (False, self.yaml_dict)\n\n        # deepcopy didn't work\n        # Try to use ruamel.yaml and fallback to pyyaml\n        try:\n            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,\n                                                      default_flow_style=False),\n                                 yaml.RoundTripLoader)\n        except AttributeError:\n            tmp_copy = copy.deepcopy(self.yaml_dict)\n\n        # set the format attributes if available\n        try:\n            tmp_copy.fa.set_block_style()\n        except AttributeError:\n            pass\n\n        result = Yedit.add_entry(tmp_copy, path, value, self.separator)\n        if result is None:\n            return (False, self.yaml_dict)\n\n        # When path equals \"\" it is a special case.\n        # \"\" refers to the root of the document\n        # Only update the root path (entire document) when its a list or dict\n        if path == '':\n            if isinstance(result, list) or isinstance(result, dict):\n                self.yaml_dict = result\n                return (True, self.yaml_dict)\n\n            return (False, self.yaml_dict)\n\n        self.yaml_dict = tmp_copy\n\n        return (True, self.yaml_dict)\n\n    def create(self, path, value):\n        ''' create a yaml file '''\n        if not self.file_exists():\n            # deepcopy didn't work\n            # Try to use ruamel.yaml and fallback to pyyaml\n            try:\n                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,\n                                                          default_flow_style=False),\n                                     yaml.RoundTripLoader)\n            except AttributeError:\n                tmp_copy = copy.deepcopy(self.yaml_dict)\n\n            # 
set the format attributes if available\n            try:\n                tmp_copy.fa.set_block_style()\n            except AttributeError:\n                pass\n\n            result = Yedit.add_entry(tmp_copy, path, value, self.separator)\n            if result is not None:\n                self.yaml_dict = tmp_copy\n                return (True, self.yaml_dict)\n\n        return (False, self.yaml_dict)\n\n    @staticmethod\n    def get_curr_value(invalue, val_type):\n        '''return the current value'''\n        if invalue is None:\n            return None\n\n        curr_value = invalue\n        if val_type == 'yaml':\n            curr_value = yaml.load(invalue)\n        elif val_type == 'json':\n            curr_value = json.loads(invalue)\n\n        return curr_value\n\n    @staticmethod\n    def parse_value(inc_value, vtype=''):\n        '''determine value type passed'''\n        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',\n                      'on', 'On', 'ON', ]\n        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',\n                       'off', 'Off', 'OFF']\n\n        # It came in as a string but you didn't specify value_type as string\n        # we will convert to bool if it matches any of the above cases\n        if isinstance(inc_value, str) and 'bool' in vtype:\n            if inc_value not in true_bools and inc_value not in false_bools:\n                raise YeditException('Not a boolean type. 
str=[{}] vtype=[{}]'.format(inc_value, vtype))\n        elif isinstance(inc_value, bool) and 'str' in vtype:\n            inc_value = str(inc_value)\n\n        # There is a special case where '' will turn into None after yaml loading it so skip\n        if isinstance(inc_value, str) and inc_value == '':\n            pass\n        # If vtype is not str then go ahead and attempt to yaml load it.\n        elif isinstance(inc_value, str) and 'str' not in vtype:\n            try:\n                inc_value = yaml.safe_load(inc_value)\n            except Exception:\n                raise YeditException('Could not determine type of incoming value. ' +\n                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))\n\n        return inc_value\n\n    @staticmethod\n    def process_edits(edits, yamlfile):\n        '''run through a list of edits and process them one-by-one'''\n        results = []\n        for edit in edits:\n            value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))\n            if edit.get('action') == 'update':\n                # pylint: disable=line-too-long\n                curr_value = Yedit.get_curr_value(\n                    Yedit.parse_value(edit.get('curr_value')),\n                    edit.get('curr_value_format'))\n\n                rval = yamlfile.update(edit['key'],\n                                       value,\n                                       edit.get('index'),\n                                       curr_value)\n\n            elif edit.get('action') == 'append':\n                rval = yamlfile.append(edit['key'], value)\n\n            else:\n                rval = yamlfile.put(edit['key'], value)\n\n            if rval[0]:\n                results.append({'key': edit['key'], 'edit': rval[1]})\n\n        return {'changed': len(results) > 0, 'results': results}\n\n    # pylint: disable=too-many-return-statements,too-many-branches\n    @staticmethod\n    def run_ansible(params):\n      
  '''perform the idempotent crud operations'''\n        yamlfile = Yedit(filename=params['src'],\n                         backup=params['backup'],\n                         separator=params['separator'])\n\n        state = params['state']\n\n        if params['src']:\n            rval = yamlfile.load()\n\n            if yamlfile.yaml_dict is None and state != 'present':\n                return {'failed': True,\n                        'msg': 'Error opening file [{}].  Verify that the '.format(params['src']) +\n                               'file exists, that it is has correct permissions, and is valid yaml.'}\n\n        if state == 'list':\n            if params['content']:\n                content = Yedit.parse_value(params['content'], params['content_type'])\n                yamlfile.yaml_dict = content\n\n            if params['key']:\n                rval = yamlfile.get(params['key']) or {}\n\n            return {'changed': False, 'result': rval, 'state': state}\n\n        elif state == 'absent':\n            if params['content']:\n                content = Yedit.parse_value(params['content'], params['content_type'])\n                yamlfile.yaml_dict = content\n\n            if params['update']:\n                rval = yamlfile.pop(params['key'], params['value'])\n            else:\n                rval = yamlfile.delete(params['key'])\n\n            if rval[0] and params['src']:\n                yamlfile.write()\n\n            return {'changed': rval[0], 'result': rval[1], 'state': state}\n\n        elif state == 'present':\n            # check if content is different than what is in the file\n            if params['content']:\n                content = Yedit.parse_value(params['content'], params['content_type'])\n\n                # We had no edits to make and the contents are the same\n                if yamlfile.yaml_dict == content and \\\n                   params['value'] is None:\n                    return {'changed': False, 'result': 
yamlfile.yaml_dict, 'state': state}\n\n                yamlfile.yaml_dict = content\n\n            # If we were passed a key, value then\n            # we enapsulate it in a list and process it\n            # Key, Value passed to the module : Converted to Edits list #\n            edits = []\n            _edit = {}\n            if params['value'] is not None:\n                _edit['value'] = params['value']\n                _edit['value_type'] = params['value_type']\n                _edit['key'] = params['key']\n\n                if params['update']:\n                    _edit['action'] = 'update'\n                    _edit['curr_value'] = params['curr_value']\n                    _edit['curr_value_format'] = params['curr_value_format']\n                    _edit['index'] = params['index']\n\n                elif params['append']:\n                    _edit['action'] = 'append'\n\n                edits.append(_edit)\n\n            elif params['edits'] is not None:\n                edits = params['edits']\n\n            if edits:\n                results = Yedit.process_edits(edits, yamlfile)\n\n                # if there were changes and a src provided to us we need to write\n                if results['changed'] and params['src']:\n                    yamlfile.write()\n\n                return {'changed': results['changed'], 'result': results['results'], 'state': state}\n\n            # no edits to make\n            if params['src']:\n                # pylint: disable=redefined-variable-type\n                rval = yamlfile.write()\n                return {'changed': rval[0],\n                        'result': rval[1],\n                        'state': state}\n\n            # We were passed content but no src, key or value, or edits.  
Return contents in memory\n            return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}\n        return {'failed': True, 'msg': 'Unkown state passed'}\n\n# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-\n\n# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-\n# pylint: disable=too-many-lines\n# noqa: E301,E302,E303,T001\n\n\nclass OpenShiftCLIError(Exception):\n    '''Exception class for openshiftcli'''\n    pass\n\n\nADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]\n\n\ndef locate_oc_binary():\n    ''' Find and return oc binary file '''\n    # https://github.com/openshift/openshift-ansible/issues/3410\n    # oc can be in /usr/local/bin in some cases, but that may not\n    # be in $PATH due to ansible/sudo\n    paths = os.environ.get(\"PATH\", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS\n\n    oc_binary = 'oc'\n\n    # Use shutil.which if it is available, otherwise fallback to a naive path search\n    try:\n        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))\n        if which_result is not None:\n            oc_binary = which_result\n    except AttributeError:\n        for path in paths:\n            if os.path.exists(os.path.join(path, oc_binary)):\n                oc_binary = os.path.join(path, oc_binary)\n                break\n\n    return oc_binary\n\n\n# pylint: disable=too-few-public-methods\nclass OpenShiftCLI(object):\n    ''' Class to wrap the command line tools '''\n    def __init__(self,\n                 namespace,\n                 kubeconfig='/etc/origin/master/admin.kubeconfig',\n                 verbose=False,\n                 all_namespaces=False):\n        ''' Constructor for OpenshiftCLI '''\n        self.namespace = namespace\n        self.verbose = verbose\n        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)\n        self.all_namespaces = all_namespaces\n        self.oc_binary = locate_oc_binary()\n\n    
# Pylint allows only 5 arguments to be passed.\n    # pylint: disable=too-many-arguments\n    def _replace_content(self, resource, rname, content, force=False, sep='.'):\n        ''' replace the current object with the content '''\n        res = self._get(resource, rname)\n        if not res['results']:\n            return res\n\n        fname = Utils.create_tmpfile(rname + '-')\n\n        yed = Yedit(fname, res['results'][0], separator=sep)\n        changes = []\n        for key, value in content.items():\n            changes.append(yed.put(key, value))\n\n        if any([change[0] for change in changes]):\n            yed.write()\n\n            atexit.register(Utils.cleanup, [fname])\n\n            return self._replace(fname, force)\n\n        return {'returncode': 0, 'updated': False}\n\n    def _replace(self, fname, force=False):\n        '''replace the current object with oc replace'''\n        # We are removing the 'resourceVersion' to handle\n        # a race condition when modifying oc objects\n        yed = Yedit(fname)\n        results = yed.delete('metadata.resourceVersion')\n        if results[0]:\n            yed.write()\n\n        cmd = ['replace', '-f', fname]\n        if force:\n            cmd.append('--force')\n        return self.openshift_cmd(cmd)\n\n    def _create_from_content(self, rname, content):\n        '''create a temporary file and then call oc create on it'''\n        fname = Utils.create_tmpfile(rname + '-')\n        yed = Yedit(fname, content=content)\n        yed.write()\n\n        atexit.register(Utils.cleanup, [fname])\n\n        return self._create(fname)\n\n    def _create(self, fname):\n        '''call oc create on a filename'''\n        return self.openshift_cmd(['create', '-f', fname])\n\n    def _delete(self, resource, name=None, selector=None):\n        '''call oc delete on a resource'''\n        cmd = ['delete', resource]\n        if selector is not None:\n            cmd.append('--selector={}'.format(selector))\n        
elif name is not None:\n            cmd.append(name)\n        else:\n            raise OpenShiftCLIError('Either name or selector is required when calling delete.')\n\n        return self.openshift_cmd(cmd)\n\n    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501\n        '''process a template\n\n           template_name: the name of the template to process\n           create: whether to send to oc create after processing\n           params: the parameters for the template\n           template_data: the incoming template's data; instead of a file\n        '''\n        cmd = ['process']\n        if template_data:\n            cmd.extend(['-f', '-'])\n        else:\n            cmd.append(template_name)\n        if params:\n            param_str = [\"{}={}\".format(key, str(value).replace(\"'\", r'\"')) for key, value in params.items()]\n            cmd.append('-v')\n            cmd.extend(param_str)\n\n        results = self.openshift_cmd(cmd, output=True, input_data=template_data)\n\n        if results['returncode'] != 0 or not create:\n            return results\n\n        fname = Utils.create_tmpfile(template_name + '-')\n        yed = Yedit(fname, results['results'])\n        yed.write()\n\n        atexit.register(Utils.cleanup, [fname])\n\n        return self.openshift_cmd(['create', '-f', fname])\n\n    def _get(self, resource, name=None, selector=None):\n        '''return a resource by name '''\n        cmd = ['get', resource]\n        if selector is not None:\n            cmd.append('--selector={}'.format(selector))\n        elif name is not None:\n            cmd.append(name)\n\n        cmd.extend(['-o', 'json'])\n\n        rval = self.openshift_cmd(cmd, output=True)\n\n        # Ensure results are retuned in an array\n        if 'items' in rval:\n            rval['results'] = rval['items']\n        elif not isinstance(rval['results'], list):\n            rval['results'] = [rval['results']]\n\n        return 
rval\n\n    def _schedulable(self, node=None, selector=None, schedulable=True):\n        ''' perform oadm manage-node scheduable '''\n        cmd = ['manage-node']\n        if node:\n            cmd.extend(node)\n        else:\n            cmd.append('--selector={}'.format(selector))\n\n        cmd.append('--schedulable={}'.format(schedulable))\n\n        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501\n\n    def _list_pods(self, node=None, selector=None, pod_selector=None):\n        ''' perform oadm list pods\n\n            node: the node in which to list pods\n            selector: the label selector filter if provided\n            pod_selector: the pod selector filter if provided\n        '''\n        cmd = ['manage-node']\n        if node:\n            cmd.extend(node)\n        else:\n            cmd.append('--selector={}'.format(selector))\n\n        if pod_selector:\n            cmd.append('--pod-selector={}'.format(pod_selector))\n\n        cmd.extend(['--list-pods', '-o', 'json'])\n\n        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')\n\n    # pylint: disable=too-many-arguments\n    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):\n        ''' perform oadm manage-node evacuate '''\n        cmd = ['manage-node']\n        if node:\n            cmd.extend(node)\n        else:\n            cmd.append('--selector={}'.format(selector))\n\n        if dry_run:\n            cmd.append('--dry-run')\n\n        if pod_selector:\n            cmd.append('--pod-selector={}'.format(pod_selector))\n\n        if grace_period:\n            cmd.append('--grace-period={}'.format(int(grace_period)))\n\n        if force:\n            cmd.append('--force')\n\n        cmd.append('--evacuate')\n\n        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')\n\n    def _version(self):\n        ''' return the openshift version'''\n   
     return self.openshift_cmd(['version'], output=True, output_type='raw')\n\n    def _import_image(self, url=None, name=None, tag=None):\n        ''' perform image import '''\n        cmd = ['import-image']\n\n        image = '{0}'.format(name)\n        if tag:\n            image += ':{0}'.format(tag)\n\n        cmd.append(image)\n\n        if url:\n            cmd.append('--from={0}/{1}'.format(url, image))\n\n        cmd.append('-n{0}'.format(self.namespace))\n\n        cmd.append('--confirm')\n        return self.openshift_cmd(cmd)\n\n    def _run(self, cmds, input_data):\n        ''' Actually executes the command. This makes mocking easier. '''\n        curr_env = os.environ.copy()\n        curr_env.update({'KUBECONFIG': self.kubeconfig})\n        proc = subprocess.Popen(cmds,\n                                stdin=subprocess.PIPE,\n                                stdout=subprocess.PIPE,\n                                stderr=subprocess.PIPE,\n                                env=curr_env)\n\n        stdout, stderr = proc.communicate(input_data)\n\n        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')\n\n    # pylint: disable=too-many-arguments,too-many-branches\n    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):\n        '''Base command for oc '''\n        cmds = [self.oc_binary]\n\n        if oadm:\n            cmds.append('adm')\n\n        cmds.extend(cmd)\n\n        if self.all_namespaces:\n            cmds.extend(['--all-namespaces'])\n        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501\n            cmds.extend(['-n', self.namespace])\n\n        if self.verbose:\n            print(' '.join(cmds))\n\n        try:\n            returncode, stdout, stderr = self._run(cmds, input_data)\n        except OSError as ex:\n            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)\n\n       
 rval = {\"returncode\": returncode,\n                \"cmd\": ' '.join(cmds)}\n\n        if output_type == 'json':\n            rval['results'] = {}\n            if output and stdout:\n                try:\n                    rval['results'] = json.loads(stdout)\n                except ValueError as verr:\n                    if \"No JSON object could be decoded\" in verr.args:\n                        rval['err'] = verr.args\n        elif output_type == 'raw':\n            rval['results'] = stdout if output else ''\n\n        if self.verbose:\n            print(\"STDOUT: {0}\".format(stdout))\n            print(\"STDERR: {0}\".format(stderr))\n\n        if 'err' in rval or returncode != 0:\n            rval.update({\"stderr\": stderr,\n                         \"stdout\": stdout})\n\n        return rval\n\n\nclass Utils(object):  # pragma: no cover\n    ''' utilities for openshiftcli modules '''\n\n    @staticmethod\n    def _write(filename, contents):\n        ''' Actually write the file contents to disk. This helps with mocking. 
'''\n\n        with open(filename, 'w') as sfd:\n            sfd.write(contents)\n\n    @staticmethod\n    def create_tmp_file_from_contents(rname, data, ftype='yaml'):\n        ''' create a file in tmp with name and contents'''\n\n        tmp = Utils.create_tmpfile(prefix=rname)\n\n        if ftype == 'yaml':\n            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage\n            # pylint: disable=no-member\n            if hasattr(yaml, 'RoundTripDumper'):\n                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))\n            else:\n                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))\n\n        elif ftype == 'json':\n            Utils._write(tmp, json.dumps(data))\n        else:\n            Utils._write(tmp, data)\n\n        # Register cleanup when module is done\n        atexit.register(Utils.cleanup, [tmp])\n        return tmp\n\n    @staticmethod\n    def create_tmpfile_copy(inc_file):\n        '''create a temporary copy of a file'''\n        tmpfile = Utils.create_tmpfile('lib_openshift-')\n        Utils._write(tmpfile, open(inc_file).read())\n\n        # Cleanup the tmpfile\n        atexit.register(Utils.cleanup, [tmpfile])\n\n        return tmpfile\n\n    @staticmethod\n    def create_tmpfile(prefix='tmp'):\n        ''' Generates and returns a temporary file name '''\n\n        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:\n            return tmp.name\n\n    @staticmethod\n    def create_tmp_files_from_contents(content, content_type=None):\n        '''Turn an array of dict: filename, content into a files array'''\n        if not isinstance(content, list):\n            content = [content]\n        files = []\n        for item in content:\n            path = Utils.create_tmp_file_from_contents(item['path'] + '-',\n                                                       item['data'],\n                                                       ftype=content_type)\n      
      files.append({'name': os.path.basename(item['path']),\n                          'path': path})\n        return files\n\n    @staticmethod\n    def cleanup(files):\n        '''Clean up on exit '''\n        for sfile in files:\n            if os.path.exists(sfile):\n                if os.path.isdir(sfile):\n                    shutil.rmtree(sfile)\n                elif os.path.isfile(sfile):\n                    os.remove(sfile)\n\n    @staticmethod\n    def exists(results, _name):\n        ''' Check to see if the results include the name '''\n        if not results:\n            return False\n\n        if Utils.find_result(results, _name):\n            return True\n\n        return False\n\n    @staticmethod\n    def find_result(results, _name):\n        ''' Find the specified result by name'''\n        rval = None\n        for result in results:\n            if 'metadata' in result and result['metadata']['name'] == _name:\n                rval = result\n                break\n\n        return rval\n\n    @staticmethod\n    def get_resource_file(sfile, sfile_type='yaml'):\n        ''' return the service file '''\n        contents = None\n        with open(sfile) as sfd:\n            contents = sfd.read()\n\n        if sfile_type == 'yaml':\n            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage\n            # pylint: disable=no-member\n            if hasattr(yaml, 'RoundTripLoader'):\n                contents = yaml.load(contents, yaml.RoundTripLoader)\n            else:\n                contents = yaml.safe_load(contents)\n        elif sfile_type == 'json':\n            contents = json.loads(contents)\n\n        return contents\n\n    @staticmethod\n    def filter_versions(stdout):\n        ''' filter the oc version output '''\n\n        version_dict = {}\n        version_search = ['oc', 'openshift', 'kubernetes']\n\n        for line in stdout.strip().split('\\n'):\n            for term in version_search:\n                if not 
line:\n                    continue\n                if line.startswith(term):\n                    version_dict[term] = line.split()[-1]\n\n        # horrible hack to get openshift version in Openshift 3.2\n        #  By default \"oc version in 3.2 does not return an \"openshift\" version\n        if \"openshift\" not in version_dict:\n            version_dict[\"openshift\"] = version_dict[\"oc\"]\n\n        return version_dict\n\n    @staticmethod\n    def add_custom_versions(versions):\n        ''' create custom versions strings '''\n\n        versions_dict = {}\n\n        for tech, version in versions.items():\n            # clean up \"-\" from version\n            if \"-\" in version:\n                version = version.split(\"-\")[0]\n\n            if version.startswith('v'):\n                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]\n                # \"v3.3.0.33\" is what we have, we want \"3.3\"\n                versions_dict[tech + '_short'] = version[1:4]\n\n        return versions_dict\n\n    @staticmethod\n    def openshift_installed():\n        ''' check if openshift is installed '''\n        import rpm\n\n        transaction_set = rpm.TransactionSet()\n        rpmquery = transaction_set.dbMatch(\"name\", \"atomic-openshift\")\n\n        return rpmquery.count() > 0\n\n    # Disabling too-many-branches.  This is a yaml dictionary comparison function\n    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements\n    @staticmethod\n    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):\n        ''' Given a user defined definition, compare it with the results given back by our query.  
'''\n\n        # Currently these values are autogenerated and we do not need to check them\n        skip = ['metadata', 'status']\n        if skip_keys:\n            skip.extend(skip_keys)\n\n        for key, value in result_def.items():\n            if key in skip:\n                continue\n\n            # Both are lists\n            if isinstance(value, list):\n                if key not in user_def:\n                    if debug:\n                        print('User data does not have key [%s]' % key)\n                        print('User data: %s' % user_def)\n                    return False\n\n                if not isinstance(user_def[key], list):\n                    if debug:\n                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))\n                    return False\n\n                if len(user_def[key]) != len(value):\n                    if debug:\n                        print(\"List lengths are not equal.\")\n                        print(\"key=[%s]: user_def[%s] != value[%s]\" % (key, len(user_def[key]), len(value)))\n                        print(\"user_def: %s\" % user_def[key])\n                        print(\"value: %s\" % value)\n                    return False\n\n                for values in zip(user_def[key], value):\n                    if isinstance(values[0], dict) and isinstance(values[1], dict):\n                        if debug:\n                            print('sending list - list')\n                            print(type(values[0]))\n                            print(type(values[1]))\n                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)\n                        if not result:\n                            print('list compare returned false')\n                            return False\n\n                    elif value != user_def[key]:\n                        if debug:\n                            print('value should be 
identical')\n                            print(user_def[key])\n                            print(value)\n                        return False\n\n            # recurse on a dictionary\n            elif isinstance(value, dict):\n                if key not in user_def:\n                    if debug:\n                        print(\"user_def does not have key [%s]\" % key)\n                    return False\n                if not isinstance(user_def[key], dict):\n                    if debug:\n                        print(\"dict returned false: not instance of dict\")\n                    return False\n\n                # before passing ensure keys match\n                api_values = set(value.keys()) - set(skip)\n                user_values = set(user_def[key].keys()) - set(skip)\n                if api_values != user_values:\n                    if debug:\n                        print(\"keys are not equal in dict\")\n                        print(user_values)\n                        print(api_values)\n                    return False\n\n                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)\n                if not result:\n                    if debug:\n                        print(\"dict returned false\")\n                        print(result)\n                    return False\n\n            # Verify each key, value pair is the same\n            else:\n                if key not in user_def or value != user_def[key]:\n                    if debug:\n                        print(\"value not equal; user_def does not have key\")\n                        print(key)\n                        print(value)\n                        if key in user_def:\n                            print(user_def[key])\n                    return False\n\n        if debug:\n            print('returning true')\n        return True\n\nclass OpenShiftCLIConfig(object):\n    '''Generic Config'''\n    def __init__(self, rname, namespace, 
kubeconfig, options):\n        self.kubeconfig = kubeconfig\n        self.name = rname\n        self.namespace = namespace\n        self._options = options\n\n    @property\n    def config_options(self):\n        ''' return config options '''\n        return self._options\n\n    def to_option_list(self, ascommalist=''):\n        '''return all options as a string\n           if ascommalist is set to the name of a key, and\n           the value of that key is a dict, format the dict\n           as a list of comma delimited key=value pairs'''\n        return self.stringify(ascommalist)\n\n    def stringify(self, ascommalist=''):\n        ''' return the options hash as cli params in a string\n            if ascommalist is set to the name of a key, and\n            the value of that key is a dict, format the dict\n            as a list of comma delimited key=value pairs '''\n        rval = []\n        for key in sorted(self.config_options.keys()):\n            data = self.config_options[key]\n            if data['include'] \\\n               and (data['value'] or isinstance(data['value'], int)):\n                if key == ascommalist:\n                    val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])\n                else:\n                    val = data['value']\n                rval.append('--{}={}'.format(key.replace('_', '-'), val))\n\n        return rval\n\n\n# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-\n\n# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*-\n\n\n# pylint: disable=too-many-instance-attributes\nclass OCVersion(OpenShiftCLI):\n    ''' Class to wrap the oc command line tools '''\n    # pylint allows 5\n    # pylint: disable=too-many-arguments\n    def __init__(self,\n                 config,\n                 debug):\n        ''' Constructor for OCVersion '''\n        super(OCVersion, self).__init__(None, config)\n        self.debug = debug\n\n    def get(self):\n        '''get 
and return version information '''\n\n        results = {}\n\n        version_results = self._version()\n\n        if version_results['returncode'] == 0:\n            filtered_vers = Utils.filter_versions(version_results['results'])\n            custom_vers = Utils.add_custom_versions(filtered_vers)\n\n            results['returncode'] = version_results['returncode']\n            results.update(filtered_vers)\n            results.update(custom_vers)\n\n            return results\n\n        raise OpenShiftCLIError('Problem detecting openshift version.')\n\n    @staticmethod\n    def run_ansible(params):\n        '''run the idempotent ansible code'''\n        oc_version = OCVersion(params['kubeconfig'], params['debug'])\n\n        if params['state'] == 'list':\n\n            #pylint: disable=protected-access\n            result = oc_version.get()\n            return {'state': params['state'],\n                    'results': result,\n                    'changed': False}\n\n# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*-\n\n# -*- -*- -*- Begin included fragment: ansible/oc_version.py -*- -*- -*-\n\ndef main():\n    ''' ansible oc module for version '''\n\n    module = AnsibleModule(\n        argument_spec=dict(\n            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),\n            state=dict(default='list', type='str',\n                       choices=['list']),\n            debug=dict(default=False, type='bool'),\n        ),\n        supports_check_mode=True,\n    )\n\n    rval = OCVersion.run_ansible(module.params)\n    if 'failed' in rval:\n        module.fail_json(**rval)\n\n\n    module.exit_json(**rval)\n\n\nif __name__ == '__main__':\n    main()\n\n# -*- -*- -*- End included fragment: ansible/oc_version.py -*- -*- 
-*-\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476155,"cells":{"repo_name":{"kind":"string","value":"ropik/chromium"},"path":{"kind":"string","value":"third_party/mesa/MesaLib/src/mapi/glapi/gen/gl_SPARC_asm.py"},"copies":{"kind":"string","value":"33"},"size":{"kind":"string","value":"9068"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# (C) Copyright IBM Corporation 2004\n# All Rights Reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# on the rights to use, copy, modify, merge, publish, distribute, sub\n# license, and/or sell copies of the Software, and to permit persons to whom\n# the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice (including the next\n# paragraph) shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  
IN NO EVENT SHALL\n# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n# Authors:\n#    Ian Romanick \n\nimport license\nimport gl_XML, glX_XML\nimport sys, getopt\n\nclass PrintGenericStubs(gl_XML.gl_print_base):\n\tdef __init__(self):\n\t\tgl_XML.gl_print_base.__init__(self)\n\t\tself.name = \"gl_SPARC_asm.py (from Mesa)\"\n\t\tself.license = license.bsd_license_template % ( \\\n\"\"\"Copyright (C) 1999-2003  Brian Paul   All Rights Reserved.\n(C) Copyright IBM Corporation 2004\"\"\", \"BRIAN PAUL, IBM\")\n\n\n\tdef printRealHeader(self):\n\t\tprint '#include \"glapi/glapioffsets.h\"'\n\t\tprint ''\n\t\tprint '#ifdef __arch64__'\n\t\tprint '#define GL_OFF(N)\\t((N) * 8)'\n\t\tprint '#define GL_LL\\t\\tldx'\n\t\tprint '#define GL_TIE_LD(SYM)\\t%tie_ldx(SYM)'\n\t\tprint '#define GL_STACK_SIZE\\t128'\n\t\tprint '#else'\n\t\tprint '#define GL_OFF(N)\\t((N) * 4)'\n\t\tprint '#define GL_LL\\t\\tld'\n\t\tprint '#define GL_TIE_LD(SYM)\\t%tie_ld(SYM)'\n\t\tprint '#define GL_STACK_SIZE\\t64'\n\t\tprint '#endif'\n\t\tprint ''\n\t\tprint '#define GLOBL_FN(x) .globl x ; .type x, @function'\n\t\tprint '#define HIDDEN(x) .hidden x'\n\t\tprint ''\n\t\tprint '\\t.register %g2, #scratch'\n\t\tprint '\\t.register %g3, #scratch'\n\t\tprint ''\n\t\tprint '\\t.text'\n\t\tprint ''\n\t\tprint '\\tGLOBL_FN(__glapi_sparc_icache_flush)'\n\t\tprint '\\tHIDDEN(__glapi_sparc_icache_flush)'\n\t\tprint '\\t.type\\t__glapi_sparc_icache_flush, @function'\n\t\tprint '__glapi_sparc_icache_flush: /* %o0 = insn_addr */'\n\t\tprint '\\tflush\\t%o0'\n\t\tprint '\\tretl'\n\t\tprint '\\t nop'\n\t\tprint ''\n\t\tprint '\\t.align\\t32'\n\t\tprint ''\n\t\tprint '\\t.type\\t__glapi_sparc_get_pc, @function'\n\t\tprint '__glapi_sparc_get_pc:'\n\t\tprint '\\tretl'\n\t\tprint '\\t add\\t%o7, %g2, %g2'\n\t\tprint 
'\\t.size\\t__glapi_sparc_get_pc, .-__glapi_sparc_get_pc'\n\t\tprint ''\n\t\tprint '#ifdef GLX_USE_TLS'\n\t\tprint ''\n\t\tprint '\\tGLOBL_FN(__glapi_sparc_get_dispatch)'\n\t\tprint '\\tHIDDEN(__glapi_sparc_get_dispatch)'\n\t\tprint '__glapi_sparc_get_dispatch:'\n\t\tprint '\\tmov\\t%o7, %g1'\n\t\tprint '\\tsethi\\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'\n\t\tprint '\\tcall\\t__glapi_sparc_get_pc'\n\t\tprint '\\tadd\\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'\n\t\tprint '\\tmov\\t%g1, %o7'\n\t\tprint '\\tsethi\\t%tie_hi22(_glapi_tls_Dispatch), %g1'\n\t\tprint '\\tadd\\t%g1, %tie_lo10(_glapi_tls_Dispatch), %g1'\n\t\tprint '\\tGL_LL\\t[%g2 + %g1], %g2, GL_TIE_LD(_glapi_tls_Dispatch)'\n\t\tprint '\\tretl'\n\t\tprint '\\t mov\\t%g2, %o0'\n\t\tprint ''\n\t\tprint '\\t.data'\n\t\tprint '\\t.align\\t32'\n\t\tprint ''\n\t\tprint '\\t/* --> sethi %hi(_glapi_tls_Dispatch), %g1 */'\n\t\tprint '\\t/* --> or %g1, %lo(_glapi_tls_Dispatch), %g1 */'\n\t\tprint '\\tGLOBL_FN(__glapi_sparc_tls_stub)'\n\t\tprint '\\tHIDDEN(__glapi_sparc_tls_stub)'\n\t\tprint '__glapi_sparc_tls_stub: /* Call offset in %g3 */'\n\t\tprint '\\tmov\\t%o7, %g1'\n\t\tprint '\\tsethi\\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'\n\t\tprint '\\tcall\\t__glapi_sparc_get_pc'\n\t\tprint '\\tadd\\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'\n\t\tprint '\\tmov\\t%g1, %o7'\n\t\tprint '\\tsrl\\t%g3, 10, %g3'\n\t\tprint '\\tsethi\\t%tie_hi22(_glapi_tls_Dispatch), %g1'\n\t\tprint '\\tadd\\t%g1, %tie_lo10(_glapi_tls_Dispatch), %g1'\n\t\tprint '\\tGL_LL\\t[%g2 + %g1], %g2, GL_TIE_LD(_glapi_tls_Dispatch)'\n\t\tprint '\\tGL_LL\\t[%g7+%g2], %g1'\n\t\tprint '\\tGL_LL\\t[%g1 + %g3], %g1'\n\t\tprint '\\tjmp\\t%g1'\n\t\tprint '\\t nop'\n\t\tprint '\\t.size\\t__glapi_sparc_tls_stub, .-__glapi_sparc_tls_stub'\n\t\tprint ''\n\t\tprint '#define GL_STUB(fn, off)\\t\\t\\t\\t\\\\'\n\t\tprint '\\tGLOBL_FN(fn);\\t\\t\\t\\t\\t\\\\'\n\t\tprint 'fn:\\tba\\t__glapi_sparc_tls_stub;\\t\\t\\t\\\\'\n\t\tprint '\\t sethi\\tGL_OFF(off), %g3;\\t\\t\\t\\\\'\n\t\tprint 
'\\t.size\\tfn,.-fn;'\n\t\tprint ''\n\t\tprint '#elif defined(PTHREADS)'\n\t\tprint ''\n\t\tprint '\\t/* 64-bit 0x00 --> sethi %hh(_glapi_Dispatch), %g1 */'\n\t\tprint '\\t/* 64-bit 0x04 --> sethi %lm(_glapi_Dispatch), %g2 */'\n\t\tprint '\\t/* 64-bit 0x08 --> or %g1, %hm(_glapi_Dispatch), %g1 */'\n\t\tprint '\\t/* 64-bit 0x0c --> sllx %g1, 32, %g1 */'\n\t\tprint '\\t/* 64-bit 0x10 --> add %g1, %g2, %g1 */'\n\t\tprint '\\t/* 64-bit 0x14 --> ldx [%g1 + %lo(_glapi_Dispatch)], %g1 */'\n\t\tprint ''\n\t\tprint '\\t/* 32-bit 0x00 --> sethi %hi(_glapi_Dispatch), %g1 */'\n\t\tprint '\\t/* 32-bit 0x04 --> ld [%g1 + %lo(_glapi_Dispatch)], %g1 */'\n\t\tprint ''\n\t\tprint '\\t.data'\n\t\tprint '\\t.align\\t32'\n\t\tprint ''\n\t\tprint '\\tGLOBL_FN(__glapi_sparc_pthread_stub)'\n\t\tprint '\\tHIDDEN(__glapi_sparc_pthread_stub)'\n\t\tprint '__glapi_sparc_pthread_stub: /* Call offset in %g3 */'\n\t\tprint '\\tmov\\t%o7, %g1'\n\t\tprint '\\tsethi\\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'\n\t\tprint '\\tcall\\t__glapi_sparc_get_pc'\n\t\tprint '\\t add\\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'\n\t\tprint '\\tmov\\t%g1, %o7'\n\t\tprint '\\tsethi\\t%hi(_glapi_Dispatch), %g1'\n\t\tprint '\\tor\\t%g1, %lo(_glapi_Dispatch), %g1'\n\t\tprint '\\tsrl\\t%g3, 10, %g3'\n\t\tprint '\\tGL_LL\\t[%g2+%g1], %g2'\n\t\tprint '\\tGL_LL\\t[%g2], %g1'\n\t\tprint '\\tcmp\\t%g1, 0'\n\t\tprint '\\tbe\\t2f'\n\t\tprint '\\t nop'\n\t\tprint '1:\\tGL_LL\\t[%g1 + %g3], %g1'\n\t\tprint '\\tjmp\\t%g1'\n\t\tprint '\\t nop'\n\t\tprint '2:\\tsave\\t%sp, GL_STACK_SIZE, %sp'\n\t\tprint '\\tmov\\t%g3, %l0'\n\t\tprint '\\tcall\\t_glapi_get_dispatch'\n\t\tprint '\\t nop'\n\t\tprint '\\tmov\\t%o0, %g1'\n\t\tprint '\\tmov\\t%l0, %g3'\n\t\tprint '\\tba\\t1b'\n\t\tprint '\\t restore %g0, %g0, %g0'\n\t\tprint '\\t.size\\t__glapi_sparc_pthread_stub, .-__glapi_sparc_pthread_stub'\n\t\tprint ''\n\t\tprint '#define GL_STUB(fn, off)\\t\\t\\t\\\\'\n\t\tprint '\\tGLOBL_FN(fn);\\t\\t\\t\\t\\\\'\n\t\tprint 
'fn:\\tba\\t__glapi_sparc_pthread_stub;\\t\\\\'\n\t\tprint '\\t sethi\\tGL_OFF(off), %g3;\\t\\t\\\\'\n\t\tprint '\\t.size\\tfn,.-fn;'\n\t\tprint ''\n\t\tprint '#else /* Non-threaded version. */'\n\t\tprint ''\n\t\tprint '\\t.type\t__glapi_sparc_nothread_stub, @function'\n\t\tprint '__glapi_sparc_nothread_stub: /* Call offset in %g3 */'\n\t\tprint '\\tmov\\t%o7, %g1'\n\t\tprint '\\tsethi\\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'\n\t\tprint '\\tcall\\t__glapi_sparc_get_pc'\n\t\tprint '\\t add\\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'\n\t\tprint '\\tmov\\t%g1, %o7'\n\t\tprint '\\tsrl\\t%g3, 10, %g3'\n\t\tprint '\\tsethi\\t%hi(_glapi_Dispatch), %g1'\n\t\tprint '\\tor\\t%g1, %lo(_glapi_Dispatch), %g1'\n\t\tprint '\\tGL_LL\\t[%g2+%g1], %g2'\n\t\tprint '\\tGL_LL\\t[%g2], %g1'\n\t\tprint '\\tGL_LL\\t[%g1 + %g3], %g1'\n\t\tprint '\\tjmp\\t%g1'\n\t\tprint '\\t nop'\n\t\tprint '\\t.size\\t__glapi_sparc_nothread_stub, .-__glapi_sparc_nothread_stub'\n\t\tprint ''\n\t\tprint '#define GL_STUB(fn, off)\\t\\t\\t\\\\'\n\t\tprint '\\tGLOBL_FN(fn);\\t\\t\\t\\t\\\\'\n\t\tprint 'fn:\\tba\\t__glapi_sparc_nothread_stub;\\t\\\\'\n\t\tprint '\\t sethi\\tGL_OFF(off), %g3;\\t\\t\\\\'\n\t\tprint '\\t.size\\tfn,.-fn;'\n\t\tprint ''\n\t\tprint '#endif'\n\t\tprint ''\n\t\tprint '#define GL_STUB_ALIAS(fn, alias)\t\t\\\\'\n\t\tprint '\t.globl\tfn;\t\t\t\t\\\\'\n\t\tprint '\t.set\tfn, alias'\n\t\tprint ''\n\t\tprint '\\t.text'\n\t\tprint '\\t.align\\t32'\n\t\tprint ''\n\t\tprint '\\t.globl\\tgl_dispatch_functions_start'\n\t\tprint '\\tHIDDEN(gl_dispatch_functions_start)'\n\t\tprint 'gl_dispatch_functions_start:'\n\t\tprint ''\n\t\treturn\n\n\tdef printRealFooter(self):\n\t\tprint ''\n\t\tprint '\\t.globl\\tgl_dispatch_functions_end'\n\t\tprint '\\tHIDDEN(gl_dispatch_functions_end)'\n\t\tprint 'gl_dispatch_functions_end:'\n\t\treturn\n\n\tdef printBody(self, api):\n\t\tfor f in api.functionIterateByOffset():\n\t\t\tname = f.dispatch_name()\n\n\t\t\tprint '\\tGL_STUB(gl%s, _gloffset_%s)' % (name, 
f.name)\n\n\t\t\tif not f.is_static_entry_point(f.name):\n\t\t\t\tprint '\\tHIDDEN(gl%s)' % (name)\n\n\t\tfor f in api.functionIterateByOffset():\n\t\t\tname = f.dispatch_name()\n\n\t\t\tif f.is_static_entry_point(f.name):\n\t\t\t\tfor n in f.entry_points:\n\t\t\t\t\tif n != f.name:\n\t\t\t\t\t\ttext = '\\tGL_STUB_ALIAS(gl%s, gl%s)' % (n, f.name)\n\n\t\t\t\t\t\tif f.has_different_protocol(n):\n\t\t\t\t\t\t\tprint '#ifndef GLX_INDIRECT_RENDERING'\n\t\t\t\t\t\t\tprint text\n\t\t\t\t\t\t\tprint '#endif'\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint text\n\n\t\treturn\n\n\ndef show_usage():\n\tprint \"Usage: %s [-f input_file_name] [-m output_mode]\" % sys.argv[0]\n\tsys.exit(1)\n\nif __name__ == '__main__':\n\tfile_name = \"gl_API.xml\"\n\tmode = \"generic\"\n\n\ttry:\n\t\t(args, trail) = getopt.getopt(sys.argv[1:], \"m:f:\")\n\texcept Exception,e:\n\t\tshow_usage()\n\n\tfor (arg,val) in args:\n\t\tif arg == '-m':\n\t\t\tmode = val\n\t\telif arg == \"-f\":\n\t\t\tfile_name = val\n\n\tif mode == \"generic\":\n\t\tprinter = PrintGenericStubs()\n\telse:\n\t\tprint \"ERROR: Invalid mode \\\"%s\\\" specified.\" % mode\n\t\tshow_usage()\n\n\tapi = gl_XML.parse_GL_API(file_name, glX_XML.glx_item_factory())\n\tprinter.Print(api)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476156,"cells":{"repo_name":{"kind":"string","value":"ShiYw/Sigil"},"path":{"kind":"string","value":"3rdparty/python/Lib/lib2to3/fixes/fix_throw.py"},"copies":{"kind":"string","value":"203"},"size":{"kind":"string","value":"1582"},"content":{"kind":"string","value":"\"\"\"Fixer for generator.throw(E, V, T).\n\ng.throw(E)       -> g.throw(E)\ng.throw(E, V)    -> g.throw(E(V))\ng.throw(E, V, T) -> g.throw(E(V).with_traceback(T))\n\ng.throw(\"foo\"[, V[, T]]) will warn about string exceptions.\"\"\"\n# Author: Collin Winter\n\n# Local imports\nfrom .. import pytree\nfrom ..pgen2 import token\nfrom .. 
import fixer_base\nfrom ..fixer_util import Name, Call, ArgList, Attr, is_tuple\n\nclass FixThrow(fixer_base.BaseFix):\n    BM_compatible = True\n    PATTERN = \"\"\"\n    power< any trailer< '.' 'throw' >\n           trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >\n    >\n    |\n    power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >\n    \"\"\"\n\n    def transform(self, node, results):\n        syms = self.syms\n\n        exc = results[\"exc\"].clone()\n        if exc.type is token.STRING:\n            self.cannot_convert(node, \"Python 3 does not support string exceptions\")\n            return\n\n        # Leave \"g.throw(E)\" alone\n        val = results.get(\"val\")\n        if val is None:\n            return\n\n        val = val.clone()\n        if is_tuple(val):\n            args = [c.clone() for c in val.children[1:-1]]\n        else:\n            val.prefix = \"\"\n            args = [val]\n\n        throw_args = results[\"args\"]\n\n        if \"tb\" in results:\n            tb = results[\"tb\"].clone()\n            tb.prefix = \"\"\n\n            e = Call(exc, args)\n            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]\n            throw_args.replace(pytree.Node(syms.power, with_tb))\n        else:\n            throw_args.replace(Call(exc, args))\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476157,"cells":{"repo_name":{"kind":"string","value":"davidbliu/maestro-nng"},"path":{"kind":"string","value":"maestro/__main__.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4482"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# Copyright (C) 2013 SignalFuse, Inc.\n#\n# Docker container orchestration utility.\n\nimport argparse\nimport jinja2\nimport logging\nimport sys\nimport os\nimport yaml\n\nfrom . import exceptions, maestro\nfrom . 
import name, version\n\n# Define the commands\nACCEPTED_COMMANDS = ['status', 'fullstatus', 'start', 'stop', 'restart',\n                     'logs']\nDEFAULT_MAESTRO_FILE = 'maestro.yaml'\n\n\ndef load_config(options):\n    \"\"\"Preprocess the input config file through Jinja2 before loading it as\n    JSON.\"\"\"\n    if options.file == '-':\n        template = jinja2.Template(sys.stdin.read())\n    else:\n        env = jinja2.Environment(\n            loader=jinja2.FileSystemLoader(os.path.dirname(options.file)),\n            extensions=['jinja2.ext.with_'])\n        template = env.get_template(os.path.basename(options.file))\n\n    return yaml.load(template.render(env=os.environ))\n\n\ndef create_parser():\n    parser = argparse.ArgumentParser(prog=name, description=(\n        '{} v{}, Docker container orchestrator.'.format(\n            name.title(), version)))\n    parser.add_argument('command', nargs='?',\n                        choices=ACCEPTED_COMMANDS,\n                        default='status',\n                        help='orchestration command to execute')\n    parser.add_argument('things', nargs='*', metavar='thing',\n                        help='container(s) or service(s) to act on')\n    parser.add_argument('-f', '--file', nargs='?', metavar='FILE',\n                        default=DEFAULT_MAESTRO_FILE,\n                        help=('read environment description from FILE ' +\n                              '(use - for stdin)'))\n    parser.add_argument('-c', '--completion', metavar='CMD',\n                        help=('list commands, services or containers in ' +\n                              'environment based on CMD'))\n    parser.add_argument('-r', '--refresh-images', action='store_const',\n                        const=True, default=False,\n                        help='force refresh of container images from registry')\n    parser.add_argument('-F', '--follow', action='store_const',\n                        const=True, default=False,\n      
                  help='follow logs as they are generated')\n    parser.add_argument('-n', metavar='LINES', default=15,\n                        help='Only show the last LINES lines for logs')\n    parser.add_argument('-o', '--only', action='store_const',\n                        const=True, default=False,\n                        help='only affect the selected container or service')\n    parser.add_argument('-v', '--version', action='store_const',\n                        const=True, default=False,\n                        help='show program version and exit')\n    return parser\n\n\ndef main(args=None):\n    print 'these are the args'\n    print args\n    print 'those are hello'\n    options = create_parser().parse_args(args)\n\n    # If the version is requested, show it and exit right away.\n    if options.version:\n        print('{}-{}'.format(name, version))\n        return 0\n\n    try:\n        config = load_config(options)\n    except jinja2.exceptions.TemplateNotFound:\n        logging.error('Environment description file %s not found!',\n                      options.file)\n        sys.exit(1)\n    except:\n        logging.error('Error reading environment description file %s!',\n                      options.file)\n        sys.exit(1)\n\n    # Shutup urllib3, wherever it comes from.\n    (logging.getLogger('requests.packages.urllib3.connectionpool')\n            .setLevel(logging.WARN))\n    (logging.getLogger('urllib3.connectionpool')\n            .setLevel(logging.WARN))\n\n    c = maestro.Conductor(config)\n\n\n    if options.completion is not None:\n        args = filter(lambda x: not x.startswith('-'),\n                      options.completion.split(' '))\n        if len(args) == 2:\n            prefix = args[1]\n            choices = ACCEPTED_COMMANDS\n        elif len(args) >= 3:\n            prefix = args[len(args)-1]\n            choices = c.services + c.containers\n        else:\n            return 0\n\n        print(' '.join(filter(lambda x: 
x.startswith(prefix), set(choices))))\n        return 0\n\n    try:\n        options.things = set(options.things)\n        getattr(c, options.command)(**vars(options))\n    except exceptions.MaestroException as e:\n        sys.stderr.write('{}\\n'.format(e))\n        return 1\n    except KeyboardInterrupt:\n        return 1\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n"},"license":{"kind":"string","value":"lgpl-3.0"}}},{"rowIdx":476158,"cells":{"repo_name":{"kind":"string","value":"chen2aaron/SnirteneCodes"},"path":{"kind":"string","value":"Fragment/MySQLdb_Pra.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1034"},"content":{"kind":"string","value":"import MySQLdb\nconn = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"123456\",db=\"cc\",port=3306,charset=\"utf8\")\ncur = conn.cursor()\n# cur.execute(\"insert into users (username,password,email) values (%s,%s,%s)\",(\"python\",\"123456\",\"python@gmail.com\"))\n# conn.commit()\n\n# cur.executemany(\"insert into users (username,password,email) values (%s,%s,%s)\",((\"google\",\"111222\",\"g@gmail.com\"),(\"facebook\",\"222333\",\"f@face.book\"),(\"github\",\"333444\",\"git@hub.com\"),(\"docker\",\"444555\",\"doc@ker.com\")))\n# conn.commit()\ncur.execute(\"select * from users\")\nlines = cur.fetchall()\nfor line in lines:\n    print line\ncur.execute(\"select * from users where id=7\")\nline_first = cur.fetchone()\nprint line_first\nprint lines\nprint cur.fetchall()\ncur.execute(\"select * from users\")\nprint cur.fetchone()\nprint cur.fetchone()\nprint cur.fetchone()\nprint \"--------------\"\ncur.scroll(1)\nprint cur.fetchone()\ncur.scroll(-2)\nprint cur.fetchone()\ncur.scroll(1,\"absolute\")\nprint cur.fetchone()\nprint cur.fetchone()\nprint 
cur.fetchone()\ncur.close()\nconn.close()\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":476159,"cells":{"repo_name":{"kind":"string","value":"rxuriguera/bibtexIndexMaker"},"path":{"kind":"string","value":"src/bibim/ie/tests/test_context.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3474"},"content":{"kind":"string","value":"\n# Copyright 2010 Ramon Xuriguera\n#\n# This file is part of BibtexIndexMaker. \n#\n# BibtexIndexMaker is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# BibtexIndexMaker is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with BibtexIndexMaker. If not, see .\n\nimport unittest #@UnresolvedImport\n\nfrom bibim.ie.context import ContextResolver\nfrom bibim.util.beautifulsoup import BeautifulSoup\nfrom bibim.util.helpers import ContentCleaner\n\nhtml = \"\"\"\n       \n       \n       \n           \n               \n                   \n                   \n               \n               \n                   \n                   \n               \n               \n                   \n                              \n           
    Field 01:Value 01
    Field 02:Value 02
    Field 03 33\n Value 03
    \n \n \n \"\"\"\n\n\nclass ContextResolverTest(unittest.TestCase):\n def setUp(self):\n self.cr = ContextResolver()\n self.soup = ContentCleaner().clean_content(html)\n self.element01 = self.soup.find('td', text='Value 01').parent\n self.element02 = self.soup.find('td', text='Value 03').parent\n \n def tearDown(self):\n pass\n\n def test_get_context(self):\n context = self.cr.get_context(self.element01)\n self.failUnless(context[u'Field 01:'] == 1)\n \n def test_get_tree_context(self):\n context = self.cr.get_context(self.element02)\n self.failUnless(context[u'Field 03'] == 1)\n self.failUnless(context[u'33'] == 1)\n\n def test_merge_contexts(self):\n context01 = {u'Field 01:':1}\n context02 = {u'Field 01:':3, u'Field 02:':1, u'Field 03:':4}\n merged = self.cr.merge_context(context01, context02)\n self.failUnless(merged == {u'Field 02:': 1, u'Field 01:': 4,\n u'Field 03:': 4})\n \n def test_clean_context(self):\n context = {'a':2, 'b':3, 'c':1,\n 'this string is quite long. yes indeed':4}\n result = self.cr.clean_context(context)\n self.failUnless(result == {'a':2, 'b':3})\n \n def test_get_top_words(self):\n context = {u'a':3, 'b':5, 'c':1, u'd':2, 'e':4}\n expected = ['b', 'e', u'a']\n result = self.cr.get_top_strings(context, 3)\n self.failUnless(result == expected)\n \n def test_check_context(self):\n context01 = {'a':3, 'b':5, 'c':1, 'd':2, 'e':4}\n context02 = {'a':1, 'x':3}\n result = self.cr.check_context(context01, context02)\n self.failUnless(result)\n \n context02 = {'x':3}\n result = self.cr.check_context(context01, context02)\n self.failIf(result)\n \n context01 = {}\n result = self.cr.check_context(context01, context02)\n self.failUnless(result)\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476160,"cells":{"repo_name":{"kind":"string","value":"Modified-MW-DF/modified-MDF"},"path":{"kind":"string","value":"MWDF 
Project/MasterworkDwarfFortress/Utilities/Quickfort/src/qfconvert/xml2obj.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"3404"},"content":{"kind":"string","value":"## {{{ http://code.activestate.com/recipes/534109/ (r8)\r\nimport re\r\nimport xml.sax.handler\r\n\r\n\r\ndef xml2obj(src):\r\n \"\"\"\r\n A simple function to converts XML data into native Python object.\r\n \"\"\"\r\n\r\n non_id_char = re.compile('[^_0-9a-zA-Z]')\r\n\r\n def _name_mangle(name):\r\n return non_id_char.sub('_', name)\r\n\r\n class DataNode(object):\r\n def __init__(self):\r\n self._attrs = {} # XML attributes and child elements\r\n self.data = None # child text data\r\n\r\n def __len__(self):\r\n # treat single element as a list of 1\r\n return 1\r\n\r\n def __getitem__(self, key):\r\n if isinstance(key, basestring):\r\n return self._attrs.get(key, None)\r\n else:\r\n return [self][key]\r\n\r\n def __contains__(self, name):\r\n return name in self._attrs\r\n\r\n def __nonzero__(self):\r\n return bool(self._attrs or self.data)\r\n\r\n def __getattr__(self, name):\r\n if name.startswith('__'):\r\n # need to do this for Python special methods???\r\n raise AttributeError(name)\r\n return self._attrs.get(name, None)\r\n\r\n def _add_xml_attr(self, name, value):\r\n if name in self._attrs:\r\n # multiple attribute of the same name are represented by a list\r\n children = self._attrs[name]\r\n if not isinstance(children, list):\r\n children = [children]\r\n self._attrs[name] = children\r\n children.append(value)\r\n else:\r\n self._attrs[name] = value\r\n\r\n def __str__(self):\r\n return self.data or ''\r\n\r\n def __repr__(self):\r\n items = sorted(self._attrs.items())\r\n if self.data:\r\n items.append(('data', self.data))\r\n return u'{%s}' % ', '.join(\r\n [u'%s:%s' % (k, repr(v)) for k, v in items]\r\n )\r\n\r\n class TreeBuilder(xml.sax.handler.ContentHandler):\r\n def __init__(self):\r\n self.stack = []\r\n self.root = DataNode()\r\n self.current = 
self.root\r\n self.text_parts = []\r\n\r\n def startElement(self, name, attrs):\r\n self.stack.append((self.current, self.text_parts))\r\n self.current = DataNode()\r\n self.text_parts = []\r\n # xml attributes --> python attributes\r\n for k, v in attrs.items():\r\n self.current._add_xml_attr(_name_mangle(k), v)\r\n\r\n def endElement(self, name):\r\n text = ''.join(self.text_parts).strip()\r\n if text:\r\n self.current.data = text\r\n if self.current._attrs:\r\n obj = self.current\r\n else:\r\n # a text only node is simply represented by the string\r\n obj = text or ''\r\n self.current, self.text_parts = self.stack.pop()\r\n self.current._add_xml_attr(_name_mangle(name), obj)\r\n\r\n def characters(self, content):\r\n self.text_parts.append(content)\r\n\r\n builder = TreeBuilder()\r\n\r\n if isinstance(src, basestring):\r\n xml.sax.parseString(src, builder)\r\n else:\r\n xml.sax.parse(src, builder)\r\n return builder.root._attrs.values()[0]\r\n## end of http://code.activestate.com/recipes/534109/ }}}\r\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":476161,"cells":{"repo_name":{"kind":"string","value":"shsingh/ansible"},"path":{"kind":"string","value":"lib/ansible/modules/cloud/misc/cloud_init_data_facts.py"},"copies":{"kind":"string","value":"101"},"size":{"kind":"string","value":"3392"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# (c) 2018, René Moser \n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: cloud_init_data_facts\nshort_description: Retrieve facts of cloud-init.\ndescription:\n - Gathers facts by reading the status.json and result.json of cloud-init.\nversion_added: 2.6\nauthor: René Moser (@resmo)\noptions:\n filter:\n 
description:\n - Filter facts\n choices: [ status, result ]\nnotes:\n - See http://cloudinit.readthedocs.io/ for more information about cloud-init.\n'''\n\nEXAMPLES = '''\n- name: Gather all facts of cloud init\n cloud_init_data_facts:\n register: result\n\n- debug:\n var: result\n\n- name: Wait for cloud init to finish\n cloud_init_data_facts:\n filter: status\n register: res\n until: \"res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage\"\n retries: 50\n delay: 5\n'''\n\nRETURN = '''\n---\ncloud_init_data_facts:\n description: Facts of result and status.\n returned: success\n type: dict\n sample: '{\n \"status\": {\n \"v1\": {\n \"datasource\": \"DataSourceCloudStack\",\n \"errors\": []\n },\n \"result\": {\n \"v1\": {\n \"datasource\": \"DataSourceCloudStack\",\n \"init\": {\n \"errors\": [],\n \"finished\": 1522066377.0185432,\n \"start\": 1522066375.2648022\n },\n \"init-local\": {\n \"errors\": [],\n \"finished\": 1522066373.70919,\n \"start\": 1522066373.4726632\n },\n \"modules-config\": {\n \"errors\": [],\n \"finished\": 1522066380.9097016,\n \"start\": 1522066379.0011985\n },\n \"modules-final\": {\n \"errors\": [],\n \"finished\": 1522066383.56594,\n \"start\": 1522066382.3449218\n },\n \"stage\": null\n }\n }'\n'''\n\nimport os\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_text\n\n\nCLOUD_INIT_PATH = \"/var/lib/cloud/data/\"\n\n\ndef gather_cloud_init_data_facts(module):\n res = {\n 'cloud_init_data_facts': dict()\n }\n\n for i in ['result', 'status']:\n filter = module.params.get('filter')\n if filter is None or filter == i:\n res['cloud_init_data_facts'][i] = dict()\n json_file = CLOUD_INIT_PATH + i + '.json'\n\n if os.path.exists(json_file):\n f = open(json_file, 'rb')\n contents = to_text(f.read(), errors='surrogate_or_strict')\n f.close()\n\n if contents:\n res['cloud_init_data_facts'][i] = module.from_json(contents)\n return res\n\n\ndef 
main():\n module = AnsibleModule(\n argument_spec=dict(\n filter=dict(choices=['result', 'status']),\n ),\n supports_check_mode=True,\n )\n\n facts = gather_cloud_init_data_facts(module)\n result = dict(changed=False, ansible_facts=facts, **facts)\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476162,"cells":{"repo_name":{"kind":"string","value":"jit/pyew"},"path":{"kind":"string","value":"vstruct/unittest.py"},"copies":{"kind":"string","value":"17"},"size":{"kind":"string","value":"2352"},"content":{"kind":"string","value":"\nimport vstruct\n\nfrom cStringIO import StringIO\n\nfrom vstruct.primitives import *\n\ndef test(vs, hexstr):\n vshex = vs.vsEmit().encode('hex')\n if vshex != hexstr:\n raise Exception('FAIL')\n print 'PASS!'\n\nv = vstruct.VStruct()\nv.uint8 = v_uint8(1)\nv.uint16 = v_uint16(2)\nv.uint24 = v_uint24(3)\nv.uint32 = v_uint32(4)\nv.uint64 = v_uint64(5)\nv.vbytes = v_bytes(vbytes='ABCD')\n\ntest(v,'01020003000004000000050000000000000041424344')\nprint v.tree()\n\n\nv.uint8 = 99\nv.uint16 = 100\nv.uint24 = 101\nv.uint32 = 102\nv.uint64 = 103\nv.vbytes = '\\x00\\x00\\x00\\x00'\n\ntest(v,'63640065000066000000670000000000000000000000')\nprint v.tree()\n\n\n# =================================================================\nv = vstruct.VStruct()\nv._vs_field_align = True\nv.uint8 = v_uint8(0x42, bigend=True)\nv.uint16 = v_uint16(0x4243, bigend=True)\nv.uint24 = v_uint24(0x424344, bigend=True)\nv.uint32 = v_uint32(0x42434445, bigend=True)\nv.uint64 = v_uint64(0x4243444546474849, bigend=True)\n\ntest(v, '420042430000424344000000424344454243444546474849')\nprint v.tree()\n\n\n# ===============================================================\n\nv = vstruct.VStruct()\nv.strfield = v_str(size=30)\nv.unifield = v_wstr(size=30)\n\nv.strfield = 'wootwoot!'\nv.unifield = 'bazbaz'\n\ntest(v, 
'776f6f74776f6f7421000000000000000000000000000000000000000000620061007a00620061007a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000')\nprint v.tree()\n\nv.vsParse('B'*90)\n\n# ===============================================================\ndef updatelen(vs):\n vs.vsGetField('strfield').vsSetLength(vs.lenfield)\n\nv = vstruct.VStruct()\nv.lenfield = v_uint8(0x30)\nv.strfield = v_str(size=30)\nv.vsAddParseCallback('lenfield', updatelen)\n\nv.vsParse('\\x01' + 'A' * 30)\ntest(v, '0141')\nprint v.tree()\n\n\n# ==============================================================\n\nclass woot(vstruct.VStruct):\n def __init__(self):\n vstruct.VStruct.__init__(self)\n self.lenfield = v_uint8()\n self.strfield = v_str(size=0x20)\n\n def pcb_lenfield(self):\n self.vsGetField('strfield').vsSetLength(self.lenfield)\n\nv = woot()\nv.vsParse('\\x01' + 'A'*30)\ntest(v, '0141')\nprint v.tree()\n\n# ==============================================================\n\nv = woot()\nsio = StringIO('\\x01' + 'A' * 30)\nv.vsParseFd(sio)\ntest(v, '0141')\nprint v.tree()\n\n\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":476163,"cells":{"repo_name":{"kind":"string","value":"ksh/gpirecertification"},"path":{"kind":"string","value":"tests/functional/model_student_work.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"3838"},"content":{"kind":"string","value":"# Copyright 2013 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functional tests for models/review.py.\"\"\"\n\n__author__ = [\n 'johncox@google.com (John Cox)',\n]\n\nfrom models import entities\nfrom models import models\nfrom models import student_work\nfrom tests.functional import actions\nfrom google.appengine.ext import db\n\n\nclass ReferencedModel(entities.BaseEntity):\n pass\n\n\nclass UnvalidatedReference(entities.BaseEntity):\n referenced_model_key = student_work.KeyProperty()\n\n\nclass ValidatedReference(entities.BaseEntity):\n referenced_model_key = student_work.KeyProperty(kind=ReferencedModel.kind())\n\n\nclass KeyPropertyTest(actions.TestBase):\n \"\"\"Tests KeyProperty.\"\"\"\n\n def setUp(self): # From superclass. 
pylint: disable-msg=g-bad-name\n super(KeyPropertyTest, self).setUp()\n self.referenced_model_key = ReferencedModel().put()\n\n def test_validation_and_datastore_round_trip_of_keys_succeeds(self):\n \"\"\"Tests happy path for both validation and (de)serialization.\"\"\"\n model_with_reference = ValidatedReference(\n referenced_model_key=self.referenced_model_key)\n model_with_reference_key = model_with_reference.put()\n model_with_reference_from_datastore = db.get(model_with_reference_key)\n self.assertEqual(\n self.referenced_model_key,\n model_with_reference_from_datastore.referenced_model_key)\n custom_model_from_datastore = db.get(\n model_with_reference_from_datastore.referenced_model_key)\n self.assertEqual(\n self.referenced_model_key, custom_model_from_datastore.key())\n self.assertTrue(isinstance(\n model_with_reference_from_datastore.referenced_model_key,\n db.Key))\n\n def test_type_not_validated_if_kind_not_passed(self):\n model_key = db.Model().put()\n unvalidated = UnvalidatedReference(referenced_model_key=model_key)\n self.assertEqual(model_key, unvalidated.referenced_model_key)\n\n def test_validation_fails(self):\n model_key = db.Model().put()\n self.assertRaises(\n db.BadValueError, ValidatedReference,\n referenced_model_key='not_a_key')\n self.assertRaises(\n db.BadValueError, ValidatedReference,\n referenced_model_key=model_key)\n\n\nclass ReviewTest(actions.TestBase):\n\n def test_constructor_sets_key_name(self):\n \"\"\"Tests construction of key_name, put of entity with key_name set.\"\"\"\n unit_id = 'unit_id'\n reviewer_key = models.Student(key_name='reviewer@example.com').put()\n review_key = student_work.Review(\n reviewer_key=reviewer_key, unit_id=unit_id).put()\n self.assertEqual(\n student_work.Review.key_name(unit_id, reviewer_key),\n review_key.name())\n\n\nclass SubmissionTest(actions.TestBase):\n\n def test_constructor_sets_key_name(self):\n \"\"\"Tests construction of key_name, put of entity with key_name set.\"\"\"\n unit_id = 
'unit_id'\n reviewee_key = models.Student(key_name='reviewee@example.com').put()\n review_key = student_work.Submission(\n reviewee_key=reviewee_key, unit_id=unit_id).put()\n self.assertEqual(\n student_work.Submission.key_name(unit_id, reviewee_key),\n review_key.name())\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476164,"cells":{"repo_name":{"kind":"string","value":"eprivalov/sendec"},"path":{"kind":"string","value":"loginsys/forms.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2847"},"content":{"kind":"string","value":"from __future__ import unicode_literals\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib import auth\n\nclass UserCreationFormNew(forms.ModelForm):\n \"\"\"\n A form that creates a user, with no privileges, from the given username and\n password.\n \"\"\"\n error_messages = {\n 'password_mismatch': _(\"The two password fields didn't match.\"),\n }\n password1 = forms.CharField(label=_(\"Password\"),\n widget=forms.PasswordInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Password',\n 'required': 'required'\n }))\n password2 = forms.CharField(label=_(\"Password confirmation\"),\n widget=forms.PasswordInput(attrs={\n 'class': 'form-control',\n 'required': 'required',\n 'placeholder': 'Confirm password'\n }),\n help_text=_(\"Enter the same password as above, for verification.\"))\n\n class Meta:\n model = User\n fields = (\"username\",)\n\n def clean_password2(self):\n password1 = self.cleaned_data.get(\"password1\")\n password2 = self.cleaned_data.get(\"password2\")\n if password1 and password2 and password1 != password2:\n raise forms.ValidationError(\n self.error_messages['password_mismatch'],\n code='password_mismatch',\n )\n return password2\n\n def save(self, commit=True):\n user = super(UserCreationFormNew, self).save(commit=False)\n user.set_password(self.cleaned_data[\"password1\"])\n 
if commit:\n user.save()\n return user\n\n\nclass UserAuthenticationForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ()\n error_messages = {\n 'password_mismatch': _(\"The two password fields didn't match.\"),\n }\n\n username = forms.CharField(label=_(\"Username\"),\n widget=forms.TextInput(attrs={\n 'class': 'form-control',\n 'required': 'required',\n 'name': 'username',\n 'placeholder': \"Username\"\n }))\n password = forms.CharField(label=_(\"Password\"), widget=forms.PasswordInput(attrs={\n 'class': 'form-control',\n 'required': 'required',\n 'placeholder': 'Password',\n 'name': 'password'\n }))\n\n def clean(self):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n user = auth.authenticate(username=username, password=password)\n if not user or not user.is_active:\n raise forms.ValidationError(\"Sorry, that login was invalid. Please try again.\")\n return self.cleaned_data\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476165,"cells":{"repo_name":{"kind":"string","value":"nadeemsyed/swift"},"path":{"kind":"string","value":"test/unit/common/middleware/test_quotas.py"},"copies":{"kind":"string","value":"8"},"size":{"kind":"string","value":"15565"},"content":{"kind":"string","value":"# Copyright (c) 2010-2012 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom swift.common.swob import Request, HTTPUnauthorized, HTTPOk\nfrom 
swift.common.middleware import container_quotas, copy\nfrom test.unit.common.middleware.helpers import FakeSwift\n\n\nclass FakeCache(object):\n\n def __init__(self, val):\n if 'status' not in val:\n val['status'] = 200\n self.val = val\n\n def get(self, *args):\n return self.val\n\n\nclass FakeApp(object):\n\n def __init__(self):\n pass\n\n def __call__(self, env, start_response):\n start_response('200 OK', [])\n return []\n\n\nclass FakeMissingApp(object):\n\n def __init__(self):\n pass\n\n def __call__(self, env, start_response):\n start_response('404 Not Found', [])\n return []\n\n\ndef start_response(*args):\n pass\n\n\nclass TestContainerQuotas(unittest.TestCase):\n\n def test_split_path_empty_container_path_segment(self):\n app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})\n req = Request.blank('/v1/a//something/something_else',\n environ={'REQUEST_METHOD': 'PUT',\n 'swift.cache': {'key': 'value'}})\n res = req.get_response(app)\n self.assertEqual(res.status_int, 200)\n\n def test_not_handled(self):\n app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})\n req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'})\n res = req.get_response(app)\n self.assertEqual(res.status_int, 200)\n\n app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})\n req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})\n res = req.get_response(app)\n self.assertEqual(res.status_int, 200)\n\n def test_no_quotas(self):\n app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})\n req = Request.blank(\n '/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT', 'swift.cache': FakeCache({}),\n 'CONTENT_LENGTH': '100'})\n res = req.get_response(app)\n self.assertEqual(res.status_int, 200)\n\n def test_exceed_bytes_quota(self):\n app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})\n cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}})\n req = Request.blank(\n '/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT', 'swift.cache': 
cache,\n 'CONTENT_LENGTH': '100'})\n res = req.get_response(app)\n self.assertEqual(res.status_int, 413)\n self.assertEqual(res.body, 'Upload exceeds quota.')\n\n def test_not_exceed_bytes_quota(self):\n app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})\n cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})\n req = Request.blank(\n '/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,\n 'CONTENT_LENGTH': '100'})\n res = req.get_response(app)\n self.assertEqual(res.status_int, 200)\n\n def test_exceed_counts_quota(self):\n app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})\n cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})\n req = Request.blank(\n '/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,\n 'CONTENT_LENGTH': '100'})\n res = req.get_response(app)\n self.assertEqual(res.status_int, 413)\n self.assertEqual(res.body, 'Upload exceeds quota.')\n\n def test_not_exceed_counts_quota(self):\n app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})\n cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})\n req = Request.blank(\n '/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,\n 'CONTENT_LENGTH': '100'})\n res = req.get_response(app)\n self.assertEqual(res.status_int, 200)\n\n def test_invalid_quotas(self):\n req = Request.blank(\n '/v1/a/c',\n environ={'REQUEST_METHOD': 'POST',\n 'HTTP_X_CONTAINER_META_QUOTA_BYTES': 'abc'})\n res = req.get_response(\n container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))\n self.assertEqual(res.status_int, 400)\n\n req = Request.blank(\n '/v1/a/c',\n environ={'REQUEST_METHOD': 'POST',\n 'HTTP_X_CONTAINER_META_QUOTA_COUNT': 'abc'})\n res = req.get_response(\n container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))\n self.assertEqual(res.status_int, 400)\n\n def test_valid_quotas(self):\n req = Request.blank(\n '/v1/a/c',\n environ={'REQUEST_METHOD': 'POST',\n 'HTTP_X_CONTAINER_META_QUOTA_BYTES': 
'123'})\n res = req.get_response(\n container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))\n self.assertEqual(res.status_int, 200)\n\n req = Request.blank(\n '/v1/a/c',\n environ={'REQUEST_METHOD': 'POST',\n 'HTTP_X_CONTAINER_META_QUOTA_COUNT': '123'})\n res = req.get_response(\n container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))\n self.assertEqual(res.status_int, 200)\n\n def test_delete_quotas(self):\n req = Request.blank(\n '/v1/a/c',\n environ={'REQUEST_METHOD': 'POST',\n 'HTTP_X_CONTAINER_META_QUOTA_BYTES': None})\n res = req.get_response(\n container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))\n self.assertEqual(res.status_int, 200)\n\n def test_missing_container(self):\n app = container_quotas.ContainerQuotaMiddleware(FakeMissingApp(), {})\n cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})\n req = Request.blank(\n '/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,\n 'CONTENT_LENGTH': '100'})\n res = req.get_response(app)\n self.assertEqual(res.status_int, 404)\n\n def test_auth_fail(self):\n app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})\n cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'},\n 'write_acl': None})\n req = Request.blank(\n '/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,\n 'CONTENT_LENGTH': '100',\n 'swift.authorize': lambda *args: HTTPUnauthorized()})\n res = req.get_response(app)\n self.assertEqual(res.status_int, 401)\n\n\nclass ContainerQuotaCopyingTestCases(unittest.TestCase):\n\n def setUp(self):\n self.app = FakeSwift()\n self.cq_filter = container_quotas.filter_factory({})(self.app)\n self.copy_filter = copy.filter_factory({})(self.cq_filter)\n\n def test_exceed_bytes_quota_copy_verb(self):\n cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}})\n self.app.register('GET', '/v1/a/c2/o2', HTTPOk,\n {'Content-Length': '10'}, 'passed')\n\n req = Request.blank('/v1/a/c2/o2',\n environ={'REQUEST_METHOD': 'COPY',\n 'swift.cache': 
cache},\n headers={'Destination': '/c/o'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 413)\n self.assertEqual(res.body, 'Upload exceeds quota.')\n\n def test_not_exceed_bytes_quota_copy_verb(self):\n self.app.register('GET', '/v1/a/c2/o2', HTTPOk,\n {'Content-Length': '10'}, 'passed')\n self.app.register(\n 'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')\n cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})\n req = Request.blank('/v1/a/c2/o2',\n environ={'REQUEST_METHOD': 'COPY',\n 'swift.cache': cache},\n headers={'Destination': '/c/o'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 200)\n\n def test_exceed_counts_quota_copy_verb(self):\n self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed')\n cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})\n req = Request.blank('/v1/a/c2/o2',\n environ={'REQUEST_METHOD': 'COPY',\n 'swift.cache': cache},\n headers={'Destination': '/c/o'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 413)\n self.assertEqual(res.body, 'Upload exceeds quota.')\n\n def test_exceed_counts_quota_copy_cross_account_verb(self):\n self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed')\n a_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '2'},\n 'status': 200, 'object_count': 1}\n a2_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '1'},\n 'status': 200, 'object_count': 1}\n req = Request.blank('/v1/a/c2/o2',\n environ={'REQUEST_METHOD': 'COPY',\n 'swift.infocache': {\n 'container/a/c': a_c_cache,\n 'container/a2/c': a2_c_cache}},\n headers={'Destination': '/c/o',\n 'Destination-Account': 'a2'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 413)\n self.assertEqual(res.body, 'Upload exceeds quota.')\n\n def test_exceed_counts_quota_copy_cross_account_PUT_verb(self):\n self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed')\n a_c_cache = {'storage_policy': '0', 
'meta': {'quota-count': '2'},\n 'status': 200, 'object_count': 1}\n a2_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '1'},\n 'status': 200, 'object_count': 1}\n req = Request.blank('/v1/a2/c/o',\n environ={'REQUEST_METHOD': 'PUT',\n 'swift.infocache': {\n 'container/a/c': a_c_cache,\n 'container/a2/c': a2_c_cache}},\n headers={'X-Copy-From': '/c2/o2',\n 'X-Copy-From-Account': 'a'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 413)\n self.assertEqual(res.body, 'Upload exceeds quota.')\n\n def test_exceed_bytes_quota_copy_from(self):\n self.app.register('GET', '/v1/a/c2/o2', HTTPOk,\n {'Content-Length': '10'}, 'passed')\n cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}})\n\n req = Request.blank('/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT',\n 'swift.cache': cache},\n headers={'x-copy-from': '/c2/o2'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 413)\n self.assertEqual(res.body, 'Upload exceeds quota.')\n\n def test_not_exceed_bytes_quota_copy_from(self):\n self.app.register('GET', '/v1/a/c2/o2', HTTPOk,\n {'Content-Length': '10'}, 'passed')\n self.app.register(\n 'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')\n cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})\n req = Request.blank('/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT',\n 'swift.cache': cache},\n headers={'x-copy-from': '/c2/o2'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 200)\n\n def test_bytes_quota_copy_from_no_src(self):\n self.app.register('GET', '/v1/a/c2/o3', HTTPOk, {}, 'passed')\n self.app.register(\n 'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')\n cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})\n req = Request.blank('/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT',\n 'swift.cache': cache},\n headers={'x-copy-from': '/c2/o3'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 200)\n\n def 
test_bytes_quota_copy_from_bad_src(self):\n cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})\n req = Request.blank('/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT',\n 'swift.cache': cache},\n headers={'x-copy-from': 'bad_path'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 412)\n\n def test_exceed_counts_quota_copy_from(self):\n self.app.register('GET', '/v1/a/c2/o2', HTTPOk,\n {'Content-Length': '10'}, 'passed')\n cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})\n req = Request.blank('/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT',\n 'swift.cache': cache},\n headers={'x-copy-from': '/c2/o2'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 413)\n self.assertEqual(res.body, 'Upload exceeds quota.')\n\n def test_not_exceed_counts_quota_copy_from(self):\n self.app.register('GET', '/v1/a/c2/o2', HTTPOk,\n {'Content-Length': '10'}, 'passed')\n self.app.register(\n 'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')\n cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})\n req = Request.blank('/v1/a/c/o',\n environ={'REQUEST_METHOD': 'PUT',\n 'swift.cache': cache},\n headers={'x-copy-from': '/c2/o2'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 200)\n\n def test_not_exceed_counts_quota_copy_verb(self):\n self.app.register('GET', '/v1/a/c2/o2', HTTPOk,\n {'Content-Length': '10'}, 'passed')\n self.app.register(\n 'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')\n cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})\n req = Request.blank('/v1/a/c2/o2',\n environ={'REQUEST_METHOD': 'COPY',\n 'swift.cache': cache},\n headers={'Destination': '/c/o'})\n res = req.get_response(self.copy_filter)\n self.assertEqual(res.status_int, 200)\n\nif __name__ == '__main__':\n 
unittest.main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476166,"cells":{"repo_name":{"kind":"string","value":"dancingdan/tensorflow"},"path":{"kind":"string","value":"tensorflow/compiler/tests/pooling_ops_test.py"},"copies":{"kind":"string","value":"22"},"size":{"kind":"string","value":"20324"},"content":{"kind":"string","value":"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for pooling operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.compiler.tests import xla_test\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.platform import googletest\n\n\ndef NHWCToNCHW(input_tensor):\n \"\"\"Convert the input from NHWC format to NCHW.\n\n Args:\n input_tensor: a 4-D tensor, or a 4-element array representing the same.\n\n Returns:\n the converted tensor or a shape array\n \"\"\"\n if isinstance(input_tensor, ops.Tensor):\n return array_ops.transpose(input_tensor, [0, 3, 1, 2])\n else:\n return [input_tensor[0], input_tensor[3], input_tensor[1], 
input_tensor[2]]\n\n\ndef NCHWToNHWC(input_tensor):\n \"\"\"Convert the input from NCHW format to NHWC.\n\n Args:\n input_tensor: a 4-D tensor, or a 4-element array representing the same.\n\n Returns:\n the converted tensor or a shape array\n \"\"\"\n if isinstance(input_tensor, ops.Tensor):\n return array_ops.transpose(input_tensor, [0, 2, 3, 1])\n else:\n return [input_tensor[0], input_tensor[2], input_tensor[3], input_tensor[1]]\n\n\ndef GetTestConfigs():\n \"\"\"Get all the valid tests configs to run.\n\n Returns:\n all the valid test configs\n \"\"\"\n test_configs = [\"NHWC\", \"NCHW\"]\n return test_configs\n\n\nclass PoolingTest(xla_test.XLATestCase):\n\n def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,\n data_format, expected):\n \"\"\"Verifies the output values of the pooling function.\n\n Args:\n pool_func: Function to be called, currently only co.MaxPool.\n input_sizes: Input tensor dimensions.\n ksize: The kernel size dimensions\n strides: The stride dimensions\n padding: Padding type.\n data_format: The data format we use to run the pooling operation.\n expected: An array containing the expected operation outputs.\n \"\"\"\n total_size = np.prod(input_sizes)\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n x = np.array([f * 1.0 for f in range(1, total_size + 1)], dtype=np.float32)\n x = x.reshape(input_sizes)\n with self.cached_session() as sess:\n with self.test_scope():\n inputs = array_ops.placeholder(dtypes.float32)\n t = inputs\n if data_format == \"NCHW\":\n t = NHWCToNCHW(t)\n ksize = NHWCToNCHW(ksize)\n strides = NHWCToNCHW(strides)\n t = pool_func(t,\n ksize=ksize,\n strides=strides,\n padding=padding,\n data_format=data_format)\n if data_format == \"NCHW\":\n t = NCHWToNHWC(t)\n actual = sess.run(t, {inputs: x})\n self.assertAllClose(expected, actual.flatten(), rtol=1e-5, atol=1e-6)\n\n def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,\n expected):\n 
\"\"\"Verifies the output values of the pooling function.\n\n Args:\n pool_func: Function to be called, co.MaxPool, co.AvgPool,\n or the Lua version.\n input_sizes: Input tensor dimensions.\n ksize: The kernel size dimensions\n strides: The stride dimensions\n padding: Padding type.\n expected: An array containing the expected operation outputs.\n \"\"\"\n for data_format in GetTestConfigs():\n self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,\n data_format, expected)\n\n def testMaxPoolValidPadding(self):\n expected_output = [13.0, 14.0, 15.0]\n self._VerifyValues(nn_ops.max_pool,\n input_sizes=[1, 3, 3, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"VALID\",\n expected=expected_output)\n\n def testMaxPoolSamePadding(self):\n expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]\n self._VerifyValues(nn_ops.max_pool,\n input_sizes=[1, 2, 3, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=expected_output)\n\n def testMaxPoolSamePaddingNonSquareWindow(self):\n # input is:\n # [1.0, 2.0\n # 3.0 4.0]\n #\n # Window of [x, x] should do:\n #\n # [max(1.0, 2.0), max(2.0, padded0),\n # max(3.0, 4.0), max(4.0, padded0)]\n self._VerifyValues(\n nn_ops.max_pool,\n input_sizes=[1, 2, 2, 1],\n ksize=[1, 1, 2, 1],\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n expected=[2.0, 2.0, 4.0, 4.0])\n\n def testMaxPoolValidPaddingUnevenStride(self):\n self._VerifyValues(\n nn_ops.max_pool,\n input_sizes=[1, 4, 4, 1],\n ksize=[1, 2, 2, 1],\n strides=[1, 1, 2, 1],\n padding=\"VALID\",\n expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0])\n self._VerifyValues(\n nn_ops.max_pool,\n input_sizes=[1, 4, 4, 1],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 1, 1],\n padding=\"VALID\",\n expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0])\n\n def testMaxPoolSamePaddingFilter4(self):\n expected_output = [\n 21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,\n 61.0, 62.0, 63.0, 64.0\n ]\n self._VerifyValues(\n nn_ops.max_pool,\n 
input_sizes=[1, 4, 4, 4],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=expected_output)\n\n def testMaxPoolSamePaddingFilter8(self):\n expected_output = [\n 145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0,\n 163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0,\n 181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0,\n 191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,\n 289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0,\n 307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0,\n 317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0,\n 407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,\n 433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0,\n 443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0,\n 469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0,\n 487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,\n 505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0\n ]\n self._VerifyValues(\n nn_ops.max_pool,\n input_sizes=[1, 8, 8, 8],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=expected_output)\n\n # Tests for DepthwiseMaxPooling on CPU only.\n def testDepthwiseMaxPool1x1DepthWindow1(self):\n # input is:\n # [1.0, ..., 10.0] along depth,\n #\n # We maxpool by depth in patches of 2.\n self._VerifyValues(\n nn_ops.max_pool,\n input_sizes=[1, 1, 1, 10],\n ksize=[1, 1, 1, 2],\n strides=[1, 1, 1, 2],\n padding=\"SAME\",\n expected=[2.0, 4.0, 6.0, 8.0, 10.0])\n\n def testDepthwiseMaxPool2x2DepthWindow3(self):\n # input is:\n #\n # a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2\n # output. 
Each node has contiguous values, so the depthwise max\n # should be multiples of 3.0.\n self._VerifyValues(\n nn_ops.max_pool,\n input_sizes=[1, 2, 2, 6],\n ksize=[1, 1, 1, 3],\n strides=[1, 1, 1, 3],\n padding=\"SAME\",\n expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0])\n\n def testKernelSmallerThanStrideValid(self):\n self._VerifyValues(\n nn_ops.max_pool,\n input_sizes=[1, 7, 7, 1],\n ksize=[1, 2, 2, 1],\n strides=[1, 3, 3, 1],\n padding=\"VALID\",\n expected=[9, 12, 30, 33])\n\n def testKernelSmallerThanStrideSame(self):\n self._VerifyValues(\n nn_ops.max_pool,\n input_sizes=[1, 3, 3, 1],\n ksize=[1, 1, 1, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=[1, 3, 7, 9])\n\n self._VerifyValues(\n nn_ops.max_pool,\n input_sizes=[1, 4, 4, 1],\n ksize=[1, 1, 1, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=[1, 3, 9, 11])\n\n # Average pooling\n def testAvgPoolValidPadding(self):\n expected_output = [7, 8, 9]\n self._VerifyValues(\n nn_ops.avg_pool,\n input_sizes=[1, 3, 3, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"VALID\",\n expected=expected_output)\n\n def testAvgPoolSamePadding(self):\n expected_output = [7., 8., 9., 11.5, 12.5, 13.5]\n self._VerifyValues(\n nn_ops.avg_pool,\n input_sizes=[1, 2, 3, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n expected=expected_output)\n\n\nclass PoolGradTest(xla_test.XLATestCase):\n\n CPU_DEVICE = \"/job:localhost/replica:0/task:0/cpu:0\"\n\n def _VerifyOneTest(self,\n pool_func,\n pool_grad_func,\n input_sizes,\n ksize,\n strides,\n padding,\n data_format,\n pool_grad_grad_func=None):\n \"\"\"Verifies the output values of the pooling gradient function.\n\n Args:\n pool_func: Forward pooling function\n pool_grad_func: Pooling gradient function for pool_grad_func\n input_sizes: Input tensor dimensions.\n ksize: The kernel size dimensions\n strides: The stride dimensions\n padding: Padding type.\n data_format: The data format we use to run the pooling 
operation.\n pool_grad_grad_func: Second-order gradient function, if available.\n \"\"\"\n total_size = np.prod(input_sizes)\n # TODO(b/73062247): MaxPoolGradGrad can confuse gradients when x is equally\n # maximal at 16 bits. Switch to np.random.randn when resolved.\n x = np.arange(1, total_size + 1, dtype=np.float32)\n x *= (np.random.randint(2, size=total_size) * 2 - 1) # Flip signs randomly\n # Verify some specifically interesting values...\n x[np.random.choice(total_size)] = np.inf\n x[np.random.choice(total_size)] = -np.inf\n # TODO(b/74222344): Fix nan handling for max pool grad.\n # x[np.random.choice(total_size)] = np.nan\n x = x.reshape(input_sizes)\n with self.cached_session() as sess:\n # Use the forward pool function to compute some corresponding outputs\n # (needed for the CPU device, and we need the shape in both cases).\n with ops.device(self.CPU_DEVICE):\n inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)\n outputs = pool_func(\n inputs,\n ksize=ksize,\n strides=strides,\n padding=padding,\n data_format=\"NHWC\")\n\n output_vals = np.array(sess.run(outputs, {inputs: x}))\n output_gradient_vals = np.arange(\n 1, output_vals.size + 1, dtype=np.float32)\n output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)\n output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)\n output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)\n\n # Use the Tensorflow CPU pooling gradient to compute the expected input\n # gradients.\n with ops.device(self.CPU_DEVICE):\n output_gradients = array_ops.placeholder(\n dtypes.float32, shape=output_vals.shape)\n expected_input_gradients = pool_grad_func(\n inputs,\n outputs,\n output_gradients,\n ksize=ksize,\n strides=strides,\n padding=padding,\n data_format=\"NHWC\")\n expected_input_gradient_vals = sess.run(\n expected_input_gradients,\n {inputs: x,\n output_gradients: output_gradient_vals})\n\n output_grad_gradients = array_ops.placeholder(\n dtypes.float32, 
shape=expected_input_gradient_vals.shape)\n if pool_grad_grad_func is not None:\n expected_grad_gradients = pool_grad_grad_func(\n inputs,\n outputs,\n output_grad_gradients,\n ksize=ksize,\n strides=strides,\n padding=padding,\n data_format=\"NHWC\")\n expected_grad_gradients_vals = sess.run(expected_grad_gradients, {\n inputs: x,\n output_grad_gradients: output_grad_grad_vals\n })\n\n # Run the gradient op on the XLA device\n with self.test_scope():\n outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)\n xla_inputs = inputs\n xla_outputs = outputs\n xla_output_gradients = output_gradients\n xla_output_grad_gradients = output_grad_gradients\n xla_ksize = ksize\n xla_strides = strides\n if data_format == \"NCHW\":\n xla_inputs = NHWCToNCHW(inputs)\n xla_outputs = NHWCToNCHW(outputs)\n xla_output_gradients = NHWCToNCHW(output_gradients)\n xla_output_grad_gradients = NHWCToNCHW(output_grad_gradients)\n xla_ksize = NHWCToNCHW(ksize)\n xla_strides = NHWCToNCHW(strides)\n actual_input_gradients = pool_grad_func(\n xla_inputs,\n xla_outputs,\n xla_output_gradients,\n ksize=xla_ksize,\n strides=xla_strides,\n padding=padding,\n data_format=data_format)\n if data_format == \"NCHW\":\n actual_input_gradients = NCHWToNHWC(actual_input_gradients)\n if pool_grad_grad_func is not None:\n actual_grad_gradients = pool_grad_grad_func(\n xla_inputs,\n xla_outputs,\n xla_output_grad_gradients,\n ksize=xla_ksize,\n strides=xla_strides,\n padding=padding,\n data_format=data_format)\n if data_format == \"NCHW\":\n actual_grad_gradients = NCHWToNHWC(actual_grad_gradients)\n actual_input_gradients_vals = sess.run(actual_input_gradients, {\n inputs: x,\n outputs: output_vals,\n output_gradients: output_gradient_vals\n })\n # Compare the Tensorflow and XLA results.\n self.assertAllClose(\n expected_input_gradient_vals,\n actual_input_gradients_vals,\n rtol=1e-4,\n atol=1e-6)\n self.assertShapeEqual(actual_input_gradients_vals, inputs)\n\n if pool_grad_grad_func is not 
None:\n actual_grad_gradients_vals = sess.run(\n actual_grad_gradients, {\n inputs: x,\n outputs: output_vals,\n output_grad_gradients: output_grad_grad_vals\n })\n\n # Compare the Tensorflow and XLA results.\n self.assertAllClose(\n expected_grad_gradients_vals,\n actual_grad_gradients_vals,\n rtol=1e-4,\n atol=1e-6)\n self.assertShapeEqual(actual_grad_gradients_vals, outputs)\n\n def _VerifyValues(self,\n pool_func,\n pool_grad_func,\n input_sizes,\n ksize,\n strides,\n padding,\n pool_grad_grad_func=None):\n \"\"\"Verifies the output values of the pooling function.\n\n Args:\n pool_func: Pooling function to be called, e.g., tf.nn.max_pool\n pool_grad_func: Corresponding pooling gradient function.\n input_sizes: Input tensor dimensions.\n ksize: The kernel size dimensions\n strides: The stride dimensions\n padding: Padding type.\n pool_grad_grad_func: Second-order gradient function, if available.\n \"\"\"\n for data_format in GetTestConfigs():\n self._VerifyOneTest(\n pool_func,\n pool_grad_func,\n input_sizes,\n ksize,\n strides,\n padding,\n data_format,\n pool_grad_grad_func=pool_grad_grad_func)\n\n def _TestPooling(self, forward_op, backward_op, pool_grad_grad_func=None):\n # VALID padding\n self._VerifyValues(\n forward_op,\n backward_op,\n input_sizes=[1, 3, 3, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"VALID\",\n pool_grad_grad_func=pool_grad_grad_func)\n\n # SAME padding\n self._VerifyValues(\n forward_op,\n backward_op,\n input_sizes=[1, 2, 3, 3],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n pool_grad_grad_func=pool_grad_grad_func)\n\n # SAME padding, non square window\n self._VerifyValues(\n forward_op,\n backward_op,\n input_sizes=[1, 2, 2, 1],\n ksize=[1, 1, 2, 1],\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n pool_grad_grad_func=pool_grad_grad_func)\n\n # VALID padding, uneven stride\n self._VerifyValues(\n forward_op,\n backward_op,\n input_sizes=[1, 4, 4, 1],\n ksize=[1, 2, 2, 1],\n strides=[1, 1, 2, 1],\n 
padding=\"VALID\",\n pool_grad_grad_func=pool_grad_grad_func)\n self._VerifyValues(\n forward_op,\n backward_op,\n input_sizes=[1, 4, 4, 1],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 1, 1],\n padding=\"VALID\",\n pool_grad_grad_func=pool_grad_grad_func)\n\n # SAME padding, size 4 input\n self._VerifyValues(\n forward_op,\n backward_op,\n input_sizes=[1, 4, 4, 4],\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n pool_grad_grad_func=pool_grad_grad_func)\n\n # SAME padding, size 8 input\n self._VerifyValues(\n forward_op,\n backward_op,\n input_sizes=[1, 8, 8, 8],\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n pool_grad_grad_func=pool_grad_grad_func)\n\n def testMaxPool(self):\n self._TestPooling(\n nn_ops.max_pool,\n gen_nn_ops.max_pool_grad,\n pool_grad_grad_func=gen_nn_ops.max_pool_grad_grad)\n\n def testAvgPool(self):\n # Wrapper around AvgPoolGrad that ignores extra arguments needed by\n # MaxPoolGrad.\n def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,\n data_format):\n del outputs # Unused by average-pooling gradients.\n return gen_nn_ops.avg_pool_grad(\n inputs.get_shape().as_list(),\n output_gradients,\n ksize=ksize,\n strides=strides,\n padding=padding,\n data_format=data_format)\n\n self._TestPooling(nn_ops.avg_pool, AvgPoolGrad)\n\n # The CPU implementation of AvgPoolGrad doesn't accept kernels smaller than\n # the stride size, so we only run the following tests on MaxPoolGrad.\n\n def testMaxPoolKernelSmallerThanStrideValid(self):\n self._VerifyValues(\n nn_ops.max_pool,\n gen_nn_ops.max_pool_grad,\n input_sizes=[1, 7, 7, 1],\n ksize=[1, 2, 2, 1],\n strides=[1, 3, 3, 1],\n padding=\"VALID\")\n\n def testMaxPoolKernelSmallerThanStrideSame(self):\n self._VerifyValues(\n nn_ops.max_pool,\n gen_nn_ops.max_pool_grad,\n input_sizes=[1, 3, 3, 1],\n ksize=[1, 1, 1, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\")\n\n self._VerifyValues(\n nn_ops.max_pool,\n gen_nn_ops.max_pool_grad,\n input_sizes=[1, 
4, 4, 1],\n ksize=[1, 1, 1, 1],\n strides=[1, 2, 2, 1],\n padding=\"SAME\")\n\n\nif __name__ == \"__main__\":\n googletest.main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476167,"cells":{"repo_name":{"kind":"string","value":"a-doumoulakis/tensorflow"},"path":{"kind":"string","value":"tensorflow/examples/tutorials/mnist/mnist_deep.py"},"copies":{"kind":"string","value":"33"},"size":{"kind":"string","value":"6130"},"content":{"kind":"string","value":"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A deep MNIST classifier using convolutional layers.\n\nSee extensive documentation at\nhttps://www.tensorflow.org/get_started/mnist/pros\n\"\"\"\n# Disable linter warnings to maintain consistency with tutorial.\n# pylint: disable=invalid-name\n# pylint: disable=g-bad-import-order\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport tempfile\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport tensorflow as tf\n\nFLAGS = None\n\n\ndef deepnn(x):\n \"\"\"deepnn builds the graph for a deep net for classifying digits.\n\n Args:\n x: an input tensor with the dimensions (N_examples, 784), where 784 is the\n number of pixels in a standard MNIST image.\n\n Returns:\n A tuple (y, keep_prob). 
y is a tensor of shape (N_examples, 10), with values\n equal to the logits of classifying the digit into one of 10 classes (the\n digits 0-9). keep_prob is a scalar placeholder for the probability of\n dropout.\n \"\"\"\n # Reshape to use within a convolutional neural net.\n # Last dimension is for \"features\" - there is only one here, since images are\n # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.\n with tf.name_scope('reshape'):\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n\n # First convolutional layer - maps one grayscale image to 32 feature maps.\n with tf.name_scope('conv1'):\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n # Pooling layer - downsamples by 2X.\n with tf.name_scope('pool1'):\n h_pool1 = max_pool_2x2(h_conv1)\n\n # Second convolutional layer -- maps 32 feature maps to 64.\n with tf.name_scope('conv2'):\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\n # Second pooling layer.\n with tf.name_scope('pool2'):\n h_pool2 = max_pool_2x2(h_conv2)\n\n # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image\n # is down to 7x7x64 feature maps -- maps this to 1024 features.\n with tf.name_scope('fc1'):\n W_fc1 = weight_variable([7 * 7 * 64, 1024])\n b_fc1 = bias_variable([1024])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # Dropout - controls the complexity of the model, prevents co-adaptation of\n # features.\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n # Map the 1024 features to 10 classes, one for each digit\n with tf.name_scope('fc2'):\n W_fc2 = weight_variable([1024, 10])\n b_fc2 = bias_variable([10])\n\n y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n return y_conv, keep_prob\n\n\ndef 
conv2d(x, W):\n \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n \"\"\"max_pool_2x2 downsamples a feature map by 2X.\"\"\"\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef weight_variable(shape):\n \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef main(_):\n # Import data\n mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n\n # Create the model\n x = tf.placeholder(tf.float32, [None, 784])\n\n # Define loss and optimizer\n y_ = tf.placeholder(tf.float32, [None, 10])\n\n # Build the graph for the deep net\n y_conv, keep_prob = deepnn(x)\n\n with tf.name_scope('loss'):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,\n logits=y_conv)\n cross_entropy = tf.reduce_mean(cross_entropy)\n\n with tf.name_scope('adam_optimizer'):\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n correct_prediction = tf.cast(correct_prediction, tf.float32)\n accuracy = tf.reduce_mean(correct_prediction)\n\n graph_location = tempfile.mkdtemp()\n print('Saving graph to: %s' % graph_location)\n train_writer = tf.summary.FileWriter(graph_location)\n train_writer.add_graph(tf.get_default_graph())\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(20000):\n batch = mnist.train.next_batch(50)\n if i % 100 == 0:\n train_accuracy = accuracy.eval(feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n print('step %d, training accuracy %g' % (i, 
train_accuracy))\n train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n\n print('test accuracy %g' % accuracy.eval(feed_dict={\n x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str,\n default='/tmp/tensorflow/mnist/input_data',\n help='Directory for storing input data')\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476168,"cells":{"repo_name":{"kind":"string","value":"HyperloopTeam/FullOpenMDAO"},"path":{"kind":"string","value":"lib/python2.7/site-packages/requests-2.2.1-py2.7.egg/requests/packages/urllib3/response.py"},"copies":{"kind":"string","value":"316"},"size":{"kind":"string","value":"10537"},"content":{"kind":"string","value":"# urllib3/response.py\n# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)\n#\n# This module is part of urllib3 and is released under\n# the MIT License: http://www.opensource.org/licenses/mit-license.php\n\n\nimport logging\nimport zlib\nimport io\n\nfrom .exceptions import DecodeError\nfrom .packages.six import string_types as basestring, binary_type\nfrom .util import is_fp_closed\n\n\nlog = logging.getLogger(__name__)\n\n\nclass DeflateDecoder(object):\n\n def __init__(self):\n self._first_try = True\n self._data = binary_type()\n self._obj = zlib.decompressobj()\n\n def __getattr__(self, name):\n return getattr(self._obj, name)\n\n def decompress(self, data):\n if not self._first_try:\n return self._obj.decompress(data)\n\n self._data += data\n try:\n return self._obj.decompress(data)\n except zlib.error:\n self._first_try = False\n self._obj = zlib.decompressobj(-zlib.MAX_WBITS)\n try:\n return self.decompress(self._data)\n finally:\n self._data = None\n\n\ndef _get_decoder(mode):\n if mode == 'gzip':\n return zlib.decompressobj(16 + 
zlib.MAX_WBITS)\n\n return DeflateDecoder()\n\n\nclass HTTPResponse(io.IOBase):\n \"\"\"\n HTTP Response container.\n\n Backwards-compatible to httplib's HTTPResponse but the response ``body`` is\n loaded and decoded on-demand when the ``data`` property is accessed.\n\n Extra parameters for behaviour not present in httplib.HTTPResponse:\n\n :param preload_content:\n If True, the response's body will be preloaded during construction.\n\n :param decode_content:\n If True, attempts to decode specific content-encoding's based on headers\n (like 'gzip' and 'deflate') will be skipped and raw data will be used\n instead.\n\n :param original_response:\n When this HTTPResponse wrapper is generated from an httplib.HTTPResponse\n object, it's convenient to include the original for debug purposes. It's\n otherwise unused.\n \"\"\"\n\n CONTENT_DECODERS = ['gzip', 'deflate']\n REDIRECT_STATUSES = [301, 302, 303, 307, 308]\n\n def __init__(self, body='', headers=None, status=0, version=0, reason=None,\n strict=0, preload_content=True, decode_content=True,\n original_response=None, pool=None, connection=None):\n self.headers = headers or {}\n self.status = status\n self.version = version\n self.reason = reason\n self.strict = strict\n self.decode_content = decode_content\n\n self._decoder = None\n self._body = body if body and isinstance(body, basestring) else None\n self._fp = None\n self._original_response = original_response\n self._fp_bytes_read = 0\n\n self._pool = pool\n self._connection = connection\n\n if hasattr(body, 'read'):\n self._fp = body\n\n if preload_content and not self._body:\n self._body = self.read(decode_content=decode_content)\n\n def get_redirect_location(self):\n \"\"\"\n Should we redirect and where to?\n\n :returns: Truthy redirect location string if we got a redirect status\n code and valid location. ``None`` if redirect status and no\n location. 
``False`` if not a redirect status code.\n \"\"\"\n if self.status in self.REDIRECT_STATUSES:\n return self.headers.get('location')\n\n return False\n\n def release_conn(self):\n if not self._pool or not self._connection:\n return\n\n self._pool._put_conn(self._connection)\n self._connection = None\n\n @property\n def data(self):\n # For backwords-compat with earlier urllib3 0.4 and earlier.\n if self._body:\n return self._body\n\n if self._fp:\n return self.read(cache_content=True)\n\n def tell(self):\n \"\"\"\n Obtain the number of bytes pulled over the wire so far. May differ from\n the amount of content returned by :meth:``HTTPResponse.read`` if bytes\n are encoded on the wire (e.g, compressed).\n \"\"\"\n return self._fp_bytes_read\n\n def read(self, amt=None, decode_content=None, cache_content=False):\n \"\"\"\n Similar to :meth:`httplib.HTTPResponse.read`, but with two additional\n parameters: ``decode_content`` and ``cache_content``.\n\n :param amt:\n How much of the content to read. If specified, caching is skipped\n because it doesn't make sense to cache partial content as the full\n response.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param cache_content:\n If True, will save the returned data such that the same result is\n returned despite of the state of the underlying file object. This\n is useful if you want the ``.data`` property to continue working\n after having ``.read()`` the file object. 
(Overridden if ``amt`` is\n set.)\n \"\"\"\n # Note: content-encoding value should be case-insensitive, per RFC 2616\n # Section 3.5\n content_encoding = self.headers.get('content-encoding', '').lower()\n if self._decoder is None:\n if content_encoding in self.CONTENT_DECODERS:\n self._decoder = _get_decoder(content_encoding)\n if decode_content is None:\n decode_content = self.decode_content\n\n if self._fp is None:\n return\n\n flush_decoder = False\n\n try:\n if amt is None:\n # cStringIO doesn't like amt=None\n data = self._fp.read()\n flush_decoder = True\n else:\n cache_content = False\n data = self._fp.read(amt)\n if amt != 0 and not data: # Platform-specific: Buggy versions of Python.\n # Close the connection when no data is returned\n #\n # This is redundant to what httplib/http.client _should_\n # already do. However, versions of python released before\n # December 15, 2012 (http://bugs.python.org/issue16298) do not\n # properly close the connection in all cases. There is no harm\n # in redundantly calling close.\n self._fp.close()\n flush_decoder = True\n\n self._fp_bytes_read += len(data)\n\n try:\n if decode_content and self._decoder:\n data = self._decoder.decompress(data)\n except (IOError, zlib.error) as e:\n raise DecodeError(\n \"Received response with content-encoding: %s, but \"\n \"failed to decode it.\" % content_encoding,\n e)\n\n if flush_decoder and decode_content and self._decoder:\n buf = self._decoder.decompress(binary_type())\n data += buf + self._decoder.flush()\n\n if cache_content:\n self._body = data\n\n return data\n\n finally:\n if self._original_response and self._original_response.isclosed():\n self.release_conn()\n\n def stream(self, amt=2**16, decode_content=None):\n \"\"\"\n A generator wrapper for the read() method. A call will block until\n ``amt`` bytes have been read from the connection or until the\n connection is closed.\n\n :param amt:\n How much of the content to read. 
The generator will return up to\n much data per iteration, but may return less. This is particularly\n likely when using compressed data. However, the empty string will\n never be returned.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n \"\"\"\n while not is_fp_closed(self._fp):\n data = self.read(amt=amt, decode_content=decode_content)\n\n if data:\n yield data\n\n\n @classmethod\n def from_httplib(ResponseCls, r, **response_kw):\n \"\"\"\n Given an :class:`httplib.HTTPResponse` instance ``r``, return a\n corresponding :class:`urllib3.response.HTTPResponse` object.\n\n Remaining parameters are passed to the HTTPResponse constructor, along\n with ``original_response=r``.\n \"\"\"\n\n # Normalize headers between different versions of Python\n headers = {}\n for k, v in r.getheaders():\n # Python 3: Header keys are returned capitalised\n k = k.lower()\n\n has_value = headers.get(k)\n if has_value: # Python 3: Repeating header keys are unmerged.\n v = ', '.join([has_value, v])\n\n headers[k] = v\n\n # HTTPResponse objects in Python 3 don't have a .strict attribute\n strict = getattr(r, 'strict', 0)\n return ResponseCls(body=r,\n headers=headers,\n status=r.status,\n version=r.version,\n reason=r.reason,\n strict=strict,\n original_response=r,\n **response_kw)\n\n # Backwards-compatibility methods for httplib.HTTPResponse\n def getheaders(self):\n return self.headers\n\n def getheader(self, name, default=None):\n return self.headers.get(name, default)\n\n # Overrides from io.IOBase\n def close(self):\n if not self.closed:\n self._fp.close()\n\n @property\n def closed(self):\n if self._fp is None:\n return True\n elif hasattr(self._fp, 'closed'):\n return self._fp.closed\n elif hasattr(self._fp, 'isclosed'): # Python 2\n return self._fp.isclosed()\n else:\n return True\n\n def fileno(self):\n if self._fp is None:\n raise IOError(\"HTTPResponse has no file to get a fileno from\")\n elif hasattr(self._fp, 
\"fileno\"):\n return self._fp.fileno()\n else:\n raise IOError(\"The file-like object this HTTPResponse is wrapped \"\n \"around has no file descriptor\")\n\n def flush(self):\n if self._fp is not None and hasattr(self._fp, 'flush'):\n return self._fp.flush()\n\n def readable(self):\n return True\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":476169,"cells":{"repo_name":{"kind":"string","value":"jhoos/django"},"path":{"kind":"string","value":"tests/unmanaged_models/tests.py"},"copies":{"kind":"string","value":"296"},"size":{"kind":"string","value":"2174"},"content":{"kind":"string","value":"from __future__ import unicode_literals\n\nfrom django.db import connection\nfrom django.test import TestCase\n\nfrom .models import A01, A02, B01, B02, C01, C02, Managed1, Unmanaged2\n\n\nclass SimpleTests(TestCase):\n\n def test_simple(self):\n \"\"\"\n The main test here is that the all the models can be created without\n any database errors. We can also do some more simple insertion and\n lookup tests whilst we're here to show that the second of models do\n refer to the tables from the first set.\n \"\"\"\n # Insert some data into one set of models.\n a = A01.objects.create(f_a=\"foo\", f_b=42)\n B01.objects.create(fk_a=a, f_a=\"fred\", f_b=1729)\n c = C01.objects.create(f_a=\"barney\", f_b=1)\n c.mm_a = [a]\n\n # ... 
and pull it out via the other set.\n a2 = A02.objects.all()[0]\n self.assertIsInstance(a2, A02)\n self.assertEqual(a2.f_a, \"foo\")\n\n b2 = B02.objects.all()[0]\n self.assertIsInstance(b2, B02)\n self.assertEqual(b2.f_a, \"fred\")\n\n self.assertIsInstance(b2.fk_a, A02)\n self.assertEqual(b2.fk_a.f_a, \"foo\")\n\n self.assertEqual(list(C02.objects.filter(f_a=None)), [])\n\n resp = list(C02.objects.filter(mm_a=a.id))\n self.assertEqual(len(resp), 1)\n\n self.assertIsInstance(resp[0], C02)\n self.assertEqual(resp[0].f_a, 'barney')\n\n\nclass ManyToManyUnmanagedTests(TestCase):\n\n def test_many_to_many_between_unmanaged(self):\n \"\"\"\n The intermediary table between two unmanaged models should not be created.\n \"\"\"\n table = Unmanaged2._meta.get_field('mm').m2m_db_table()\n tables = connection.introspection.table_names()\n self.assertNotIn(table, tables, \"Table '%s' should not exist, but it does.\" % table)\n\n def test_many_to_many_between_unmanaged_and_managed(self):\n \"\"\"\n An intermediary table between a managed and an unmanaged model should be created.\n \"\"\"\n table = Managed1._meta.get_field('mm').m2m_db_table()\n tables = connection.introspection.table_names()\n self.assertIn(table, tables, \"Table '%s' does not exist.\" % table)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476170,"cells":{"repo_name":{"kind":"string","value":"UASLab/ImageAnalysis"},"path":{"kind":"string","value":"video/import_apt.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1267"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\nimport argparse\nimport gzip\n\nargparser = argparse.ArgumentParser(description='import apt.dat.gz from FlightGear')\nargparser.add_argument('--file', help='fgfs apt.dat.gz file')\nargs = argparser.parse_args()\n\nft2m = 0.3048\n\nident = ''\nalt = ''\ncount = 0\nlat_sum = 0\nlon_sum = 0\n\nprint 'Ident,Lat,Lon,Alt'\nwith gzip.open(args.file, 'rb') as f:\n for line in f:\n tokens = 
line.split()\n #print tokens\n if len(tokens) and tokens[0] == '1':\n # start of apt record\n if count > 0:\n # output last record\n print '%s,%.8f,%.8f,%.0f' % (ident, lat_sum / count,\n lon_sum / count, alt)\n ident = tokens[4]\n alt = float(tokens[1]) * ft2m\n count = 0\n lat_sum = 0\n lon_sum = 0\n elif len(tokens) and tokens[0] == '100':\n # basic data\n lat_sum += float(tokens[9])\n lon_sum += float(tokens[10])\n lat_sum += float(tokens[18])\n lon_sum += float(tokens[19])\n count += 2\nif count > 0:\n # output last record\n print '%s,%.8f,%.8f,%.0f' % (ident, lat_sum / count,\n lon_sum / count, alt)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":476171,"cells":{"repo_name":{"kind":"string","value":"viz-dev/viz"},"path":{"kind":"string","value":"qa/rpc-tests/mempool_reorg.py"},"copies":{"kind":"string","value":"41"},"size":{"kind":"string","value":"4514"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n# Copyright (c) 2014-2016 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n#\n# Test re-org scenarios with a mempool that contains transactions\n# that spend (directly or indirectly) coinbase transactions.\n#\n\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import *\n\n# Create one-input, one-output, no-fee transaction:\nclass MempoolCoinbaseTest(BitcoinTestFramework):\n def __init__(self):\n super().__init__()\n self.num_nodes = 2\n self.setup_clean_chain = False\n\n alert_filename = None # Set by setup_network\n\n def setup_network(self):\n args = [\"-checkmempool\", \"-debug=mempool\"]\n self.nodes = []\n self.nodes.append(start_node(0, self.options.tmpdir, args))\n self.nodes.append(start_node(1, self.options.tmpdir, args))\n connect_nodes(self.nodes[1], 0)\n self.is_network_split = False\n self.sync_all()\n\n def run_test(self):\n start_count = 
self.nodes[0].getblockcount()\n\n # Mine three blocks. After this, nodes[0] blocks\n # 101, 102, and 103 are spend-able.\n new_blocks = self.nodes[1].generate(4)\n self.sync_all()\n\n node0_address = self.nodes[0].getnewaddress()\n node1_address = self.nodes[1].getnewaddress()\n\n # Three scenarios for re-orging coinbase spends in the memory pool:\n # 1. Direct coinbase spend : spend_101\n # 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1\n # 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1\n # Use invalidatblock to make all of the above coinbase spends invalid (immature coinbase),\n # and make sure the mempool code behaves correctly.\n b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]\n coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]\n spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 49.99)\n spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 49.99)\n spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 49.99)\n\n # Create a block-height-locked transaction which will be invalid after reorg\n timelock_tx = self.nodes[0].createrawtransaction([{\"txid\": coinbase_txids[0], \"vout\": 0}], {node0_address: 49.99})\n # Set the time lock\n timelock_tx = timelock_tx.replace(\"ffffffff\", \"11111191\", 1)\n timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + \"000000\"\n timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)[\"hex\"]\n assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)\n\n # Broadcast and mine spend_102 and 103:\n spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)\n spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)\n self.nodes[0].generate(1)\n assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)\n\n # Create 102_1 and 103_1:\n spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, 
node1_address, 49.98)\n spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 49.98)\n\n # Broadcast and mine 103_1:\n spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)\n last_block = self.nodes[0].generate(1)\n timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)\n\n # ... now put spend_101 and spend_102_1 in memory pools:\n spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)\n spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)\n\n self.sync_all()\n\n assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})\n\n for node in self.nodes:\n node.invalidateblock(last_block[0])\n assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})\n\n # Use invalidateblock to re-org back and make all those coinbase spends\n # immature/invalid:\n for node in self.nodes:\n node.invalidateblock(new_blocks[0])\n\n self.sync_all()\n\n # mempool should be empty.\n assert_equal(set(self.nodes[0].getrawmempool()), set())\n\nif __name__ == '__main__':\n MempoolCoinbaseTest().main()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":476172,"cells":{"repo_name":{"kind":"string","value":"gurneyalex/odoo"},"path":{"kind":"string","value":"addons/event_sale/__manifest__.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"1311"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n{\n 'name': 'Events Sales',\n 'version': '1.1',\n 'category': 'Marketing/Events',\n 'website': 'https://www.odoo.com/page/events',\n 'description': \"\"\"\nCreating registration with sales orders.\n========================================\n\nThis module allows you to automate and connect your registration creation with\nyour main sale flow and therefore, to enable the invoicing feature of registrations.\n\nIt defines a new kind of service products that offers you the possibility to\nchoose an event category associated with it. 
When you encode a sales order for\nthat product, you will be able to choose an existing event of that category and\nwhen you confirm your sales order it will automatically create a registration for\nthis event.\n\"\"\",\n 'depends': ['event', 'sale_management'],\n 'data': [\n 'views/assets.xml',\n 'views/event_views.xml',\n 'views/product_views.xml',\n 'views/sale_order_views.xml',\n 'data/event_sale_data.xml',\n 'report/event_event_templates.xml',\n 'security/ir.model.access.csv',\n 'security/event_security.xml',\n 'wizard/event_edit_registration.xml',\n 'wizard/event_configurator_views.xml',\n ],\n 'demo': ['data/event_demo.xml'],\n 'installable': True,\n 'auto_install': True\n}\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":476173,"cells":{"repo_name":{"kind":"string","value":"OpenTrons/opentrons_sdk"},"path":{"kind":"string","value":"api/src/opentrons/api/calibration.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"11815"},"content":{"kind":"string","value":"import functools\nimport logging\nfrom copy import copy\nfrom typing import Optional\n\nfrom opentrons.util import calibration_functions\nfrom opentrons.config import feature_flags as ff\nfrom opentrons.broker import Broker\nfrom opentrons.types import Point, Mount, Location\nfrom opentrons.protocol_api import labware\nfrom opentrons.hardware_control import CriticalPoint, ThreadedAsyncLock\n\nfrom .models import Container\nfrom .util import robot_is_busy, RobotBusy\n\n\nlog = logging.getLogger(__name__)\n\nVALID_STATES = {'probing', 'moving', 'ready'}\n\n\n# This hack is because if you have an old container that uses Placeable with\n# just one well, Placeable.wells() returns the Well rather than [Well].\n# Passing the well as an argument, though, will always return the well.\ndef _well0(cont):\n if isinstance(cont, labware.Labware):\n return cont.wells()[0]\n else:\n return cont.wells(0)\n\n\ndef _home_if_first_call(func):\n \"\"\" Decorator to make a function 
home if it is the first one called in\n this session.\"\"\"\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n self = args[0]\n if not self._has_homed:\n log.info(\"this is the first calibration action, homing\")\n self._hardware.home()\n self._has_homed = True\n return func(*args, **kwargs)\n return decorated\n\n\nclass CalibrationManager(RobotBusy):\n \"\"\"\n Serves endpoints that are primarily used in\n opentrons/app/ui/robot/api-client/client.js\n \"\"\"\n TOPIC = 'calibration'\n\n def __init__(self, hardware, loop=None, broker=None, lock=None):\n self._broker = broker or Broker()\n self._hardware = hardware\n self._loop = loop\n self.state = None\n self._lock = lock\n self._has_homed = False\n\n @property\n def busy_lock(self) -> ThreadedAsyncLock:\n return self._lock\n\n def _set_state(self, state):\n if state not in VALID_STATES:\n raise ValueError(\n 'State {0} not in {1}'.format(state, VALID_STATES))\n self.state = state\n self._on_state_changed()\n\n @robot_is_busy\n @_home_if_first_call\n def tip_probe(self, instrument):\n inst = instrument._instrument\n log.info('Probing tip with {}'.format(instrument.name))\n self._set_state('probing')\n\n if instrument._context:\n instrument._context.location_cache = None\n mount = Mount[instrument._instrument.mount.upper()]\n assert instrument.tip_racks,\\\n 'No known tipracks for {}'.format(instrument)\n tip_length = inst._tip_length_for(\n instrument.tip_racks[0]._container)\n # TODO (tm, 2019-04-22): This warns \"coroutine not awaited\" in\n # TODO: test. 
The test fixture probably needs to be modified to get\n # TODO: a synchronous adapter instead of a raw hardware_control API\n # finally:\n measured_center = self._hardware.locate_tip_probe_center(\n mount, tip_length)\n else:\n measured_center = calibration_functions.probe_instrument(\n instrument=inst,\n robot=inst.robot)\n\n log.info('Measured probe top center: {0}'.format(measured_center))\n\n if instrument._context:\n self._hardware.update_instrument_offset(\n Mount[instrument._instrument.mount.upper()],\n from_tip_probe=measured_center)\n config = self._hardware.config\n else:\n config = calibration_functions.update_instrument_config(\n instrument=inst,\n measured_center=measured_center)\n\n log.info('New config: {0}'.format(config))\n\n self._move_to_front(instrument)\n self._set_state('ready')\n\n @robot_is_busy\n @_home_if_first_call\n def pick_up_tip(self, instrument, container):\n if not isinstance(container, Container):\n raise ValueError(\n 'Invalid object type {0}. Expected models.Container'\n .format(type(container)))\n\n inst = instrument._instrument\n log.info('Picking up tip from {} in {} with {}'.format(\n container.name, container.slot, instrument.name))\n self._set_state('moving')\n if instrument._context:\n with instrument._context.temp_connect(self._hardware):\n loc = _well0(container._container)\n instrument._context.location_cache =\\\n Location(self._hardware.gantry_position(\n Mount[inst.mount.upper()],\n critical_point=CriticalPoint.NOZZLE,\n refresh=True),\n loc)\n loc_leg = _well0(container._container)\n inst.pick_up_tip(loc_leg)\n else:\n inst.pick_up_tip(_well0(container._container))\n self._set_state('ready')\n\n @robot_is_busy\n @_home_if_first_call\n def drop_tip(self, instrument, container):\n if not isinstance(container, Container):\n raise ValueError(\n 'Invalid object type {0}. 
Expected models.Container'\n .format(type(container)))\n\n inst = instrument._instrument\n log.info('Dropping tip from {} in {} with {}'.format(\n container.name, container.slot, instrument.name))\n self._set_state('moving')\n if instrument._context:\n with instrument._context.temp_connect(self._hardware):\n instrument._context.location_cache = None\n inst.drop_tip(_well0(container._container))\n else:\n inst.drop_tip(_well0(container._container))\n self._set_state('ready')\n\n @robot_is_busy\n @_home_if_first_call\n def return_tip(self, instrument):\n inst = instrument._instrument\n log.info('Returning tip from {}'.format(instrument.name))\n self._set_state('moving')\n if instrument._context:\n with instrument._context.temp_connect(self._hardware):\n instrument._context.location_cache = None\n inst.return_tip()\n else:\n inst.return_tip()\n self._set_state('ready')\n\n @robot_is_busy\n @_home_if_first_call\n def move_to_front(self, instrument):\n \"\"\"Public face of move_to_front\"\"\"\n self._move_to_front(instrument)\n\n def _move_to_front(self, instrument):\n \"\"\"Private move_to_front that can be called internally\"\"\"\n inst = instrument._instrument\n log.info('Moving {}'.format(instrument.name))\n self._set_state('moving')\n if instrument._context:\n current = self._hardware.gantry_position(\n Mount[inst.mount.upper()],\n critical_point=CriticalPoint.NOZZLE,\n refresh=True)\n dest = instrument._context.deck.position_for(5) \\\n .point._replace(z=150)\n self._hardware.move_to(Mount[inst.mount.upper()],\n current,\n critical_point=CriticalPoint.NOZZLE)\n self._hardware.move_to(Mount[inst.mount.upper()],\n dest._replace(z=current.z),\n critical_point=CriticalPoint.NOZZLE)\n self._hardware.move_to(Mount[inst.mount.upper()],\n dest, critical_point=CriticalPoint.NOZZLE)\n else:\n calibration_functions.move_instrument_for_probing_prep(\n inst, inst.robot)\n self._set_state('ready')\n\n @robot_is_busy\n @_home_if_first_call\n def move_to(self, instrument, 
container):\n if not isinstance(container, Container):\n raise ValueError(\n 'Invalid object type {0}. Expected models.Container'\n .format(type(container)))\n\n inst = instrument._instrument\n cont = container._container\n target = _well0(cont).top()\n\n log.info('Moving {} to {} in {}'.format(\n instrument.name, container.name, container.slot))\n self._set_state('moving')\n\n if instrument._context:\n with instrument._context.temp_connect(self._hardware):\n instrument._context.location_cache = None\n inst.move_to(target)\n else:\n inst.move_to(target)\n\n self._set_state('ready')\n\n @robot_is_busy\n @_home_if_first_call\n def jog(self, instrument, distance, axis):\n inst = instrument._instrument\n log.info('Jogging {} by {} in {}'.format(\n instrument.name, distance, axis))\n self._set_state('moving')\n if instrument._context:\n self._hardware.move_rel(\n Mount[inst.mount.upper()], Point(**{axis: distance}))\n else:\n calibration_functions.jog_instrument(\n instrument=inst,\n distance=distance,\n axis=axis,\n robot=inst.robot)\n self._set_state('ready')\n\n @robot_is_busy\n @_home_if_first_call\n def home(self, instrument):\n inst = instrument._instrument\n log.info('Homing {}'.format(instrument.name))\n self._set_state('moving')\n if instrument._context:\n with instrument._context.temp_connect(self._hardware):\n instrument._context.location_cache = None\n inst.home()\n else:\n inst.home()\n self._set_state('ready')\n\n @robot_is_busy\n def home_all(self, instrument):\n # NOTE: this only takes instrument as a param, because we need\n # its reference to the ProtocolContext. 
This is code smell that\n # will be removed once sessions are managed better\n log.info('Homing via Calibration Manager')\n self._set_state('moving')\n if instrument._context:\n with instrument._context.temp_connect(self._hardware):\n instrument._context.home()\n else:\n self._hardware.home()\n self._set_state('ready')\n\n @robot_is_busy\n def update_container_offset(self, container, instrument):\n inst = instrument._instrument\n log.info('Updating {} in {}'.format(container.name, container.slot))\n if instrument._context:\n if 'centerMultichannelOnWells' in container._container.quirks:\n cp: Optional[CriticalPoint] = CriticalPoint.XY_CENTER\n else:\n cp = None\n here = self._hardware.gantry_position(Mount[inst.mount.upper()],\n critical_point=cp,\n refresh=True)\n # Reset calibration so we don’t actually calibrate the offset\n # relative to the old calibration\n container._container.set_calibration(Point(0, 0, 0))\n if ff.calibrate_to_bottom() and not (\n container._container.is_tiprack):\n orig = _well0(container._container)._bottom().point\n else:\n orig = _well0(container._container)._top().point\n delta = here - orig\n labware.save_calibration(container._container, delta)\n else:\n inst.robot.calibrate_container_with_instrument(\n container=container._container,\n instrument=inst,\n save=True\n )\n\n def _snapshot(self):\n return {\n 'topic': CalibrationManager.TOPIC,\n 'name': 'state',\n 'payload': copy(self)\n }\n\n def _on_state_changed(self):\n self._hardware._use_safest_height = (self.state in\n ['probing', 'moving'])\n self._broker.publish(CalibrationManager.TOPIC, self._snapshot())\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476174,"cells":{"repo_name":{"kind":"string","value":"NeuralEnsemble/python-neo"},"path":{"kind":"string","value":"neo/core/baseneo.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"14038"},"content":{"kind":"string","value":"\"\"\"\nThis module defines :class:`BaseNeo`, the abstract 
base class\nused by all :module:`neo.core` classes.\n\"\"\"\n\nfrom copy import deepcopy\nfrom datetime import datetime, date, time, timedelta\nfrom decimal import Decimal\nimport logging\nfrom numbers import Number\n\nimport numpy as np\n\nALLOWED_ANNOTATION_TYPES = (int, float, complex,\n str, bytes,\n type(None),\n datetime, date, time, timedelta,\n Number, Decimal,\n np.number, np.bool_)\n\nlogger = logging.getLogger(\"Neo\")\n\n\nclass MergeError(Exception):\n pass\n\n\ndef _check_annotations(value):\n \"\"\"\n Recursively check that value is either of a \"simple\" type (number, string,\n date/time) or is a (possibly nested) dict, list or numpy array containing\n only simple types.\n \"\"\"\n if isinstance(value, np.ndarray):\n if not issubclass(value.dtype.type, ALLOWED_ANNOTATION_TYPES):\n raise ValueError(\"Invalid annotation. NumPy arrays with dtype %s\"\n \"are not allowed\" % value.dtype.type)\n elif isinstance(value, dict):\n for element in value.values():\n _check_annotations(element)\n elif isinstance(value, (list, tuple)):\n for element in value:\n _check_annotations(element)\n elif not isinstance(value, ALLOWED_ANNOTATION_TYPES):\n raise ValueError(\"Invalid annotation. Annotations of type %s are not\"\n \"allowed\" % type(value))\n\n\ndef merge_annotation(a, b):\n \"\"\"\n First attempt at a policy for merging annotations (intended for use with\n parallel computations using MPI). 
This policy needs to be discussed\n further, or we could allow the user to specify a policy.\n\n Current policy:\n For arrays or lists: concatenate\n For dicts: merge recursively\n For strings: concatenate with ';'\n Otherwise: fail if the annotations are not equal\n \"\"\"\n assert type(a) == type(b), 'type({}) {} != type({}) {}'.format(a, type(a),\n b, type(b))\n if isinstance(a, dict):\n return merge_annotations(a, b)\n elif isinstance(a, np.ndarray): # concatenate b to a\n return np.append(a, b)\n elif isinstance(a, list): # concatenate b to a\n return a + b\n elif isinstance(a, str):\n if a == b:\n return a\n else:\n return a + \";\" + b\n else:\n assert a == b, '{} != {}'.format(a, b)\n return a\n\n\ndef merge_annotations(A, *Bs):\n \"\"\"\n Merge two sets of annotations.\n\n Merging follows these rules:\n All keys that are in A or B, but not both, are kept.\n For keys that are present in both:\n For arrays or lists: concatenate\n For dicts: merge recursively\n For strings: concatenate with ';'\n Otherwise: warn if the annotations are not equal\n \"\"\"\n merged = A.copy()\n for B in Bs:\n for name in B:\n if name not in merged:\n merged[name] = B[name]\n else:\n try:\n merged[name] = merge_annotation(merged[name], B[name])\n except BaseException as exc:\n # exc.args += ('key %s' % name,)\n # raise\n merged[name] = \"MERGE CONFLICT\" # temporary hack\n logger.debug(\"Merging annotations: A=%s Bs=%s merged=%s\", A, Bs, merged)\n return merged\n\n\ndef intersect_annotations(A, B):\n \"\"\"\n Identify common entries in dictionaries A and B\n and return these in a separate dictionary.\n\n Entries have to share key as well as value to be\n considered common.\n\n Parameters\n ----------\n A, B : dict\n Dictionaries to merge.\n \"\"\"\n\n result = {}\n\n for key in set(A.keys()) & set(B.keys()):\n v1, v2 = A[key], B[key]\n assert type(v1) == type(v2), 'type({}) {} != type({}) {}'.format(v1, type(v1),\n v2, type(v2))\n if isinstance(v1, dict) and v1 == v2:\n 
result[key] = deepcopy(v1)\n elif isinstance(v1, str) and v1 == v2:\n result[key] = A[key]\n elif isinstance(v1, list) and v1 == v2:\n result[key] = deepcopy(v1)\n elif isinstance(v1, np.ndarray) and all(v1 == v2):\n result[key] = deepcopy(v1)\n return result\n\n\ndef _reference_name(class_name):\n \"\"\"\n Given the name of a class, return an attribute name to be used for\n references to instances of that class.\n\n For example, a Segment object has a parent Block object, referenced by\n `segment.block`. The attribute name `block` is obtained by calling\n `_container_name(\"Block\")`.\n \"\"\"\n return class_name.lower()\n\n\ndef _container_name(class_name):\n \"\"\"\n Given the name of a class, return an attribute name to be used for\n lists (or other containers) containing instances of that class.\n\n For example, a Block object contains a list of Segment objects,\n referenced by `block.segments`. The attribute name `segments` is\n obtained by calling `_container_name_plural(\"Segment\")`.\n \"\"\"\n return _reference_name(class_name) + 's'\n\n\nclass BaseNeo:\n \"\"\"\n This is the base class from which all Neo objects inherit.\n\n This class implements support for universally recommended arguments,\n and also sets up the :attr:`annotations` dict for additional arguments.\n\n Each class can define one or more of the following class attributes:\n :_parent_objects: Neo objects that can be parents of this\n object. Note that no Neo object can have\n more than one parent.\n An instance attribute named\n class.__name__.lower() will be automatically\n defined to hold this parent and will be\n initialized to None.\n :_necessary_attrs: A list of tuples containing the attributes that the\n class must have. 
The tuple can have 2-4 elements.\n The first element is the attribute name.\n The second element is the attribute type.\n The third element is the number of dimensions\n (only for numpy arrays and quantities).\n The fourth element is the dtype of array\n (only for numpy arrays and quantities).\n This does NOT include the attributes holding the\n parents or children of the object.\n :_recommended_attrs: A list of tuples containing the attributes that\n the class may optionally have. It uses the same\n structure as :_necessary_attrs:\n :_repr_pretty_attrs_keys_: The names of attributes printed when\n pretty-printing using iPython.\n\n The following helper properties are available:\n :_parent_containers: The names of the container attributes used\n to store :_parent_objects:\n :parents: All objects that are parents of the current object.\n :_all_attrs: All required and optional attributes.\n :_necessary_attrs: + :_recommended_attrs:\n\n The following \"universal\" methods are available:\n :__init__: Grabs the universally recommended arguments :attr:`name`,\n :attr:`file_origin`, and :attr:`description` and stores them as\n attributes.\n\n Also takes every additional argument (that is, every argument\n that is not handled by :class:`BaseNeo` or the child class), and\n puts in the dict :attr:`annotations`.\n\n :annotate(**args): Updates :attr:`annotations` with keyword/value\n pairs.\n\n :merge(**args): Merge the contents of another object into this one.\n The merge method implemented here only merges\n annotations (see :merge_annotations:).\n Subclasses should implementt their own merge rules.\n\n :merge_annotations(**args): Merge the :attr:`annotations` of another\n object into this one.\n\n Each child class should:\n 0) describe its parents (if any) and attributes in the relevant\n class attributes. 
:_recommended_attrs: should append\n BaseNeo._recommended_attrs to the end.\n 1) call BaseNeo.__init__(self, name=name, description=description,\n file_origin=file_origin, **annotations)\n with the universal recommended arguments, plus optional annotations\n 2) process its required arguments in its __new__ or __init__ method\n 3) process its non-universal recommended arguments (in its __new__ or\n __init__ method\n\n Non-keyword arguments should only be used for required arguments.\n\n The required and recommended arguments for each child class (Neo object)\n are specified in the _necessary_attrs and _recommended_attrs attributes and\n documentation for the child object.\n \"\"\"\n\n # these attributes control relationships, they need to be\n # specified in each child class\n # Parent objects whose children can have a single parent\n _parent_objects = ()\n # Attribute names corresponding to _parent_objects\n _parent_attrs = ()\n\n # Attributes that an instance is required to have defined\n _necessary_attrs = ()\n # Attributes that an instance may or may have defined\n _recommended_attrs = (('name', str),\n ('description', str),\n ('file_origin', str))\n # Attributes that are used for pretty-printing\n _repr_pretty_attrs_keys_ = (\"name\", \"description\", \"annotations\")\n\n def __init__(self, name=None, description=None, file_origin=None,\n **annotations):\n \"\"\"\n This is the base constructor for all Neo objects.\n\n Stores universally recommended attributes and creates\n :attr:`annotations` from additional arguments not processed by\n :class:`BaseNeo` or the child class.\n \"\"\"\n # create `annotations` for additional arguments\n _check_annotations(annotations)\n self.annotations = annotations\n\n # these attributes are recommended for all objects.\n self.name = name\n self.description = description\n self.file_origin = file_origin\n\n # initialize parent containers\n for parent in self._parent_containers:\n setattr(self, parent, None)\n\n def annotate(self, 
**annotations):\n \"\"\"\n Add annotations (non-standardized metadata) to a Neo object.\n\n Example:\n\n >>> obj.annotate(key1=value0, key2=value1)\n >>> obj.key2\n value2\n \"\"\"\n _check_annotations(annotations)\n self.annotations.update(annotations)\n\n def _has_repr_pretty_attrs_(self):\n return any(getattr(self, k) for k in self._repr_pretty_attrs_keys_)\n\n def _repr_pretty_attrs_(self, pp, cycle):\n first = True\n for key in self._repr_pretty_attrs_keys_:\n value = getattr(self, key)\n if value:\n if first:\n first = False\n else:\n pp.breakable()\n with pp.group(indent=1):\n pp.text(\"{}: \".format(key))\n pp.pretty(value)\n\n def _repr_pretty_(self, pp, cycle):\n \"\"\"\n Handle pretty-printing the :class:`BaseNeo`.\n \"\"\"\n pp.text(self.__class__.__name__)\n if self._has_repr_pretty_attrs_():\n pp.breakable()\n self._repr_pretty_attrs_(pp, cycle)\n\n @property\n def _parent_containers(self):\n \"\"\"\n Containers for parent objects.\n \"\"\"\n return tuple([_reference_name(parent) for parent in\n self._parent_objects])\n\n @property\n def parents(self):\n \"\"\"\n All parent objects storing the current object.\n \"\"\"\n return tuple([getattr(self, attr) for attr in\n self._parent_containers])\n\n @property\n def _all_attrs(self):\n \"\"\"\n Returns a combination of all required and recommended\n attributes.\n \"\"\"\n return self._necessary_attrs + self._recommended_attrs\n\n def merge_annotations(self, *others):\n \"\"\"\n Merge annotations from the other object into this one.\n\n Merging follows these rules:\n All keys that are in the either object, but not both, are kept.\n For keys that are present in both objects:\n For arrays or lists: concatenate the two arrays\n For dicts: merge recursively\n For strings: concatenate with ';'\n Otherwise: fail if the annotations are not equal\n \"\"\"\n other_annotations = [other.annotations for other in others]\n merged_annotations = merge_annotations(self.annotations,\n *other_annotations)\n 
self.annotations.update(merged_annotations)\n\n def merge(self, *others):\n \"\"\"\n Merge the contents of another object into this one.\n\n See :meth:`merge_annotations` for details of the merge operation.\n \"\"\"\n self.merge_annotations(*others)\n\n def set_parent(self, obj):\n \"\"\"\n Set the appropriate \"parent\" attribute of this object\n according to the type of \"obj\"\n \"\"\"\n if obj.__class__.__name__ not in self._parent_objects:\n raise TypeError(\"{} can only have parents of type {}, not {}\".format(\n self.__class__.__name__, self._parent_objects, obj.__class__.__name__))\n loc = self._parent_objects.index(obj.__class__.__name__)\n parent_attr = self._parent_attrs[loc]\n setattr(self, parent_attr, obj)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476175,"cells":{"repo_name":{"kind":"string","value":"mattpap/sympy-polys"},"path":{"kind":"string","value":"sympy/concrete/products.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3802"},"content":{"kind":"string","value":"from sympy.core import Expr, S, C, Mul, sympify\nfrom sympy.polys import quo, roots\nfrom sympy.simplify import powsimp\n\nclass Product(Expr):\n \"\"\"Represents unevaluated product.\n\n \"\"\"\n\n def __new__(cls, term, *symbols, **assumptions):\n term = sympify(term)\n\n if term.is_Number:\n if term is S.NaN:\n return S.NaN\n elif term is S.Infinity:\n return S.NaN\n elif term is S.NegativeInfinity:\n return S.NaN\n elif term is S.Zero:\n return S.Zero\n elif term is S.One:\n return S.One\n\n if len(symbols) == 1:\n symbol = symbols[0]\n\n if isinstance(symbol, C.Equality):\n k = symbol.lhs\n a = symbol.rhs.start\n n = symbol.rhs.end\n elif isinstance(symbol, (tuple, list)):\n k, a, n = symbol\n else:\n raise ValueError(\"Invalid arguments\")\n\n k, a, n = map(sympify, (k, a, n))\n\n if isinstance(a, C.Number) and isinstance(n, C.Number):\n return Mul(*[term.subs(k, i) for i in xrange(int(a), int(n)+1)])\n else:\n raise 
NotImplementedError\n\n obj = Expr.__new__(cls, **assumptions)\n obj._args = (term, k, a, n)\n\n return obj\n\n @property\n def term(self):\n return self._args[0]\n\n @property\n def index(self):\n return self._args[1]\n\n @property\n def lower(self):\n return self._args[2]\n\n @property\n def upper(self):\n return self._args[3]\n\n def doit(self, **hints):\n term = self.term\n lower = self.lower\n upper = self.upper\n if hints.get('deep', True):\n term = term.doit(**hints)\n lower = lower.doit(**hints)\n upper = upper.doit(**hints)\n\n prod = self._eval_product(lower, upper, term)\n\n if prod is not None:\n return powsimp(prod)\n else:\n return self\n\n def _eval_product(self, a, n, term):\n from sympy import sum, Sum\n k = self.index\n\n if not term.has(k):\n return term**(n-a+1)\n elif term.is_polynomial(k):\n poly = term.as_poly(k)\n\n A = B = Q = S.One\n C_= poly.LC()\n\n all_roots = roots(poly, multiple=True)\n\n for r in all_roots:\n A *= C.RisingFactorial(a-r, n-a+1)\n Q *= n - r\n\n if len(all_roots) < poly.degree():\n B = Product(quo(poly, Q.as_poly(k)), (k, a, n))\n\n return poly.LC()**(n-a+1) * A * B\n elif term.is_Add:\n p, q = term.as_numer_denom()\n\n p = self._eval_product(a, n, p)\n q = self._eval_product(a, n, q)\n\n return p / q\n elif term.is_Mul:\n exclude, include = [], []\n\n for t in term.args:\n p = self._eval_product(a, n, t)\n\n if p is not None:\n exclude.append(p)\n else:\n include.append(t)\n\n if not exclude:\n return None\n else:\n A, B = Mul(*exclude), Mul(*include)\n return A * Product(B, (k, a, n))\n elif term.is_Pow:\n if not term.base.has(k):\n s = sum(term.exp, (k, a, n))\n\n if not isinstance(s, Sum):\n return term.base**s\n elif not term.exp.has(k):\n p = self._eval_product(a, n, term.base)\n\n if p is not None:\n return p**term.exp\n\ndef product(*args, **kwargs):\n prod = Product(*args, **kwargs)\n\n if isinstance(prod, Product):\n return prod.doit(deep=False)\n else:\n return 
prod\n\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476176,"cells":{"repo_name":{"kind":"string","value":"travisfcollins/gnuradio"},"path":{"kind":"string","value":"gr-wxgui/python/wxgui/waterfall_window.py"},"copies":{"kind":"string","value":"47"},"size":{"kind":"string","value":"10668"},"content":{"kind":"string","value":"#\n# Copyright 2008 Free Software Foundation, Inc.\n#\n# This file is part of GNU Radio\n#\n# GNU Radio is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3, or (at your option)\n# any later version.\n#\n# GNU Radio is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with GNU Radio; see the file COPYING. 
If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

##################################################
# Imports
##################################################
import plotter
import common
import wx
import numpy
import math
import pubsub
from constants import *
from gnuradio import gr #for gr.prefs
import forms

##################################################
# Constants
##################################################
SLIDER_STEPS = 100 #number of discrete positions on the avg alpha log slider
AVG_ALPHA_MIN_EXP, AVG_ALPHA_MAX_EXP = -3, 0 #exponent bounds for the avg alpha log slider
DEFAULT_FRAME_RATE = gr.prefs().get_long('wxgui', 'waterfall_rate', 30) #default frame rate, read from the GNU Radio prefs file
DEFAULT_COLOR_MODE = gr.prefs().get_string('wxgui', 'waterfall_color', 'rgb1') #default color mapping, read from the GNU Radio prefs file
DEFAULT_WIN_SIZE = (600, 300) #initial window size in pixels
DIV_LEVELS = (1, 2, 5, 10, 20) #axis division choices; not referenced in this visible section — presumably used by the plotter setup below (TODO confirm)
MIN_DYNAMIC_RANGE, MAX_DYNAMIC_RANGE = 10, 200 #clamp bounds (dB) for the dynamic range buttons
DYNAMIC_RANGE_STEP = 10. #ref level moves by dynamic_range/this per button press
COLOR_MODES = (
	('RGB1', 'rgb1'),
	('RGB2', 'rgb2'),
	('RGB3', 'rgb3'),
	('Gray', 'gray'),
)

##################################################
# Waterfall window control panel
##################################################
class control_panel(wx.Panel):
	"""
	A control panel with wx widgets to control the plotter and fft block chain.
	"""

	def __init__(self, parent):
		"""
		Create a new control panel.

		Builds the averaging options, axes controls, and run/stop buttons,
		and binds each widget to a key on the parent window, which also
		acts as the pubsub settings store (it supports item access and
		subscribe).

		Args:
		    parent: the wx parent window
		"""
		self.parent = parent
		wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER)
		parent[SHOW_CONTROL_PANEL_KEY] = True
		parent.subscribe(SHOW_CONTROL_PANEL_KEY, self.Show)
		control_box = wx.BoxSizer(wx.VERTICAL)
		control_box.AddStretchSpacer()
		options_box = forms.static_box_sizer(
			parent=self, sizer=control_box, label='Options',
			bold=True, orient=wx.VERTICAL,
		)
		#average
		forms.check_box(
			sizer=options_box, parent=self, label='Average',
			ps=parent, key=AVERAGE_KEY,
		)
		avg_alpha_text = forms.static_text(
			sizer=options_box, parent=self, label='Avg Alpha',
			converter=forms.float_converter(lambda x: '%.4f'%x),
			ps=parent, key=AVG_ALPHA_KEY, width=50,
		)
		avg_alpha_slider = forms.log_slider(
			sizer=options_box, parent=self,
			min_exp=AVG_ALPHA_MIN_EXP,
			max_exp=AVG_ALPHA_MAX_EXP,
			num_steps=SLIDER_STEPS,
			ps=parent, key=AVG_ALPHA_KEY,
		)
		#the avg alpha controls are only meaningful while averaging is on:
		#track the AVERAGE_KEY setting and set the initial enabled state
		for widget in (avg_alpha_text, avg_alpha_slider):
			parent.subscribe(AVERAGE_KEY, widget.Enable)
			widget.Enable(parent[AVERAGE_KEY])
		#begin axes box
		control_box.AddStretchSpacer()
		axes_box = forms.static_box_sizer(
			parent=self, sizer=control_box, label='Axes Options',
			bold=True, orient=wx.VERTICAL,
		)
		#num lines buttons
		forms.incr_decr_buttons(
			parent=self, sizer=axes_box, label='Time Scale',
			on_incr=self._on_incr_time_scale, on_decr=self._on_decr_time_scale,
		)
		#dynamic range buttons
		forms.incr_decr_buttons(
			parent=self, sizer=axes_box, label='Dyn Range',
			on_incr=self._on_incr_dynamic_range, on_decr=self._on_decr_dynamic_range,
		)
		#ref lvl buttons
		forms.incr_decr_buttons(
			parent=self, sizer=axes_box, label='Ref Level',
			on_incr=self._on_incr_ref_level, on_decr=self._on_decr_ref_level,
		)
		#color mode
		forms.drop_down(
			parent=self, sizer=axes_box, width=100,
			ps=parent, key=COLOR_MODE_KEY, label='Color',
			choices=map(lambda x: x[1], COLOR_MODES),
			labels=map(lambda x: x[0], COLOR_MODES),
		)
		#autoscale
		forms.single_button(
			parent=self, sizer=axes_box, label='Autoscale',
			callback=self.parent.autoscale,
		)
		#clear
		control_box.AddStretchSpacer()
		forms.single_button(
			parent=self, sizer=control_box, label='Clear',
			callback=self._on_clear_button,
		)
		#run/stop
		forms.toggle_button(
			sizer=control_box, parent=self,
			true_label='Stop', false_label='Run',
			ps=parent, key=RUNNING_KEY,
		)
		#set sizer
		self.SetSizerAndFit(control_box)

	##################################################
	# Event handlers
	##################################################
	def _on_clear_button(self, event):
		#re-write NUM_LINES_KEY with its current value; the resulting pubsub
		#notification presumably resets the waterfall display (TODO confirm
		#against the subscriber in the window class below)
		self.parent[NUM_LINES_KEY] = self.parent[NUM_LINES_KEY]
	def _on_incr_dynamic_range(self, event):
		#step the dynamic range up to the next "clean" value, capped at the max
		self.parent[DYNAMIC_RANGE_KEY] = min(MAX_DYNAMIC_RANGE, common.get_clean_incr(self.parent[DYNAMIC_RANGE_KEY]))
	def _on_decr_dynamic_range(self, event):
		#step the dynamic range down to the next "clean" value, floored at the min
		self.parent[DYNAMIC_RANGE_KEY] = max(MIN_DYNAMIC_RANGE, common.get_clean_decr(self.parent[DYNAMIC_RANGE_KEY]))
	def _on_incr_ref_level(self, event):
		#raise the reference level by one tenth of the current dynamic range
		self.parent[REF_LEVEL_KEY] = self.parent[REF_LEVEL_KEY] + self.parent[DYNAMIC_RANGE_KEY]/DYNAMIC_RANGE_STEP
	def _on_decr_ref_level(self, event):
		#lower the reference level by one tenth of the current dynamic range
		self.parent[REF_LEVEL_KEY] = self.parent[REF_LEVEL_KEY] - self.parent[DYNAMIC_RANGE_KEY]/DYNAMIC_RANGE_STEP
	def _on_incr_time_scale(self, event):
		#slow the frame rate by 25%, never below 1 fps; once clamped at the
		#minimum (rate unchanged), increase decimation instead
		old_rate = self.parent[FRAME_RATE_KEY]
		self.parent[FRAME_RATE_KEY] *= 0.75
		if self.parent[FRAME_RATE_KEY] < 1.0:
			self.parent[FRAME_RATE_KEY] = 1.0

		if self.parent[FRAME_RATE_KEY] == old_rate:
			self.parent[DECIMATION_KEY] += 1
	def _on_decr_time_scale(self, event):
		#speed the frame rate up by 25%; if that leaves the rate unchanged,
		#decrease decimation instead (mirrors _on_incr_time_scale)
		old_rate = self.parent[FRAME_RATE_KEY]
		self.parent[FRAME_RATE_KEY] *= 1.25
		if self.parent[FRAME_RATE_KEY] == old_rate:
			self.parent[DECIMATION_KEY] -= 1

##################################################
# Waterfall window with plotter and control panel
##################################################
class waterfall_window(wx.Panel, pubsub.pubsub):
	def 
__init__(\n\t\tself,\n\t\tparent,\n\t\tcontroller,\n\t\tsize,\n\t\ttitle,\n\t\treal,\n\t\tfft_size,\n\t\tnum_lines,\n\t\tdecimation_key,\n\t\tbaseband_freq,\n\t\tsample_rate_key,\n\t\tframe_rate_key,\n\t\tdynamic_range,\n\t\tref_level,\n\t\taverage_key,\n\t\tavg_alpha_key,\n\t\tmsg_key,\n\t):\n\t\tpubsub.pubsub.__init__(self)\n\t\t#setup\n\t\tself.samples = list()\n\t\tself.real = real\n\t\tself.fft_size = fft_size\n\t\t#proxy the keys\n\t\tself.proxy(MSG_KEY, controller, msg_key)\n\t\tself.proxy(DECIMATION_KEY, controller, decimation_key)\n\t\tself.proxy(FRAME_RATE_KEY, controller, frame_rate_key)\n\t\tself.proxy(AVERAGE_KEY, controller, average_key)\n\t\tself.proxy(AVG_ALPHA_KEY, controller, avg_alpha_key)\n\t\tself.proxy(SAMPLE_RATE_KEY, controller, sample_rate_key)\n\t\t#init panel and plot\n\t\twx.Panel.__init__(self, parent, style=wx.SIMPLE_BORDER)\n\t\tself.plotter = plotter.waterfall_plotter(self)\n\t\tself.plotter.SetSize(wx.Size(*size))\n\t\tself.plotter.SetSizeHints(*size)\n\t\tself.plotter.set_title(title)\n\t\tself.plotter.enable_point_label(True)\n\t\tself.plotter.enable_grid_lines(False)\n\t\t#plotter listeners\n\t\tself.subscribe(COLOR_MODE_KEY, self.plotter.set_color_mode)\n\t\tself.subscribe(NUM_LINES_KEY, self.plotter.set_num_lines)\n\t\t#initialize values\n\t\tself[DYNAMIC_RANGE_KEY] = dynamic_range\n\t\tself[NUM_LINES_KEY] = num_lines\n\t\tself[Y_DIVS_KEY] = 8\n\t\tself[X_DIVS_KEY] = 8 #approximate\n\t\tself[REF_LEVEL_KEY] = ref_level\n\t\tself[BASEBAND_FREQ_KEY] = baseband_freq\n\t\tself[COLOR_MODE_KEY] = COLOR_MODES[0][1]\n\t\tself[COLOR_MODE_KEY] = DEFAULT_COLOR_MODE\n\t\tself[RUNNING_KEY] = True\n\t\t#setup the box with plot and controls\n\t\tself.control_panel = control_panel(self)\n\t\tmain_box = wx.BoxSizer(wx.HORIZONTAL)\n\t\tmain_box.Add(self.plotter, 1, wx.EXPAND)\n\t\tmain_box.Add(self.control_panel, 0, wx.EXPAND)\n\t\tself.SetSizerAndFit(main_box)\n\t\t#register events\n\t\tself.subscribe(MSG_KEY, self.handle_msg)\n\t\tfor key in 
(\n\t\t\tDECIMATION_KEY, SAMPLE_RATE_KEY, FRAME_RATE_KEY,\n\t\t\tBASEBAND_FREQ_KEY, X_DIVS_KEY, Y_DIVS_KEY, NUM_LINES_KEY,\n\t\t): self.subscribe(key, self.update_grid)\n\t\t#initial update\n\t\tself.update_grid()\n\n\tdef set_callback(self,callb):\n\t\tself.plotter.set_callback(callb)\n\n\tdef autoscale(self, *args):\n\t\t\"\"\"\n\t\tAutoscale the waterfall plot to the last frame.\n\t\tSet the dynamic range and reference level.\n\t\tDoes not affect the current data in the waterfall.\n\t\t\"\"\"\n\t\tif not len(self.samples): return\n\t\tmin_level, max_level = common.get_min_max_fft(self.samples)\n\t\t#set the range and level\n\t\tself[DYNAMIC_RANGE_KEY] = common.get_clean_num(max_level - min_level)\n\t\tself[REF_LEVEL_KEY] = DYNAMIC_RANGE_STEP*round(.5+max_level/DYNAMIC_RANGE_STEP)\n\n\tdef handle_msg(self, msg):\n\t\t\"\"\"\n\t\tHandle the message from the fft sink message queue.\n\t\tIf complex, reorder the fft samples so the negative bins come first.\n\t\tIf real, keep take only the positive bins.\n\t\tSend the data to the plotter.\n\n\t\tArgs:\n\t\t msg: the fft array as a character array\n\t\t\"\"\"\n\t\tif not self[RUNNING_KEY]: return\n\t\t#convert to floating point numbers\n\t\tself.samples = samples = numpy.fromstring(msg, numpy.float32)[:self.fft_size] #only take first frame\n\t\tnum_samps = len(samples)\n\t\t#reorder fft\n\t\tif self.real: samples = samples[:(num_samps+1)/2]\n\t\telse: samples = numpy.concatenate((samples[num_samps/2+1:], samples[:(num_samps+1)/2]))\n\t\t#plot the fft\n\t\tself.plotter.set_samples(\n\t\t\tsamples=samples,\n\t\t\tminimum=self[REF_LEVEL_KEY] - self[DYNAMIC_RANGE_KEY],\n\t\t\tmaximum=self[REF_LEVEL_KEY],\n\t\t)\n\t\t#update the plotter\n\t\tself.plotter.update()\n\n\tdef update_grid(self, *args):\n\t\t\"\"\"\n\t\tUpdate the plotter grid.\n\t\tThis update method is dependent on the variables below.\n\t\tDetermine the x and y axis grid parameters.\n\t\tThe x axis depends on sample rate, baseband freq, and x divs.\n\t\tThe y 
axis depends on y per div, y divs, and ref level.\n\t\t\"\"\"\n\t\t#grid parameters\n\t\tsample_rate = self[SAMPLE_RATE_KEY]\n\t\tframe_rate = self[FRAME_RATE_KEY]\n\t\tif frame_rate < 1.0 :\n\t\t\tframe_rate = 1.0\n\t\tbaseband_freq = self[BASEBAND_FREQ_KEY]\n\t\tnum_lines = self[NUM_LINES_KEY]\n\t\ty_divs = self[Y_DIVS_KEY]\n\t\tx_divs = self[X_DIVS_KEY]\n\t\t#determine best fitting x_per_div\n\t\tif self.real: x_width = sample_rate/2.0\n\t\telse: x_width = sample_rate/1.0\n\t\tx_per_div = common.get_clean_num(x_width/x_divs)\n\t\t#update the x grid\n\t\tif self.real:\n\t\t\tself.plotter.set_x_grid(\n\t\t\t\tbaseband_freq,\n\t\t\t\tbaseband_freq + sample_rate/2.0,\n\t\t\t\tx_per_div, True,\n\t\t\t)\n\t\telse:\n\t\t\tself.plotter.set_x_grid(\n\t\t\t\tbaseband_freq - sample_rate/2.0,\n\t\t\t\tbaseband_freq + sample_rate/2.0,\n\t\t\t\tx_per_div, True,\n\t\t\t)\n\t\t#update x units\n\t\tself.plotter.set_x_label('Frequency', 'Hz')\n\t\t#update y grid\n\t\tduration = float(num_lines)/frame_rate\n\t\ty_per_div = common.get_clean_num(duration/y_divs)\n\t\tself.plotter.set_y_grid(0, duration, y_per_div, True)\n\t\t#update y units\n\t\tself.plotter.set_y_label('Time', 's')\n\t\t#update plotter\n\t\tself.plotter.update()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476177,"cells":{"repo_name":{"kind":"string","value":"TestInABox/openstackinabox"},"path":{"kind":"string","value":"openstackinabox/tests/utils/test_directory.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1798"},"content":{"kind":"string","value":"import os\nimport os.path\n\nimport ddt\nimport mock\n\nfrom openstackinabox.tests.base import TestBase\n\nfrom openstackinabox.utils import directory\n\n\n@ddt.ddt\nclass TestTempDirectory(TestBase):\n\n def setUp(self):\n super(TestTempDirectory, self).setUp()\n\n def tearDown(self):\n super(TestTempDirectory, self).tearDown()\n\n def test_initialization(self):\n temp_dir = directory.TemporaryDirectory()\n 
self.assertIsInstance(temp_dir.name, str)\n self.assertIn(directory.TemporaryDirectory.__name__, repr(temp_dir))\n\n def test_cleanup(self):\n temp_dir = directory.TemporaryDirectory()\n\n self.assertTrue(os.path.exists(temp_dir.name))\n\n file_names = [temp_dir.name]\n for x in range(10):\n filename = '{0}/file_{1}'.format(\n temp_dir.name,\n x\n )\n with open(filename, 'w') as data_output:\n data_output.write(str(os.urandom(8192)))\n\n file_names.append(filename)\n\n temp_dir.cleanup()\n\n for name in file_names:\n self.assertFalse(os.path.exists(name))\n\n def test_del_cleanup_error(self):\n with mock.patch(\n 'shutil.rmtree'\n ) as mock_rmtree:\n mock_rmtree.side_effect = OSError('mock error')\n\n temp_dir = directory.TemporaryDirectory()\n temp_dir.cleanup()\n\n def test_context(self):\n temp_dir_name = None\n\n temp_dir = directory.TemporaryDirectory()\n with temp_dir as context:\n self.assertEqual(id(temp_dir), id(context))\n temp_dir_name = context.name\n\n self.assertTrue(os.path.exists(temp_dir_name))\n\n try:\n self.assertFalse(os.path.exists(temp_dir_name))\n except OSError:\n pass\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476178,"cells":{"repo_name":{"kind":"string","value":"Jumpscale/ays9"},"path":{"kind":"string","value":"tests/test_services/test_directory_structure/actions.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3901"},"content":{"kind":"string","value":"def init_actions_(service, args):\n\n \"\"\"\n\n this needs to returns an array of actions representing the depencies between actions.\n\n Looks at ACTION_DEPS in this module for an example of what is expected\n\n \"\"\"\n # some default logic for simple actions\n return {\n\n 'test': ['install']\n\n }\n\n\n\ndef test(job):\n \"\"\"\n Test the created directory structure is corrected after ays blueprint on a test repo\n \"\"\"\n import sys\n import os\n RESULT_OK = 'OK : %s'\n RESULT_FAILED = 'FAILED : %s'\n RESULT_ERROR = 'ERROR : %s %%s' 
% job.service.name\n model = job.service.model\n model.data.result = RESULT_OK % job.service.name\n failures = []\n expected_actors = ['cockpittesting', 'datacenter', 'sshkey']\n expected_files_per_actor = ['actions.py', 'actor.json', 'schema.capnp']\n actor_missing_msg = 'Actor folder [%s] does not exist'\n actor_file_missing_msg = 'File [%s] for actor [%s] is missing'\n service_file_missing_msg = 'Service file [%s] is missing'\n expected_services = {'datacenter!ovh_germany1':{\n # Un-comment the following when enabling the _bp_related1.yaml blueprint\n # 'cockpittesting!cockpitv1': {'files': ['data.json',\n # 'schema.capnp',\n # 'service.json']},\n 'files': ['data.json', 'schema.capnp', 'service.json']},\n # Un-comment the following when enabling the _bp_related1.yaml blueprint\n # 'datacenter!ovh_germany2': {'files': ['data.json',\n # 'schema.capnp',\n # 'service.json']},\n 'datacenter!ovh_germany3': {'cockpittesting!cockpitv2': {'files': ['data.json',\n 'schema.capnp',\n 'service.json']},\n 'files': ['data.json', 'schema.capnp', 'service.json']},\n 'sshkey!main': {'files': ['data.json', 'schema.capnp', 'service.json']}}\n cwd = os.getcwd()\n repos = []\n repo_name = 'sample_repo1'\n repo_path = j.sal.fs.joinPaths(j.dirs.CODEDIR, 'github/jumpscale/ays9/tests/%s' % repo_name)\n repos.append(repo_name)\n\n def check_service_dir(base_path, service):\n for service_name, service_info in service.items():\n if service_name != 'files':\n path = j.sal.fs.joinPaths(base_path, service_name)\n check_service_dir(path, service_info)\n else:\n for service_file in service['files']:\n if not j.sal.fs.exists(j.sal.fs.joinPaths(base_path, service_file)):\n failures.append(service_file_missing_msg % j.sal.fs.joinPaths(base_path, service_file))\n try:\n ays_client = j.clients.atyourservice.get().api.ays\n blueprints = map(lambda item: item['name'], ays_client.listBlueprints(repo_name, query_params={'archived': False}).json())\n for blueprint in blueprints:\n 
ays_client.executeBlueprint(data={}, blueprint=blueprint, repository=repo_name)\n\n\n # validate directory structure\n for actor in expected_actors:\n if not j.sal.fs.exists(j.sal.fs.joinPaths(repo_path, 'actors', actor)):\n failures.append(actor_missing_msg % actor)\n else:\n for actor_file in expected_files_per_actor:\n if not j.sal.fs.exists(j.sal.fs.joinPaths(repo_path, 'actors', actor, actor_file)):\n failures.append(actor_file_missing_msg % (actor_file, actor))\n\n for service_name, service_info in expected_services.items():\n path = j.sal.fs.joinPaths(repo_path, 'services', service_name)\n check_service_dir(path, service_info)\n if failures:\n model.data.result = RESULT_FAILED % '\\n'.join(failures)\n\n except:\n model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])\n finally:\n job.service.save()\n j.sal.fs.changeDir(cwd)\n for repo in repos:\n ays_client.destroyRepository(data={}, repository=repo)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476179,"cells":{"repo_name":{"kind":"string","value":"MechanisM/bleach"},"path":{"kind":"string","value":"bleach/tests/test_links.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"4636"},"content":{"kind":"string","value":"from nose.tools import eq_\nimport urllib\n\nfrom bleach import Bleach, url_re\n\nb = Bleach()\n\n\nclass cleach(Bleach):\n def filter_url(self, url):\n return u'http://bouncer/?u=%s' % urllib.quote_plus(url)\n\nc = cleach()\n\n\ndef test_url_re():\n def no_match(s):\n match = url_re.search(s)\n if match:\n assert not match, 'matched %s' % s[slice(*match.span())]\n yield no_match, 'just what i am looking for...it'\n\n\ndef test_empty():\n eq_('', b.linkify(''))\n\n\ndef test_simple_link():\n eq_('a http://example.com link',\n b.linkify('a http://example.com link'))\n eq_('a https://example.com link',\n b.linkify('a https://example.com link'))\n\n\ndef test_mangle_link():\n eq_('http://example.com',\n c.linkify('http://example.com'))\n\n\ndef 
test_email_link():\n eq_('a james@example.com mailto',\n b.linkify('a james@example.com mailto'))\n\n\ndef test_tlds():\n eq_('example.com',\n b.linkify('example.com'))\n eq_('example.co.uk',\n b.linkify('example.co.uk'))\n eq_('example.edu',\n b.linkify('example.edu'))\n eq_('example.xxx', b.linkify('example.xxx'))\n eq_(' brie', b.linkify(' brie'))\n eq_('bit.ly/fun',\n b.linkify('bit.ly/fun'))\n\n\ndef test_escaping():\n eq_('&lt; unrelated', b.linkify('< unrelated'))\n\n\ndef test_nofollow_off():\n eq_('example.com',\n b.linkify(u'example.com', nofollow=False))\n\n\ndef test_link_in_html():\n eq_('http://yy.com',\n b.linkify('http://yy.com'))\n eq_('http://xx.com',\n b.linkify('http://xx.com'))\n\n\ndef test_links_https():\n eq_('https://yy.com',\n b.linkify('https://yy.com'))\n\n\ndef test_add_rel_nofollow():\n \"\"\"Verify that rel=\"nofollow\" is added to an existing link\"\"\"\n eq_('http://yy.com',\n b.linkify('http://yy.com'))\n\n\ndef test_url_with_path():\n eq_('http://example.com/path/to/file',\n b.linkify('http://example.com/path/to/file'))\n\n\ndef test_link_ftp():\n eq_('ftp://ftp.mozilla.org/some/file',\n b.linkify('ftp://ftp.mozilla.org/some/file'))\n\n\ndef test_link_query():\n eq_('http://xx.com/?test=win',\n b.linkify('http://xx.com/?test=win'))\n eq_('xx.com/?test=win',\n b.linkify('xx.com/?test=win'))\n eq_('xx.com?test=win',\n b.linkify('xx.com?test=win'))\n\n\ndef test_link_fragment():\n eq_('http://xx.com/path#frag',\n b.linkify('http://xx.com/path#frag'))\n\n\ndef test_link_entities():\n eq_('http://xx.com/?a=1&amp;b=2',\n b.linkify('http://xx.com/?a=1&b=2'))\n\n\ndef test_escaped_html():\n \"\"\"If I pass in escaped HTML, it should probably come out escaped.\"\"\"\n s = '&lt;em&gt;strong&lt;/em&gt;'\n eq_(s, b.linkify(s))\n\n# Not supported at this time\n# TODO:\n# - Can this pass eventually?\n#def test_link_http_complete():\n# eq_('https://user:pass@ftp.mozilla.com/x/y.exe?a=b&amp;c=d&amp;e#f',\n# 
b.linkify('https://user:pass@ftp.mozilla.org/x/y.exe?a=b&c=d&e#f'))\n\n\ndef test_non_url():\n \"\"\"document.vulnerable should absolutely not be linkified.\"\"\"\n s = 'document.vulnerable'\n eq_(s, b.linkify(s))\n\n\ndef test_javascript_url():\n \"\"\"javascript: urls should never be linkified.\"\"\"\n s = 'javascript:document.vulnerable'\n eq_(s, b.linkify(s))\n\n\ndef test_unsafe_url():\n \"\"\"Any unsafe char ({}[]<>, etc.) in the path should end URL scanning.\"\"\"\n eq_('All your{\"xx.yy.com/grover.png\"}base are',\n b.linkify('All your{\"xx.yy.com/grover.png\"}base are'))\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476180,"cells":{"repo_name":{"kind":"string","value":"cjh1/VTK"},"path":{"kind":"string","value":"Examples/GUI/Python/ImagePlaneWidget.py"},"copies":{"kind":"string","value":"14"},"size":{"kind":"string","value":"10228"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# This code is a direct translation of the Tcl code in\n# ImagePlaneWidget.tcl. 
It could easily be written using a nice class\n# to do the job but the present code should definitely make for an\n# illustrative example.\n\n# This example demonstrates how to use the vtkImagePlaneWidget\n# to probe a 3D image dataset with three orthogonal planes.\n# Buttons are provided to:\n# a) capture the render window display to a tiff file\n# b) x,y,z buttons reset the widget to orthonormal\n# positioning, set the horizontal slider to move the\n# associated widget along its normal, and set the\n# camera to face the widget\n# c) right clicking on x,y,z buttons pops up a menu to set\n# the associated widget's reslice interpolation mode\n\nimport vtk\nimport Tkinter\nfrom vtk.tk.vtkTkRenderWindowInteractor import \\\n vtkTkRenderWindowInteractor\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\n# Start by loading some data.\nv16 = vtk.vtkVolume16Reader()\nv16.SetDataDimensions(64, 64)\nv16.SetDataByteOrderToLittleEndian()\nv16.SetFilePrefix(VTK_DATA_ROOT + \"/Data/headsq/quarter\")\nv16.SetImageRange(1, 93)\nv16.SetDataSpacing(3.2, 3.2, 1.5)\nv16.Update()\n\nxMin, xMax, yMin, yMax, zMin, zMax = v16.GetExecutive().GetWholeExtent(v16.GetOutputInformation(0))\n\nspacing = v16.GetOutput().GetSpacing()\nsx, sy, sz = spacing\n\norigin = v16.GetOutput().GetOrigin()\nox, oy, oz = origin\n\n# An outline is shown for context.\noutline = vtk.vtkOutlineFilter()\noutline.SetInputConnection(v16.GetOutputPort())\n\noutlineMapper = vtk.vtkPolyDataMapper()\noutlineMapper.SetInputConnection(outline.GetOutputPort())\n\noutlineActor = vtk.vtkActor()\noutlineActor.SetMapper(outlineMapper)\n\n# The shared picker enables us to use 3 planes at one time\n# and gets the picking order right\npicker = vtk.vtkCellPicker()\npicker.SetTolerance(0.005)\n\n# The 3 image plane widgets are used to probe the dataset.\nplaneWidgetX = 
vtk.vtkImagePlaneWidget()\nplaneWidgetX.DisplayTextOn()\nplaneWidgetX.SetInputConnection(v16.GetOutputPort())\nplaneWidgetX.SetPlaneOrientationToXAxes()\nplaneWidgetX.SetSliceIndex(32)\nplaneWidgetX.SetPicker(picker)\nplaneWidgetX.SetKeyPressActivationValue(\"x\")\nprop1 = planeWidgetX.GetPlaneProperty()\nprop1.SetColor(1, 0, 0)\n\nplaneWidgetY = vtk.vtkImagePlaneWidget()\nplaneWidgetY.DisplayTextOn()\nplaneWidgetY.SetInputConnection(v16.GetOutputPort())\nplaneWidgetY.SetPlaneOrientationToYAxes()\nplaneWidgetY.SetSliceIndex(32)\nplaneWidgetY.SetPicker(picker)\nplaneWidgetY.SetKeyPressActivationValue(\"y\")\nprop2 = planeWidgetY.GetPlaneProperty()\nprop2.SetColor(1, 1, 0)\nplaneWidgetY.SetLookupTable(planeWidgetX.GetLookupTable())\n\n# for the z-slice, turn off texture interpolation:\n# interpolation is now nearest neighbour, to demonstrate\n# cross-hair cursor snapping to pixel centers\nplaneWidgetZ = vtk.vtkImagePlaneWidget()\nplaneWidgetZ.DisplayTextOn()\nplaneWidgetZ.SetInputConnection(v16.GetOutputPort())\nplaneWidgetZ.SetPlaneOrientationToZAxes()\nplaneWidgetZ.SetSliceIndex(46)\nplaneWidgetZ.SetPicker(picker)\nplaneWidgetZ.SetKeyPressActivationValue(\"z\")\nprop3 = planeWidgetZ.GetPlaneProperty()\nprop3.SetColor(0, 0, 1)\nplaneWidgetZ.SetLookupTable(planeWidgetX.GetLookupTable())\n\n# Create the RenderWindow and Renderer\nren = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren)\n\n# Add the outline actor to the renderer, set the background color and size\nren.AddActor(outlineActor)\nrenWin.SetSize(600, 600)\nren.SetBackground(0.1, 0.1, 0.2)\n\ncurrent_widget = planeWidgetZ\nmode_widget = planeWidgetZ\n\n# Create the GUI\n# We first create the supporting functions (callbacks) for the GUI\n#\n# Align the camera so that it faces the desired widget\ndef AlignCamera():\n #global ox, oy, oz, sx, sy, sz, xMax, xMin, yMax, yMin, zMax, \\\n # zMin, slice_number\n #global current_widget\n cx = ox+(0.5*(xMax-xMin))*sx\n cy = 
oy+(0.5*(yMax-yMin))*sy\n cz = oy+(0.5*(zMax-zMin))*sz\n vx, vy, vz = 0, 0, 0\n nx, ny, nz = 0, 0, 0\n iaxis = current_widget.GetPlaneOrientation()\n if iaxis == 0:\n vz = -1\n nx = ox + xMax*sx\n cx = ox + slice_number*sx\n elif iaxis == 1:\n vz = -1\n ny = oy+yMax*sy\n cy = oy+slice_number*sy\n else:\n vy = 1\n nz = oz+zMax*sz\n cz = oz+slice_number*sz\n\n px = cx+nx*2\n py = cy+ny*2\n pz = cz+nz*3\n\n camera = ren.GetActiveCamera()\n camera.SetViewUp(vx, vy, vz)\n camera.SetFocalPoint(cx, cy, cz)\n camera.SetPosition(px, py, pz)\n camera.OrthogonalizeViewUp()\n ren.ResetCameraClippingRange()\n renWin.Render()\n\n# Capture the display and place in a tiff\ndef CaptureImage():\n w2i = vtk.vtkWindowToImageFilter()\n writer = vtk.vtkTIFFWriter()\n w2i.SetInput(renWin)\n w2i.Update()\n writer.SetInputConnection(w2i.GetOutputPort())\n writer.SetFileName(\"image.tif\")\n renWin.Render()\n writer.Write()\n\n\n# Align the widget back into orthonormal position,\n# set the slider to reflect the widget's position,\n# call AlignCamera to set the camera facing the widget\ndef AlignXaxis():\n global xMax, xMin, current_widget, slice_number\n po = planeWidgetX.GetPlaneOrientation()\n if po == 3:\n planeWidgetX.SetPlaneOrientationToXAxes()\n slice_number = (xMax-xMin)/2\n planeWidgetX.SetSliceIndex(slice_number)\n else:\n slice_number = planeWidgetX.GetSliceIndex()\n\n current_widget = planeWidgetX\n\n slice.config(from_=xMin, to=xMax)\n slice.set(slice_number)\n AlignCamera()\n\n\ndef AlignYaxis():\n global yMin, yMax, current_widget, slice_number\n po = planeWidgetY.GetPlaneOrientation()\n if po == 3:\n planeWidgetY.SetPlaneOrientationToYAxes()\n slice_number = (yMax-yMin)/2\n planeWidgetY.SetSliceIndex(slice_number)\n else:\n slice_number = planeWidgetY.GetSliceIndex()\n\n current_widget = planeWidgetY\n\n slice.config(from_=yMin, to=yMax)\n slice.set(slice_number)\n AlignCamera()\n\ndef AlignZaxis():\n global yMin, yMax, current_widget, slice_number\n po = 
planeWidgetZ.GetPlaneOrientation()\n if po == 3:\n planeWidgetZ.SetPlaneOrientationToZAxes()\n slice_number = (zMax-zMin)/2\n planeWidgetZ.SetSliceIndex(slice_number)\n else:\n slice_number = planeWidgetZ.GetSliceIndex()\n\n current_widget = planeWidgetZ\n\n slice.config(from_=zMin, to=zMax)\n slice.set(slice_number)\n AlignCamera()\n\n\n# Set the widget's reslice interpolation mode\n# to the corresponding popup menu choice\ndef SetInterpolation():\n global mode_widget, mode\n if mode.get() == 0:\n mode_widget.TextureInterpolateOff()\n else:\n mode_widget.TextureInterpolateOn()\n\n mode_widget.SetResliceInterpolate(mode.get())\n renWin.Render()\n\n# Share the popup menu among buttons, keeping track of associated\n# widget's interpolation mode\ndef buttonEvent(event, arg=None):\n global mode, mode_widget, popm\n if arg == 0:\n mode_widget = planeWidgetX\n elif arg == 1:\n mode_widget = planeWidgetY\n elif arg == 2:\n mode_widget = planeWidgetZ\n else:\n return\n mode.set(mode_widget.GetResliceInterpolate())\n popm.entryconfigure(arg, variable=mode)\n popm.post(event.x + event.x_root, event.y + event.y_root)\n\ndef SetSlice(sl):\n global current_widget\n current_widget.SetSliceIndex(int(sl))\n ren.ResetCameraClippingRange()\n renWin.Render()\n\n\n###\n# Now actually create the GUI\nroot = Tkinter.Tk()\nroot.withdraw()\ntop = Tkinter.Toplevel(root)\n\n# Define a quit method that exits cleanly.\ndef quit(obj=root):\n obj.quit()\n\n# Popup menu\npopm = Tkinter.Menu(top, tearoff=0)\nmode = Tkinter.IntVar()\nmode.set(1)\npopm.add_radiobutton(label=\"nearest\", variable=mode, value=0,\n command=SetInterpolation)\npopm.add_radiobutton(label=\"linear\", variable=mode, value=1,\n command=SetInterpolation)\npopm.add_radiobutton(label=\"cubic\", variable=mode, value=2,\n command=SetInterpolation)\n\ndisplay_frame = Tkinter.Frame(top)\ndisplay_frame.pack(side=\"top\", anchor=\"n\", fill=\"both\", expand=\"false\")\n\n# Buttons\nctrl_buttons = 
Tkinter.Frame(top)\nctrl_buttons.pack(side=\"top\", anchor=\"n\", fill=\"both\", expand=\"false\")\n\nquit_button = Tkinter.Button(ctrl_buttons, text=\"Quit\", command=quit)\ncapture_button = Tkinter.Button(ctrl_buttons, text=\"Tif\",\n command=CaptureImage)\n\nx_button = Tkinter.Button(ctrl_buttons, text=\"x\", command=AlignXaxis)\ny_button = Tkinter.Button(ctrl_buttons, text=\"y\", command=AlignYaxis)\nz_button = Tkinter.Button(ctrl_buttons, text=\"z\", command=AlignZaxis)\nx_button.bind(\"\", lambda e: buttonEvent(e, 0))\ny_button.bind(\"\", lambda e: buttonEvent(e, 1))\nz_button.bind(\"\", lambda e: buttonEvent(e, 2))\n\nfor i in (quit_button, capture_button, x_button, y_button, z_button):\n i.pack(side=\"left\", expand=\"true\", fill=\"both\")\n\n\n# Create the render widget\nrenderer_frame = Tkinter.Frame(display_frame)\nrenderer_frame.pack(padx=3, pady=3,side=\"left\", anchor=\"n\",\n fill=\"both\", expand=\"false\")\n\nrender_widget = vtkTkRenderWindowInteractor(renderer_frame,\n rw=renWin, width=600,\n height=600)\nfor i in (render_widget, display_frame):\n i.pack(side=\"top\", anchor=\"n\",fill=\"both\", expand=\"false\")\n\n# Add a slice scale to browse the current slice stack\nslice_number = Tkinter.IntVar()\nslice_number.set(current_widget.GetSliceIndex())\nslice = Tkinter.Scale(top, from_=zMin, to=zMax, orient=\"horizontal\",\n command=SetSlice,variable=slice_number,\n label=\"Slice\")\nslice.pack(fill=\"x\", expand=\"false\")\n\n# Done with the GUI.\n###\n\n# Set the interactor for the widgets\niact = render_widget.GetRenderWindow().GetInteractor()\nplaneWidgetX.SetInteractor(iact)\nplaneWidgetX.On()\nplaneWidgetY.SetInteractor(iact)\nplaneWidgetY.On()\nplaneWidgetZ.SetInteractor(iact)\nplaneWidgetZ.On()\n\n# Create an initial interesting view\ncam1 = ren.GetActiveCamera()\ncam1.Elevation(110)\ncam1.SetViewUp(0, 0, -1)\ncam1.Azimuth(45)\nren.ResetCameraClippingRange()\n\n# Render 
it\nrender_widget.Render()\n\niact.Initialize()\nrenWin.Render()\niact.Start()\n\n# Start Tkinter event loop\nroot.mainloop()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476181,"cells":{"repo_name":{"kind":"string","value":"jordiclariana/ansible"},"path":{"kind":"string","value":"lib/ansible/modules/cloud/misc/virt_pool.py"},"copies":{"kind":"string","value":"48"},"size":{"kind":"string","value":"22292"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2015, Maciej Delmanowski \n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see .\n\nANSIBLE_METADATA = {'status': ['preview'],\n 'supported_by': 'community',\n 'version': '1.0'}\n\nDOCUMENTATION = '''\n---\nmodule: virt_pool\nauthor: \"Maciej Delmanowski (@drybjed)\"\nversion_added: \"2.0\"\nshort_description: Manage libvirt storage pools\ndescription:\n - Manage I(libvirt) storage pools.\noptions:\n name:\n required: false\n aliases: [ \"pool\" ]\n description:\n - name of the storage pool being managed. 
Note that pool must be previously\n defined with xml.\n state:\n required: false\n choices: [ \"active\", \"inactive\", \"present\", \"absent\", \"undefined\", \"deleted\" ]\n description:\n - specify which state you want a storage pool to be in.\n If 'active', pool will be started.\n If 'present', ensure that pool is present but do not change its\n state; if it's missing, you need to specify xml argument.\n If 'inactive', pool will be stopped.\n If 'undefined' or 'absent', pool will be removed from I(libvirt) configuration.\n If 'deleted', pool contents will be deleted and then pool undefined.\n command:\n required: false\n choices: [ \"define\", \"build\", \"create\", \"start\", \"stop\", \"destroy\",\n \"delete\", \"undefine\", \"get_xml\", \"list_pools\", \"facts\",\n \"info\", \"status\" ]\n description:\n - in addition to state management, various non-idempotent commands are available.\n See examples.\n autostart:\n required: false\n choices: [\"yes\", \"no\"]\n description:\n - Specify if a given storage pool should be started automatically on system boot.\n uri:\n required: false\n default: \"qemu:///system\"\n description:\n - I(libvirt) connection uri.\n xml:\n required: false\n description:\n - XML document used with the define command.\n mode:\n required: false\n choices: [ 'new', 'repair', 'resize', 'no_overwrite', 'overwrite', 'normal', 'zeroed' ]\n description:\n - Pass additional parameters to 'build' or 'delete' commands.\nrequirements:\n - \"python >= 2.6\"\n - \"python-libvirt\"\n - \"python-lxml\"\n'''\n\nEXAMPLES = '''\n# Define a new storage pool\n- virt_pool:\n command: define\n name: vms\n xml: '{{ lookup(\"template\", \"pool/dir.xml.j2\") }}'\n\n# Build a storage pool if it does not exist\n- virt_pool:\n command: build\n name: vms\n\n# Start a storage pool\n- virt_pool:\n command: create\n name: vms\n\n# List available pools\n- virt_pool:\n command: list_pools\n\n# Get XML data of a specified pool\n- virt_pool:\n command: get_xml\n name: 
vms\n\n# Stop a storage pool\n- virt_pool:\n command: destroy\n name: vms\n\n# Delete a storage pool (destroys contents)\n- virt_pool:\n command: delete\n name: vms\n\n# Undefine a storage pool\n- virt_pool:\n command: undefine\n name: vms\n\n# Gather facts about storage pools\n# Facts will be available as 'ansible_libvirt_pools'\n- virt_pool:\n command: facts\n\n# Gather information about pools managed by 'libvirt' remotely using uri\n- virt_pool:\n command: info\n uri: '{{ item }}'\n with_items: '{{ libvirt_uris }}'\n register: storage_pools\n\n# Ensure that a pool is active (needs to be defined and built first)\n- virt_pool:\n state: active\n name: vms\n\n# Ensure that a pool is inactive\n- virt_pool:\n state: inactive\n name: vms\n\n# Ensure that a given pool will be started at boot\n- virt_pool:\n autostart: yes\n name: vms\n\n# Disable autostart for a given pool\n- virt_pool:\n autostart: no\n name: vms\n'''\n\nVIRT_FAILED = 1\nVIRT_SUCCESS = 0\nVIRT_UNAVAILABLE=2\n\ntry:\n import libvirt\nexcept ImportError:\n HAS_VIRT = False\nelse:\n HAS_VIRT = True\n\ntry:\n from lxml import etree\nexcept ImportError:\n HAS_XML = False\nelse:\n HAS_XML = True\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nALL_COMMANDS = []\nENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete',\n 'undefine', 'destroy', 'get_xml', 'define', 'refresh']\nHOST_COMMANDS = [ 'list_pools', 'facts', 'info' ]\nALL_COMMANDS.extend(ENTRY_COMMANDS)\nALL_COMMANDS.extend(HOST_COMMANDS)\n\nENTRY_STATE_ACTIVE_MAP = {\n 0 : \"inactive\",\n 1 : \"active\"\n}\n\nENTRY_STATE_AUTOSTART_MAP = {\n 0 : \"no\",\n 1 : \"yes\"\n}\n\nENTRY_STATE_PERSISTENT_MAP = {\n 0 : \"no\",\n 1 : \"yes\"\n}\n\nENTRY_STATE_INFO_MAP = {\n 0 : \"inactive\",\n 1 : \"building\",\n 2 : \"running\",\n 3 : \"degraded\",\n 4 : \"inaccessible\"\n}\n\nENTRY_BUILD_FLAGS_MAP = {\n \"new\" : 0,\n \"repair\" : 1,\n \"resize\" : 2,\n \"no_overwrite\" : 4,\n \"overwrite\" : 8\n}\n\nENTRY_DELETE_FLAGS_MAP = {\n 
\"normal\" : 0,\n \"zeroed\" : 1\n}\n\nALL_MODES = []\nALL_MODES.extend(ENTRY_BUILD_FLAGS_MAP.keys())\nALL_MODES.extend(ENTRY_DELETE_FLAGS_MAP.keys())\n\n\nclass EntryNotFound(Exception):\n pass\n\n\nclass LibvirtConnection(object):\n\n def __init__(self, uri, module):\n\n self.module = module\n\n conn = libvirt.open(uri)\n\n if not conn:\n raise Exception(\"hypervisor connection failure\")\n\n self.conn = conn\n\n def find_entry(self, entryid):\n # entryid = -1 returns a list of everything\n\n results = []\n\n # Get active entries\n for name in self.conn.listStoragePools():\n entry = self.conn.storagePoolLookupByName(name)\n results.append(entry)\n\n # Get inactive entries\n for name in self.conn.listDefinedStoragePools():\n entry = self.conn.storagePoolLookupByName(name)\n results.append(entry)\n\n if entryid == -1:\n return results\n\n for entry in results:\n if entry.name() == entryid:\n return entry\n\n raise EntryNotFound(\"storage pool %s not found\" % entryid)\n\n def create(self, entryid):\n if not self.module.check_mode:\n return self.find_entry(entryid).create()\n else:\n try:\n state = self.find_entry(entryid).isActive()\n except:\n return self.module.exit_json(changed=True)\n if not state:\n return self.module.exit_json(changed=True)\n\n def destroy(self, entryid):\n if not self.module.check_mode:\n return self.find_entry(entryid).destroy()\n else:\n if self.find_entry(entryid).isActive():\n return self.module.exit_json(changed=True)\n\n def undefine(self, entryid):\n if not self.module.check_mode:\n return self.find_entry(entryid).undefine()\n else:\n if not self.find_entry(entryid):\n return self.module.exit_json(changed=True)\n\n def get_status2(self, entry):\n state = entry.isActive()\n return ENTRY_STATE_ACTIVE_MAP.get(state,\"unknown\")\n\n def get_status(self, entryid):\n if not self.module.check_mode:\n state = self.find_entry(entryid).isActive()\n return ENTRY_STATE_ACTIVE_MAP.get(state,\"unknown\")\n else:\n try:\n state = 
self.find_entry(entryid).isActive()\n return ENTRY_STATE_ACTIVE_MAP.get(state,\"unknown\")\n except:\n return ENTRY_STATE_ACTIVE_MAP.get(\"inactive\",\"unknown\")\n\n def get_uuid(self, entryid):\n return self.find_entry(entryid).UUIDString()\n\n def get_xml(self, entryid):\n return self.find_entry(entryid).XMLDesc(0)\n\n def get_info(self, entryid):\n return self.find_entry(entryid).info()\n\n def get_volume_count(self, entryid):\n return self.find_entry(entryid).numOfVolumes()\n\n def get_volume_names(self, entryid):\n return self.find_entry(entryid).listVolumes()\n\n def get_devices(self, entryid):\n xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))\n if xml.xpath('/pool/source/device'):\n result = []\n for device in xml.xpath('/pool/source/device'):\n result.append(device.get('path'))\n try:\n return result\n except:\n raise ValueError('No devices specified')\n\n def get_format(self, entryid):\n xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))\n try:\n result = xml.xpath('/pool/source/format')[0].get('type')\n except:\n raise ValueError('Format not specified')\n return result\n\n def get_host(self, entryid):\n xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))\n try:\n result = xml.xpath('/pool/source/host')[0].get('name')\n except:\n raise ValueError('Host not specified')\n return result\n\n def get_source_path(self, entryid):\n xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))\n try:\n result = xml.xpath('/pool/source/dir')[0].get('path')\n except:\n raise ValueError('Source path not specified')\n return result\n\n def get_path(self, entryid):\n xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))\n return xml.xpath('/pool/target/path')[0].text\n\n def get_type(self, entryid):\n xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))\n return xml.get('type')\n\n def build(self, entryid, flags):\n if not self.module.check_mode:\n return self.find_entry(entryid).build(flags)\n else:\n try:\n state = 
self.find_entry(entryid)\n except:\n return self.module.exit_json(changed=True)\n if not state:\n return self.module.exit_json(changed=True)\n\n def delete(self, entryid, flags):\n if not self.module.check_mode:\n return self.find_entry(entryid).delete(flags)\n else:\n try:\n state = self.find_entry(entryid)\n except:\n return self.module.exit_json(changed=True)\n if state:\n return self.module.exit_json(changed=True)\n\n def get_autostart(self, entryid):\n state = self.find_entry(entryid).autostart()\n return ENTRY_STATE_AUTOSTART_MAP.get(state,\"unknown\")\n\n def get_autostart2(self, entryid):\n if not self.module.check_mode:\n return self.find_entry(entryid).autostart()\n else:\n try:\n return self.find_entry(entryid).autostart()\n except:\n return self.module.exit_json(changed=True)\n\n def set_autostart(self, entryid, val):\n if not self.module.check_mode:\n return self.find_entry(entryid).setAutostart(val)\n else:\n try:\n state = self.find_entry(entryid).autostart()\n except:\n return self.module.exit_json(changed=True)\n if bool(state) != val:\n return self.module.exit_json(changed=True)\n\n def refresh(self, entryid):\n return self.find_entry(entryid).refresh()\n\n def get_persistent(self, entryid):\n state = self.find_entry(entryid).isPersistent()\n return ENTRY_STATE_PERSISTENT_MAP.get(state,\"unknown\")\n\n def define_from_xml(self, entryid, xml):\n if not self.module.check_mode:\n return self.conn.storagePoolDefineXML(xml)\n else:\n try:\n self.find_entry(entryid)\n except:\n return self.module.exit_json(changed=True)\n\n\nclass VirtStoragePool(object):\n\n def __init__(self, uri, module):\n self.module = module\n self.uri = uri\n self.conn = LibvirtConnection(self.uri, self.module)\n\n def get_pool(self, entryid):\n return self.conn.find_entry(entryid)\n\n def list_pools(self, state=None):\n results = []\n for entry in self.conn.find_entry(-1):\n if state:\n if state == self.conn.get_status2(entry):\n results.append(entry.name())\n else:\n 
results.append(entry.name())\n return results\n\n def state(self):\n results = []\n for entry in self.list_pools():\n state_blurb = self.conn.get_status(entry)\n results.append(\"%s %s\" % (entry,state_blurb))\n return results\n\n def autostart(self, entryid):\n return self.conn.set_autostart(entryid, True)\n\n def get_autostart(self, entryid):\n return self.conn.get_autostart2(entryid)\n\n def set_autostart(self, entryid, state):\n return self.conn.set_autostart(entryid, state)\n\n def create(self, entryid):\n return self.conn.create(entryid)\n\n def start(self, entryid):\n return self.conn.create(entryid)\n\n def stop(self, entryid):\n return self.conn.destroy(entryid)\n\n def destroy(self, entryid):\n return self.conn.destroy(entryid)\n\n def undefine(self, entryid):\n return self.conn.undefine(entryid)\n\n def status(self, entryid):\n return self.conn.get_status(entryid)\n\n def get_xml(self, entryid):\n return self.conn.get_xml(entryid)\n\n def define(self, entryid, xml):\n return self.conn.define_from_xml(entryid, xml)\n\n def build(self, entryid, flags):\n return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags,0))\n\n def delete(self, entryid, flags):\n return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags,0))\n\n def refresh(self, entryid):\n return self.conn.refresh(entryid)\n\n def info(self):\n return self.facts(facts_mode='info')\n\n def facts(self, facts_mode='facts'):\n results = dict()\n for entry in self.list_pools():\n results[entry] = dict()\n if self.conn.find_entry(entry):\n data = self.conn.get_info(entry)\n # libvirt returns maxMem, memory, and cpuTime as long()'s, which\n # xmlrpclib tries to convert to regular int's during serialization.\n # This throws exceptions, so convert them to strings here and\n # assume the other end of the xmlrpc connection can figure things\n # out or doesn't care.\n results[entry] = {\n \"status\" : ENTRY_STATE_INFO_MAP.get(data[0],\"unknown\"),\n \"size_total\" : str(data[1]),\n \"size_used\" 
: str(data[2]),\n \"size_available\" : str(data[3]),\n }\n results[entry][\"autostart\"] = self.conn.get_autostart(entry)\n results[entry][\"persistent\"] = self.conn.get_persistent(entry)\n results[entry][\"state\"] = self.conn.get_status(entry)\n results[entry][\"path\"] = self.conn.get_path(entry)\n results[entry][\"type\"] = self.conn.get_type(entry)\n results[entry][\"uuid\"] = self.conn.get_uuid(entry)\n if self.conn.find_entry(entry).isActive():\n results[entry][\"volume_count\"] = self.conn.get_volume_count(entry)\n results[entry][\"volumes\"] = list()\n for volume in self.conn.get_volume_names(entry):\n results[entry][\"volumes\"].append(volume)\n else:\n results[entry][\"volume_count\"] = -1\n\n try:\n results[entry][\"host\"] = self.conn.get_host(entry)\n except ValueError:\n pass\n\n try:\n results[entry][\"source_path\"] = self.conn.get_source_path(entry)\n except ValueError:\n pass\n\n try:\n results[entry][\"format\"] = self.conn.get_format(entry)\n except ValueError:\n pass\n\n try:\n devices = self.conn.get_devices(entry)\n results[entry][\"devices\"] = devices\n except ValueError:\n pass\n\n else:\n results[entry][\"state\"] = self.conn.get_status(entry)\n\n facts = dict()\n if facts_mode == 'facts':\n facts[\"ansible_facts\"] = dict()\n facts[\"ansible_facts\"][\"ansible_libvirt_pools\"] = results\n elif facts_mode == 'info':\n facts['pools'] = results\n return facts\n\n\ndef core(module):\n\n state = module.params.get('state', None)\n name = module.params.get('name', None)\n command = module.params.get('command', None)\n uri = module.params.get('uri', None)\n xml = module.params.get('xml', None)\n autostart = module.params.get('autostart', None)\n mode = module.params.get('mode', None)\n\n v = VirtStoragePool(uri, module)\n res = {}\n\n if state and command == 'list_pools':\n res = v.list_pools(state=state)\n if not isinstance(res, dict):\n res = { command: res }\n return VIRT_SUCCESS, res\n\n if state:\n if not name:\n module.fail_json(msg = 
\"state change requires a specified name\")\n\n res['changed'] = False\n if state in [ 'active' ]:\n if v.status(name) is not 'active':\n res['changed'] = True\n res['msg'] = v.start(name)\n elif state in [ 'present' ]:\n try:\n v.get_pool(name)\n except EntryNotFound:\n if not xml:\n module.fail_json(msg = \"storage pool '\" + name + \"' not present, but xml not specified\")\n v.define(name, xml)\n res = {'changed': True, 'created': name}\n elif state in [ 'inactive' ]:\n entries = v.list_pools()\n if name in entries:\n if v.status(name) is not 'inactive':\n res['changed'] = True\n res['msg'] = v.destroy(name)\n elif state in [ 'undefined', 'absent' ]:\n entries = v.list_pools()\n if name in entries:\n if v.status(name) is not 'inactive':\n v.destroy(name)\n res['changed'] = True\n res['msg'] = v.undefine(name)\n elif state in [ 'deleted' ]:\n entries = v.list_pools()\n if name in entries:\n if v.status(name) is not 'inactive':\n v.destroy(name)\n v.delete(name, mode)\n res['changed'] = True\n res['msg'] = v.undefine(name)\n else:\n module.fail_json(msg=\"unexpected state\")\n\n return VIRT_SUCCESS, res\n\n if command:\n if command in ENTRY_COMMANDS:\n if not name:\n module.fail_json(msg = \"%s requires 1 argument: name\" % command)\n if command == 'define':\n if not xml:\n module.fail_json(msg = \"define requires xml argument\")\n try:\n v.get_pool(name)\n except EntryNotFound:\n v.define(name, xml)\n res = {'changed': True, 'created': name}\n return VIRT_SUCCESS, res\n elif command == 'build':\n res = v.build(name, mode)\n if not isinstance(res, dict):\n res = { 'changed': True, command: res }\n return VIRT_SUCCESS, res\n elif command == 'delete':\n res = v.delete(name, mode)\n if not isinstance(res, dict):\n res = { 'changed': True, command: res }\n return VIRT_SUCCESS, res\n res = getattr(v, command)(name)\n if not isinstance(res, dict):\n res = { command: res }\n return VIRT_SUCCESS, res\n\n elif hasattr(v, command):\n res = getattr(v, command)()\n if not 
isinstance(res, dict):\n res = { command: res }\n return VIRT_SUCCESS, res\n\n else:\n module.fail_json(msg=\"Command %s not recognized\" % command)\n\n if autostart is not None:\n if not name:\n module.fail_json(msg = \"state change requires a specified name\")\n\n res['changed'] = False\n if autostart:\n if not v.get_autostart(name):\n res['changed'] = True\n res['msg'] = v.set_autostart(name, True)\n else:\n if v.get_autostart(name):\n res['changed'] = True\n res['msg'] = v.set_autostart(name, False)\n\n return VIRT_SUCCESS, res\n\n module.fail_json(msg=\"expected state or command parameter to be specified\")\n\n\ndef main():\n\n module = AnsibleModule (\n argument_spec = dict(\n name = dict(aliases=['pool']),\n state = dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),\n command = dict(choices=ALL_COMMANDS),\n uri = dict(default='qemu:///system'),\n xml = dict(),\n autostart = dict(type='bool'),\n mode = dict(choices=ALL_MODES),\n ),\n supports_check_mode = True\n )\n\n if not HAS_VIRT:\n module.fail_json(\n msg='The `libvirt` module is not importable. Check the requirements.'\n )\n\n if not HAS_XML:\n module.fail_json(\n msg='The `lxml` module is not importable. 
Check the requirements.'\n )\n\n rc = VIRT_SUCCESS\n try:\n rc, result = core(module)\n except Exception as e:\n module.fail_json(msg=str(e))\n\n if rc != 0: # something went wrong emit the msg\n module.fail_json(rc=rc, msg=result)\n else:\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476182,"cells":{"repo_name":{"kind":"string","value":"gregdek/ansible"},"path":{"kind":"string","value":"lib/ansible/plugins/lookup/vars.py"},"copies":{"kind":"string","value":"55"},"size":{"kind":"string","value":"3004"},"content":{"kind":"string","value":"# (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n lookup: vars\n author: Ansible Core\n version_added: \"2.5\"\n short_description: Lookup templated value of variables\n description:\n - Retrieves the value of an Ansible variable.\n options:\n _terms:\n description: The variable names to look up.\n required: True\n default:\n description:\n - What to return if a variable is undefined.\n - If no default is set, it will result in an error if any of the variables is undefined.\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: Show value of 'variablename'\n debug: msg=\"{{ lookup('vars', 'variabl' + myvar)}}\"\n vars:\n variablename: hello\n myvar: ename\n\n- name: Show default empty since i dont have 'variablnotename'\n debug: msg=\"{{ lookup('vars', 'variabl' + myvar, default='')}}\"\n vars:\n variablename: hello\n myvar: notename\n\n- name: Produce an error since i dont have 'variablnotename'\n debug: msg=\"{{ lookup('vars', 'variabl' + myvar)}}\"\n ignore_errors: True\n vars:\n variablename: hello\n myvar: notename\n\n- name: find several related variables\n debug: msg=\"{{ lookup('vars', 'ansible_play_hosts', 'ansible_play_batch', 'ansible_play_hosts_all') }}\"\n\n- name: 
alternate way to find some 'prefixed vars' in loop\n debug: msg=\"{{ lookup('vars', 'ansible_play_' + item) }}\"\n loop:\n - hosts\n - batch\n - hosts_all\n\"\"\"\n\nRETURN = \"\"\"\n_value:\n description:\n - value of the variables requested.\n\"\"\"\n\nfrom ansible.errors import AnsibleError, AnsibleUndefinedVariable\nfrom ansible.module_utils.six import string_types\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass LookupModule(LookupBase):\n\n def run(self, terms, variables=None, **kwargs):\n if variables is not None:\n self._templar.set_available_variables(variables)\n myvars = getattr(self._templar, '_available_variables', {})\n\n self.set_options(direct=kwargs)\n default = self.get_option('default')\n\n ret = []\n for term in terms:\n if not isinstance(term, string_types):\n raise AnsibleError('Invalid setting identifier, \"%s\" is not a string, its a %s' % (term, type(term)))\n\n try:\n try:\n value = myvars[term]\n except KeyError:\n try:\n value = myvars['hostvars'][myvars['inventory_hostname']][term]\n except KeyError:\n raise AnsibleUndefinedVariable('No variable found with this name: %s' % term)\n\n ret.append(self._templar.template(value, fail_on_undefined=True))\n except AnsibleUndefinedVariable:\n if default is not None:\n ret.append(default)\n else:\n raise\n\n return ret\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476183,"cells":{"repo_name":{"kind":"string","value":"roselleebarle04/django"},"path":{"kind":"string","value":"django/conf/locale/el/formats.py"},"copies":{"kind":"string","value":"446"},"size":{"kind":"string","value":"1477"},"content":{"kind":"string","value":"# -*- encoding: utf-8 -*-\n# This file is distributed under the same license as the Django package.\n#\nfrom __future__ import unicode_literals\n\n# The *_FORMAT strings use the Django date format syntax,\n# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date\nDATE_FORMAT = 'd/m/Y'\nTIME_FORMAT = 'P'\nDATETIME_FORMAT = 'd/m/Y 
P'\nYEAR_MONTH_FORMAT = 'F Y'\nMONTH_DAY_FORMAT = 'j F'\nSHORT_DATE_FORMAT = 'd/m/Y'\nSHORT_DATETIME_FORMAT = 'd/m/Y P'\nFIRST_DAY_OF_WEEK = 0 # Sunday\n\n# The *_INPUT_FORMATS strings use the Python strftime format syntax,\n# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior\nDATE_INPUT_FORMATS = [\n '%d/%m/%Y', '%d/%m/%y', '%Y-%m-%d', # '25/10/2006', '25/10/06', '2006-10-25',\n]\nDATETIME_INPUT_FORMATS = [\n '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'\n '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'\n '%d/%m/%Y %H:%M', # '25/10/2006 14:30'\n '%d/%m/%Y', # '25/10/2006'\n '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'\n '%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'\n '%d/%m/%y %H:%M', # '25/10/06 14:30'\n '%d/%m/%y', # '25/10/06'\n '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'\n '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'\n '%Y-%m-%d %H:%M', # '2006-10-25 14:30'\n '%Y-%m-%d', # '2006-10-25'\n]\nDECIMAL_SEPARATOR = ','\nTHOUSAND_SEPARATOR = '\\xa0' # non-breaking space\nNUMBER_GROUPING = 3\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476184,"cells":{"repo_name":{"kind":"string","value":"CSC301H-Fall2013/JuakStore"},"path":{"kind":"string","value":"site-packages/django/db/utils.py"},"copies":{"kind":"string","value":"100"},"size":{"kind":"string","value":"6204"},"content":{"kind":"string","value":"import os\nimport pkgutil\nfrom threading import local\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.importlib import import_module\nfrom django.utils._os import upath\nfrom django.utils import six\n\n\nDEFAULT_DB_ALIAS = 'default'\n\n# Define some exceptions that mirror the PEP249 interface.\n# We will rethrow any backend-specific errors using these\n# common wrappers\nclass DatabaseError(Exception):\n pass\n\nclass IntegrityError(DatabaseError):\n pass\n\n\ndef load_backend(backend_name):\n # Look for a fully qualified database backend 
name\n try:\n return import_module('.base', backend_name)\n except ImportError as e_user:\n # The database backend wasn't found. Display a helpful error message\n # listing all possible (built-in) database backends.\n backend_dir = os.path.join(os.path.dirname(upath(__file__)), 'backends')\n try:\n builtin_backends = [\n name for _, name, ispkg in pkgutil.iter_modules([backend_dir])\n if ispkg and name != 'dummy']\n except EnvironmentError:\n builtin_backends = []\n if backend_name not in ['django.db.backends.%s' % b for b in\n builtin_backends]:\n backend_reprs = map(repr, sorted(builtin_backends))\n error_msg = (\"%r isn't an available database backend.\\n\"\n \"Try using 'django.db.backends.XXX', where XXX \"\n \"is one of:\\n %s\\nError was: %s\" %\n (backend_name, \", \".join(backend_reprs), e_user))\n raise ImproperlyConfigured(error_msg)\n else:\n # If there's some other error, this must be an error in Django\n raise\n\n\nclass ConnectionDoesNotExist(Exception):\n pass\n\n\nclass ConnectionHandler(object):\n def __init__(self, databases):\n if not databases:\n self.databases = {\n DEFAULT_DB_ALIAS: {\n 'ENGINE': 'django.db.backends.dummy',\n },\n }\n else:\n self.databases = databases\n self._connections = local()\n\n def ensure_defaults(self, alias):\n \"\"\"\n Puts the defaults into the settings dictionary for a given connection\n where no settings is provided.\n \"\"\"\n try:\n conn = self.databases[alias]\n except KeyError:\n raise ConnectionDoesNotExist(\"The connection %s doesn't exist\" % alias)\n\n conn.setdefault('ENGINE', 'django.db.backends.dummy')\n if conn['ENGINE'] == 'django.db.backends.' 
or not conn['ENGINE']:\n conn['ENGINE'] = 'django.db.backends.dummy'\n conn.setdefault('OPTIONS', {})\n conn.setdefault('TIME_ZONE', 'UTC' if settings.USE_TZ else settings.TIME_ZONE)\n for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:\n conn.setdefault(setting, '')\n for setting in ['TEST_CHARSET', 'TEST_COLLATION', 'TEST_NAME', 'TEST_MIRROR']:\n conn.setdefault(setting, None)\n\n def __getitem__(self, alias):\n if hasattr(self._connections, alias):\n return getattr(self._connections, alias)\n\n self.ensure_defaults(alias)\n db = self.databases[alias]\n backend = load_backend(db['ENGINE'])\n conn = backend.DatabaseWrapper(db, alias)\n setattr(self._connections, alias, conn)\n return conn\n\n def __setitem__(self, key, value):\n setattr(self._connections, key, value)\n\n def __iter__(self):\n return iter(self.databases)\n\n def all(self):\n return [self[alias] for alias in self]\n\n\nclass ConnectionRouter(object):\n def __init__(self, routers):\n self.routers = []\n for r in routers:\n if isinstance(r, six.string_types):\n try:\n module_name, klass_name = r.rsplit('.', 1)\n module = import_module(module_name)\n except ImportError as e:\n raise ImproperlyConfigured('Error importing database router %s: \"%s\"' % (klass_name, e))\n try:\n router_class = getattr(module, klass_name)\n except AttributeError:\n raise ImproperlyConfigured('Module \"%s\" does not define a database router name \"%s\"' % (module, klass_name))\n else:\n router = router_class()\n else:\n router = r\n self.routers.append(router)\n\n def _router_func(action):\n def _route_db(self, model, **hints):\n chosen_db = None\n for router in self.routers:\n try:\n method = getattr(router, action)\n except AttributeError:\n # If the router doesn't have a method, skip to the next one.\n pass\n else:\n chosen_db = method(model, **hints)\n if chosen_db:\n return chosen_db\n try:\n return hints['instance']._state.db or DEFAULT_DB_ALIAS\n except KeyError:\n return DEFAULT_DB_ALIAS\n return 
_route_db\n\n db_for_read = _router_func('db_for_read')\n db_for_write = _router_func('db_for_write')\n\n def allow_relation(self, obj1, obj2, **hints):\n for router in self.routers:\n try:\n method = router.allow_relation\n except AttributeError:\n # If the router doesn't have a method, skip to the next one.\n pass\n else:\n allow = method(obj1, obj2, **hints)\n if allow is not None:\n return allow\n return obj1._state.db == obj2._state.db\n\n def allow_syncdb(self, db, model):\n for router in self.routers:\n try:\n method = router.allow_syncdb\n except AttributeError:\n # If the router doesn't have a method, skip to the next one.\n pass\n else:\n allow = method(db, model)\n if allow is not None:\n return allow\n return True\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":476185,"cells":{"repo_name":{"kind":"string","value":"itucsdb1611/itucsdb1611"},"path":{"kind":"string","value":"classes/operations/project_operations.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4444"},"content":{"kind":"string","value":"\n\nimport psycopg2 as dbapi2\nimport datetime\nfrom classes.model_config import dsn\n\nclass project_operations:\n def __init__(self):\n self.last_key = None\n\n\n def add_project(self, Project):\n with dbapi2.connect(dsn) as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"INSERT INTO Project(Name, Description, ProjectTypeId, ProjectThesisTypeId, DepartmentId, ProjectStatusTypeId, StartDate, EndDate, MemberLimit, CreatedByPersonId, ProjectManagerId, Deleted) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, False )\",\n (Project.title, Project.project_description, Project.project_type, Project.project_thesis_type,\n Project.department, Project.project_status_type, Project.start_date, Project.end_date,\n Project.member_limit, Project.created_by, Project.manager))\n connection.commit()\n self.last_key = cursor.lastrowid\n\n def delete_project(self, key):\n with dbapi2.connect(dsn) as connection:\n 
cursor = connection.cursor()\n cursor.execute(\"\"\"DELETE FROM Project WHERE (ObjectId=%s)\"\"\", (key,))\n connection.commit()\n\n def update_project(self, key, title, project_description, end_date, member_limit, manager, deleted):\n with dbapi2.connect(dsn) as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"UPDATE Project SET Name = %s, Description = %s, EndDate = %s, MemberLimit = %s, ProjectManagerId = %s, Deleted = %s WHERE (ObjectId=%s)\"\"\",\n (title, project_description, end_date, member_limit, manager, deleted, key))\n connection.commit()\n\n def get_project(self, key):\n with dbapi2.connect(dsn) as connection:\n cursor = connection.cursor()\n query = \"\"\"SELECT Project.Name, Project.Description, ProjectType.Name, Department.Name, ProjectStatusType.Name, Person.FirstName, Person.LastName, Project.ObjectId, Project.CreatedByPersonId, Project.EndDate, Project.MemberLimit FROM Project\n JOIN ProjectType ON(Project.ProjectTypeId=ProjectType.ObjectId)\n JOIN Department ON(Project.DepartmentId = Department.ObjectId)\n JOIN ProjectStatusType ON(Project.ProjectStatusTypeId=ProjectStatusType.ObjectId)\n JOIN Person ON(Project.CreatedByPersonId=Person.ObjectId)\n WHERE (Project.ObjectID = %s)\"\"\"\n cursor.execute(query, (key,))\n project = cursor.fetchone()\n connection.commit()\n return project\n\n def get_projects(self):\n with dbapi2.connect(dsn) as connection:\n cursor = connection.cursor()\n cursor.execute(\"\"\"SELECT Project.ObjectId, Project.Name, Description, Department.Name, Person.FirstName, Person.LastName\n FROM Project JOIN Department ON(Project.DepartmentId = Department.ObjectId) JOIN Person ON(Person.ObjectId = Project.ProjectManagerId)\"\"\")\n projects = cursor.fetchall()\n connection.commit()\n return projects\n\n def get_project_member_limit(self, key):\n with dbapi2.connect(dsn) as connection:\n cursor = connection.cursor()\n cursor.execute(\"\"\"SELECT MemberLimit FROM Project WHERE (ObjectId=%s)\"\"\", (key,))\n 
projects = cursor.fetchall()\n connection.commit()\n return projects\n\n def get_the_projects_of_a_person(self, key):\n with dbapi2.connect(dsn) as connection:\n cursor = connection.cursor()\n query = \"\"\"SELECT Project.Name, Project.Description, ProjectType.Name, Project.ObjectId FROM Project\n JOIN ProjectType ON(Project.ProjectTypeId=ProjectType.ObjectId)\n JOIN Team ON(Project.ObjectId = Team.ProjectId)\n WHERE (Team.MemberId = %s)\"\"\"\n cursor.execute(query, (key,))\n project_ids = cursor.fetchall()\n connection.commit()\n return project_ids\n\n\n def get_last(self):\n with dbapi2.connect(dsn) as connection:\n cursor = connection.cursor()\n cursor.execute(\"\"\"SELECT ObjectId FROM Project Order By ObjectId Desc LIMIT 1\"\"\")\n projectId = cursor.fetchone()\n connection.commit()\n return projectId\n\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476186,"cells":{"repo_name":{"kind":"string","value":"andensinlimite/metaespacio"},"path":{"kind":"string","value":"metaespacio/metaespacio/settings.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"4507"},"content":{"kind":"string","value":"\"\"\"\nDjango settings for metaespacio project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '57#f7u+v@yh*vwv^ox#%*wgx6c@_a*%8#)0@1f6#dt=oar4u$f'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = (os.getuid() >= 1000)\nPRODUCCION = False\nTEMPLATE_DEBUG = DEBUG\n\n\n# Application 
definition\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'django_extensions',\n 'oidc_provider',\n 'crispy_forms',\n 'common',\n 'registro',\n 'espacios',\n 'pages',\n 'taquilla',\n 'encuestas',\n # 'bibliotheca', # necesita actualizar a 1.8\n # 'tastypie', # necesita actualizar a 1.8\n 'django.contrib.admin', # al final por un override de templates\n 'cuotas',\n 'graphos',\n 'contabilidad',\n 'adjuntos',\n 'caronte',\n 'rest',\n 'rest_framework',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nfrom django.conf import global_settings\nTEMPLATE_CONTEXT_PROCESSORS = \\\n global_settings.TEMPLATE_CONTEXT_PROCESSORS + (\n 'common.context_processor.site',\n )\n\nROOT_URLCONF = 'metaespacio.urls'\n\nWSGI_APPLICATION = 'metaespacio.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'es-es'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# SITE_ID = 1\n\nTEMPLATE_LOADERS = ('django.template.loaders.app_directories.Loader', )\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 
'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler',\n 'include_html': True,\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n }\n}\n\n# cosas custom\nMEDIA_URL = \"/media/\"\nSTATIC_URL = '/static/'\nLOGIN_REDIRECT_URL = \"/\"\nCRISPY_TEMPLATE_PACK = \"bootstrap3\"\nSITE_URL = 'http://metaespacio.org'\nLOGIN_URL = '/accounts/login'\nOIDC_RSA_KEY_FOLDER = BASE_DIR\nDEFAULT_FROM_EMAIL = 'cambiame@que.soy.util'\n\ntry:\n from .settings_local import * # noqa\nexcept ImportError:\n pass\n\nif DEBUG:\n # static en desarrollo en carpeta del proyecto\n STATIC_ROOT = os.path.join(BASE_DIR, '.static')\n MEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\n # en desarrollo no se usa\n ALLOWED_HOSTS = []\n INSTALLED_APPS += ('debug_toolbar', )\n MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware', )\n\nif PRODUCCION:\n # FIXME Esto revisarlo porque tampoco lo estamos poniendo aqui exactamente\n STATIC_ROOT = '/var/www/metaespacio/static/'\n MEDIA_ROOT = '/opt/metaespacio/media/'\n # en preproduccion o produccion si se usa\n ALLOWED_HOSTS = ['*']\nelse:\n # errores por consola\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'PAGE_SIZE': 10\n}\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":476187,"cells":{"repo_name":{"kind":"string","value":"geekboxzone/lollipop_external_chromium_org"},"path":{"kind":"string","value":"third_party/markdown/extensions/codehilite.py"},"copies":{"kind":"string","value":"109"},"size":{"kind":"string","value":"10820"},"content":{"kind":"string","value":"# markdown is released under the BSD license\n# Copyright 2007, 2008 The Python Markdown Project (v. 
1.7 and later)\n# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)\n# Copyright 2004 Manfred Stienstra (the original version)\n# \n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT\n# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\n\"\"\"\nCodeHilite Extension for Python-Markdown\n========================================\n\nAdds code/syntax highlighting to standard Python-Markdown code blocks.\n\nCopyright 2006-2008 [Waylan Limberg](http://achinghead.com/).\n\nProject website: \nContact: markdown@freewisdom.org\n\nLicense: BSD (see ../LICENSE.md for details)\n\nDependencies:\n* [Python 2.3+](http://python.org/)\n* [Markdown 2.0+](http://packages.python.org/Markdown/)\n* [Pygments](http://pygments.org/)\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom . import Extension\nfrom ..treeprocessors import Treeprocessor\nimport warnings\ntry:\n from pygments import highlight\n from pygments.lexers import get_lexer_by_name, guess_lexer, TextLexer\n from pygments.formatters import HtmlFormatter\n pygments = True\nexcept ImportError:\n pygments = False\n\n# ------------------ The Main CodeHilite Class ----------------------\nclass CodeHilite(object):\n \"\"\"\n Determine language of source code, and pass it into the pygments hilighter.\n\n Basic Usage:\n >>> code = CodeHilite(src = 'some text')\n >>> html = code.hilite()\n\n * src: Source string or any object with a .readline attribute.\n\n * linenums: (Boolean) Set line numbering to 'on' (True), 'off' (False) or 'auto'(None). 
\n Set to 'auto' by default.\n\n * guess_lang: (Boolean) Turn language auto-detection 'on' or 'off' (on by default).\n\n * css_class: Set class name of wrapper div ('codehilite' by default).\n\n Low Level Usage:\n >>> code = CodeHilite()\n >>> code.src = 'some text' # String or anything with a .readline attr.\n >>> code.linenos = True # True or False; Turns line numbering on or of.\n >>> html = code.hilite()\n\n \"\"\"\n\n def __init__(self, src=None, linenums=None, guess_lang=True,\n css_class=\"codehilite\", lang=None, style='default',\n noclasses=False, tab_length=4):\n self.src = src\n self.lang = lang\n self.linenums = linenums\n self.guess_lang = guess_lang\n self.css_class = css_class\n self.style = style\n self.noclasses = noclasses\n self.tab_length = tab_length\n\n def hilite(self):\n \"\"\"\n Pass code to the [Pygments](http://pygments.pocoo.org/) highliter with\n optional line numbers. The output should then be styled with css to\n your liking. No styles are applied by default - only styling hooks\n (i.e.: ).\n\n returns : A string of html.\n\n \"\"\"\n\n self.src = self.src.strip('\\n')\n\n if self.lang is None:\n self._getLang()\n\n if pygments:\n try:\n lexer = get_lexer_by_name(self.lang)\n except ValueError:\n try:\n if self.guess_lang:\n lexer = guess_lexer(self.src)\n else:\n lexer = TextLexer()\n except ValueError:\n lexer = TextLexer()\n formatter = HtmlFormatter(linenos=self.linenums,\n cssclass=self.css_class,\n style=self.style,\n noclasses=self.noclasses)\n return highlight(self.src, lexer, formatter)\n else:\n # just escape and build markup usable by JS highlighting libs\n txt = self.src.replace('&', '&amp;')\n txt = txt.replace('<', '&lt;')\n txt = txt.replace('>', '&gt;')\n txt = txt.replace('\"', '&quot;')\n classes = []\n if self.lang:\n classes.append('language-%s' % self.lang)\n if self.linenums:\n classes.append('linenums')\n class_str = ''\n if classes:\n class_str = ' class=\"%s\"' % ' '.join(classes) \n return '
    %s
    \\n'% \\\n (self.css_class, class_str, txt)\n\n def _getLang(self):\n \"\"\"\n Determines language of a code block from shebang line and whether said\n line should be removed or left in place. If the sheband line contains a\n path (even a single /) then it is assumed to be a real shebang line and\n left alone. However, if no path is given (e.i.: #!python or :::python)\n then it is assumed to be a mock shebang for language identifitation of a\n code fragment and removed from the code block prior to processing for\n code highlighting. When a mock shebang (e.i: #!python) is found, line\n numbering is turned on. When colons are found in place of a shebang\n (e.i.: :::python), line numbering is left in the current state - off\n by default.\n\n \"\"\"\n\n import re\n\n #split text into lines\n lines = self.src.split(\"\\n\")\n #pull first line to examine\n fl = lines.pop(0)\n\n c = re.compile(r'''\n (?:(?:^::+)|(?P^[#]!))\t# Shebang or 2 or more colons.\n (?P(?:/\\w+)*[/ ])? # Zero or 1 path\n (?P[\\w+-]*) # The language\n ''', re.VERBOSE)\n # search first line for shebang\n m = c.search(fl)\n if m:\n # we have a match\n try:\n self.lang = m.group('lang').lower()\n except IndexError:\n self.lang = None\n if m.group('path'):\n # path exists - restore first line\n lines.insert(0, fl)\n if self.linenums is None and m.group('shebang'):\n # Overridable and Shebang exists - use line numbers\n self.linenums = True\n else:\n # No match\n lines.insert(0, fl)\n\n self.src = \"\\n\".join(lines).strip(\"\\n\")\n\n\n\n# ------------------ The Markdown Extension -------------------------------\nclass HiliteTreeprocessor(Treeprocessor):\n \"\"\" Hilight source code in code blocks. \"\"\"\n\n def run(self, root):\n \"\"\" Find code blocks and store in htmlStash. 
\"\"\"\n blocks = root.getiterator('pre')\n for block in blocks:\n children = block.getchildren()\n if len(children) == 1 and children[0].tag == 'code':\n code = CodeHilite(children[0].text,\n linenums=self.config['linenums'],\n guess_lang=self.config['guess_lang'],\n css_class=self.config['css_class'],\n style=self.config['pygments_style'],\n noclasses=self.config['noclasses'],\n tab_length=self.markdown.tab_length)\n placeholder = self.markdown.htmlStash.store(code.hilite(),\n safe=True)\n # Clear codeblock in etree instance\n block.clear()\n # Change to p element which will later\n # be removed when inserting raw html\n block.tag = 'p'\n block.text = placeholder\n\n\nclass CodeHiliteExtension(Extension):\n \"\"\" Add source code hilighting to markdown codeblocks. \"\"\"\n\n def __init__(self, configs):\n # define default configs\n self.config = {\n 'linenums': [None, \"Use lines numbers. True=yes, False=no, None=auto\"],\n 'force_linenos' : [False, \"Depreciated! Use 'linenums' instead. Force line numbers - Default: False\"],\n 'guess_lang' : [True, \"Automatic language detection - Default: True\"],\n 'css_class' : [\"codehilite\",\n \"Set class name for wrapper
    - Default: codehilite\"],\n 'pygments_style' : ['default', 'Pygments HTML Formatter Style (Colorscheme) - Default: default'],\n 'noclasses': [False, 'Use inline styles instead of CSS classes - Default false']\n }\n\n # Override defaults with user settings\n for key, value in configs:\n # convert strings to booleans\n if value == 'True': value = True\n if value == 'False': value = False\n if value == 'None': value = None\n\n if key == 'force_linenos':\n warnings.warn('The \"force_linenos\" config setting'\n ' to the CodeHilite extension is deprecrecated.'\n ' Use \"linenums\" instead.', PendingDeprecationWarning)\n if value:\n # Carry 'force_linenos' over to new 'linenos'.\n self.setConfig('linenums', True)\n\n self.setConfig(key, value)\n\n def extendMarkdown(self, md, md_globals):\n \"\"\" Add HilitePostprocessor to Markdown instance. \"\"\"\n hiliter = HiliteTreeprocessor(md)\n hiliter.config = self.getConfigs()\n md.treeprocessors.add(\"hilite\", hiliter, \"\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n\"\"\"\nThis module contains class that could be useful in various parts of the\nprogram\n\"\"\"\n\nimport sys\nimport copy\nimport Queue\nimport threading\n\nimport StringIO\nimport traceback\n\nfrom HTMLParser import HTMLParser\n\nfrom umit.pm.core.logger import log\n\ntry:\n from collections import defaultdict\nexcept ImportError:\n class defaultdict(dict):\n def __init__(self, default_factory=None, *a, **kw):\n if (default_factory is not None and\n not hasattr(default_factory, '__call__')):\n raise TypeError('first argument must be callable')\n dict.__init__(self, *a, **kw)\n self.default_factory = default_factory\n def __getitem__(self, key):\n if key in self:\n return dict.__getitem__(self, key)\n else:\n return self.__missing__(key)\n def __missing__(self, key):\n if self.default_factory is None:\n raise KeyError(key)\n self[key] = value = self.default_factory()\n return value\n def __reduce__(self):\n if self.default_factory is None:\n args = tuple()\n else:\n args = self.default_factory,\n return type(self), args, None, None, self.iteritems()\n def copy(self):\n return self.__copy__()\n def __copy__(self):\n return type(self)(self.default_factory, self)\n def __deepcopy__(self, memo):\n import copy\n return type(self)(self.default_factory,\n copy.deepcopy(self.items()))\n def __repr__(self):\n return 'defaultdict(%s, %s)' % (self.default_factory,\n dict.__repr__(self))\n\n# Ordered dict python implementation\n\nclass odict(dict):\n\n def __init__(self, d={}):\n self._keys = d.keys()\n dict.__init__(self, d)\n\n def __delitem__(self, key):\n dict.__delitem__(self, key)\n self._keys.remove(key)\n\n def __setitem__(self, key, item):\n dict.__setitem__(self, key, item)\n # a peculiar sharp edge 
from copy.deepcopy\n # we'll have our set item called without __init__\n if not hasattr(self, '_keys'):\n self._keys = [key,]\n if key not in self._keys:\n self._keys.append(key)\n\n def clear(self):\n dict.clear(self)\n self._keys = []\n\n def items(self):\n items = []\n for i in self._keys:\n items.append(i, self[i])\n return items\n\n def keys(self):\n return self._keys\n\n def popitem(self):\n if len(self._keys) == 0:\n raise KeyError('dictionary is empty')\n else:\n key = self._keys[-1]\n val = self[key]\n del self[key]\n return key, val\n\n def setdefault(self, key, failobj = None):\n dict.setdefault(self, key, failobj)\n if key not in self._keys:\n self._keys.append(key)\n\n def update(self, d):\n for key in d.keys():\n if not self.has_key(key):\n self._keys.append(key)\n dict.update(self, d)\n\n def values(self):\n v = []\n for i in self._keys:\n v.append(self[i])\n return v\n\n def move(self, key, index):\n\n \"\"\" Move the specified to key to *before* the specified index. \"\"\"\n\n try:\n cur = self._keys.index(key)\n except ValueError:\n raise KeyError(key)\n self._keys.insert(index, key)\n # this may have shifted the position of cur, if it is after index\n if cur >= index: cur = cur + 1\n del self._keys[cur]\n\n def index(self, key):\n if not self.has_key(key):\n raise KeyError(key)\n return self._keys.index(key)\n\n def __iter__(self):\n for k in self._keys:\n yield k\n\n# Simple decorator for compatibility with python 2.4 (with statement)\ndef with_decorator(func):\n def proxy(self, *args, **kwargs):\n self.lock.acquire()\n\n try:\n return func(self, *args, **kwargs)\n finally:\n self.lock.release()\n\n proxy.__name__ = func.__name__\n proxy.__dict__ = func.__dict__\n proxy.__doc__ = func.__doc__\n\n return proxy\n\ndef generate_traceback():\n fp = StringIO.StringIO()\n traceback.print_exc(file=fp)\n return fp.getvalue()\n\nclass Node(object):\n \"\"\"\n A simple Node class to create Binary tree.\n To create a tree simply do tree = Node()\n 
\"\"\"\n\n def __init__(self, data=None, children=[]):\n \"\"\"\n Initialize a Node object\n @param data the data for the Node or None if you are constructing\n a Tree object\n @param children a list of Node objects\n \"\"\"\n\n self.data = data\n self.root = None\n self.children = []\n\n for child in children:\n self.append_node(child)\n\n def append_node(self, node):\n \"\"\"\n Append a child node\n @param node a Node object\n \"\"\"\n\n assert (isinstance(node, Node))\n\n node.root = self\n self.children.append(node)\n\n def __iter__(self):\n if self.data:\n yield self\n\n for child in self.children:\n for c in child:\n yield c\n\n def __repr__(self):\n if self.root != None:\n return \"%sChild -> %s (%d)\" % (\" \" * self.get_depth(), self.data,\n len(self.children))\n else:\n return \"Tree %s\" % object.__repr__(self)\n\n def get_depth(self):\n idx = 0\n root = self.root\n\n while root:\n root = root.root\n idx += 1\n\n return idx\n\n def __len__(self):\n tot = 0\n for node in self.children:\n tot += len(node)\n\n if self.data:\n tot += 1\n\n return tot\n\n def get_parent(self):\n return self.root\n\n def get_data(self):\n return self.data\n\n def get_children(self):\n for node in self.children:\n yield node\n\n def is_parent(self):\n return self.children != []\n\n def __getitem__(self, x):\n return self.children[x]\n\n def find(self, value):\n for i in self:\n if value == i.data:\n return i.get_path()\n\n return None\n\n def get_path(self):\n path = []\n\n find = self\n root = self.root\n\n while root:\n path.append(root.index(find))\n\n root = root.root\n find = find.root\n\n path.reverse()\n return tuple(path)\n\n def get_next_of(self, node):\n try:\n return self[self.index(node) + 1]\n except:\n return None\n\n def index(self, node):\n return self.children.index(node)\n\n def get_from_path(self, path):\n root = self\n\n for idx in path:\n root = root[idx]\n\n return root\n\n def sort(self):\n for node in self.children:\n node.sort()\n\n 
self.children.sort()\n\n def __cmp__(self, node):\n if not self:\n return 1\n if not node:\n return -1\n return cmp(self.data, node.data)\n\nWorkerStop = object()\n\nclass ThreadPool(object):\n MIN_THREADS = 5\n MAX_THREADS = 20\n IS_DAEMON = True\n\n started = False\n joined = False\n workers = 0\n\n def __init__(self, minthreads=5, maxthreads=20):\n assert minthreads >= 0\n assert minthreads <= maxthreads\n\n self.queue = Queue.Queue(0)\n self.min = minthreads\n self.max = maxthreads\n\n self.waiters = []\n self.threads = []\n self.working = []\n\n def queue_work(self, callback, errback, func, *args, **kwargs):\n if self.joined:\n return\n\n obj = (callback, errback, func, args, kwargs)\n self.queue.put(obj)\n\n if self.started:\n self.resize()\n\n def start(self):\n self.joined = False\n self.started = True\n\n self.resize()\n\n def stop(self):\n self.joined = True\n threads = copy.copy(self.threads)\n\n while self.workers:\n self.queue.put(WorkerStop)\n self.workers -= 1\n\n def join_threads(self):\n # check out for exceptions on already joined\n # threads.\n\n threads = copy.copy(self.threads)\n\n for thread in threads:\n thread.join()\n\n def resize(self, minthreads=None, maxthreads=None):\n minthreads = max(minthreads, self.MIN_THREADS)\n maxthreads = max(minthreads, self.MAX_THREADS)\n\n assert minthreads >= 0\n assert minthreads <= maxthreads\n\n self.min = minthreads\n self.max = maxthreads\n\n if not self.started:\n return\n\n while self.workers > self.max:\n self.stop_worker()\n\n while self.workers < self.min:\n self.start_worker()\n\n self.start_needed_workers()\n\n def start_needed_workers(self):\n size = self.queue.qsize() + len(self.working)\n\n while self.workers < min(self.max, size):\n self.start_worker()\n\n def start_worker(self):\n self.workers += 1\n thread = threading.Thread(target=self._worker)\n thread.setDaemon(self.IS_DAEMON)\n\n self.threads.append(thread)\n thread.start()\n\n def stop_worker(self):\n self.queue.put(WorkerStop)\n 
self.workers -= 1\n\n def _worker(self):\n ct = threading.currentThread()\n obj = self.queue.get()\n\n while obj is not WorkerStop:\n self.working.append(ct)\n\n (callback, errback, func, args, kw) = obj\n\n try:\n try:\n result = func(*args, **kw)\n except Exception, exc:\n log.error(\"Handling exception %s Traceback:\" % exc)\n log.error(generate_traceback())\n\n if errback is not None:\n errback(sys.exc_info()[1])\n else:\n if callback is not None:\n callback(result)\n except Exception, err:\n log.critical(\"Thread exceptions ignored. Traceback:\")\n log.critical(generate_traceback())\n\n self.working.remove(ct)\n\n self.waiters.append(ct)\n\n obj = self.queue.get()\n self.waiters.remove(ct)\n\n self.threads.remove(ct)\n\nclass Interruptable:\n \"\"\"\n Interruptable interface\n \"\"\"\n\n def start(self):\n raise Exception(\"Implement me\")\n def terminate(self):\n raise Exception(\"Implement me\")\n def isAlive(self):\n raise Exception(\"Implement me\")\n\nclass Singleton(object):\n \"\"\"\n A class for singleton pattern\n Support also gobject if Singleton base subclass if specified first\n \"\"\"\n\n instances = {}\n def __new__(cls, *args, **kwargs):\n from gobject import GObject\n\n if Singleton.instances.get(cls) is None:\n cls.__original_init__ = cls.__init__\n if issubclass(cls, GObject):\n Singleton.instances[cls] = GObject.__new__(cls)\n else:\n Singleton.instances[cls] = object.__new__(cls, *args, **kwargs)\n elif cls.__init__ == cls.__original_init__:\n def nothing(*args, **kwargs):\n pass\n cls.__init__ = nothing\n return Singleton.instances[cls]\n\nclass HTMLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_stripped_data(self):\n return ''.join(self.fed)\n\ndef strip_tags(x):\n s = HTMLStripper()\n s.feed(x)\n return s.get_stripped_data()\n\n__all__ = ['strip_tags', 'Singleton', 'Interruptable', 'ThreadPool', 'Node', \\\n 'generate_traceback', 'with_decorator', 
'defaultdict', 'odict']\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":476189,"cells":{"repo_name":{"kind":"string","value":"agbell/karaka"},"path":{"kind":"string","value":"karaka/api/apiconfig.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"1689"},"content":{"kind":"string","value":"#\n# Karaka Skype-XMPP Gateway: API configuration handler\n# \n#\n# Copyright (C) 2008-2009 Vipadia Limited\n# Richard Mortier \n# Neil Stratford \n#\n\n## This program is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License version\n## 2 as published by the Free Software Foundation.\n\n## This program is distributed in the hope that it will be useful, but\n## WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n## General Public License version 2 for more details.\n\n## You should have received a copy of the GNU General Public License\n## version 2 along with this program; if not, write to the Free\n## Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n## MA 02110-1301, USA.\n\nimport ConfigParser\nFILENAME = '/etc/karaka-api.conf'\n\nclass APIConfig:\n def __init__(self):\n self.config = ConfigParser.ConfigParser()\n self.config.read(FILENAME)\n\n self.sql_server = self.get(\"mysql\", \"server\")\n self.sql_database = self.get(\"mysql\", \"database\")\n self.sql_user = self.get(\"mysql\", \"user\")\n self.sql_password = self.get(\"mysql\", \"password\")\n self.marketing_message = self.get(\"default\",\"mood\")\n\n self.complete = True\n\n def get(self, section, option):\n if self.config.has_option(section, option):\n return self.config.get(section, option)\n else:\n print \"No option \" + option + \" in section \" + section + \" in \" + FILENAME\n self.complete = False\n return 
\"\"\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":476190,"cells":{"repo_name":{"kind":"string","value":"Multirom-mi4i/android_kernel_xiaomi_ferrari"},"path":{"kind":"string","value":"tools/perf/scripts/python/net_dropmonitor.py"},"copies":{"kind":"string","value":"2669"},"size":{"kind":"string","value":"1738"},"content":{"kind":"string","value":"# Monitor the system for dropped packets and proudce a report of drop locations and counts\n\nimport os\nimport sys\n\nsys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n\t\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n\nfrom perf_trace_context import *\nfrom Core import *\nfrom Util import *\n\ndrop_log = {}\nkallsyms = []\n\ndef get_kallsyms_table():\n\tglobal kallsyms\n\n\ttry:\n\t\tf = open(\"/proc/kallsyms\", \"r\")\n\texcept:\n\t\treturn\n\n\tfor line in f:\n\t\tloc = int(line.split()[0], 16)\n\t\tname = line.split()[2]\n\t\tkallsyms.append((loc, name))\n\tkallsyms.sort()\n\ndef get_sym(sloc):\n\tloc = int(sloc)\n\n\t# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start\n\t# kallsyms[i][0] > loc for all end <= i < len(kallsyms)\n\tstart, end = -1, len(kallsyms)\n\twhile end != start + 1:\n\t\tpivot = (start + end) // 2\n\t\tif loc < kallsyms[pivot][0]:\n\t\t\tend = pivot\n\t\telse:\n\t\t\tstart = pivot\n\n\t# Now (start == -1 or kallsyms[start][0] <= loc)\n\t# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])\n\tif start >= 0:\n\t\tsymloc, name = kallsyms[start]\n\t\treturn (name, loc - symloc)\n\telse:\n\t\treturn (None, 0)\n\ndef print_drop_table():\n\tprint \"%25s %25s %25s\" % (\"LOCATION\", \"OFFSET\", \"COUNT\")\n\tfor i in drop_log.keys():\n\t\t(sym, off) = get_sym(i)\n\t\tif sym == None:\n\t\t\tsym = i\n\t\tprint \"%25s %25s %25s\" % (sym, off, drop_log[i])\n\n\ndef trace_begin():\n\tprint \"Starting trace (Ctrl-C to dump results)\"\n\ndef trace_end():\n\tprint \"Gathering kallsyms data\"\n\tget_kallsyms_table()\n\tprint_drop_table()\n\n# called from perf, when it 
finds a correspoinding event\ndef skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,\n\t\t skbaddr, location, protocol):\n\tslocation = str(location)\n\ttry:\n\t\tdrop_log[slocation] = drop_log[slocation] + 1\n\texcept:\n\t\tdrop_log[slocation] = 1\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":476191,"cells":{"repo_name":{"kind":"string","value":"sbrisard/janus"},"path":{"kind":"string","value":"examples/fftw_python_benchmark_mpi.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1237"},"content":{"kind":"string","value":"import sys\nimport time\n\nimport numpy as np\n\nimport janus.fft.parallel\n\nfrom mpi4py import MPI\n\ndef benchmark(shape, niter):\n comm = MPI.COMM_WORLD\n root = 0\n transform = janus.fft.parallel.create_real(shape, comm)\n local_sizes = comm.gather((transform.ishape[0], transform.offset0))\n\n if comm.rank == root:\n r = np.random.uniform(-1., 1., transform.shape)\n else:\n r= None\n rloc = np.empty(transform.ishape, dtype=np.float64)\n comm.Scatterv(r, rloc, root)\n cloc = np.empty(transform.oshape, dtype=np.float64)\n\n times = []\n for i in range(niter):\n t1 = time.perf_counter()\n transform.r2c(rloc, cloc)\n t2 = time.perf_counter()\n times.append(1E3 * (t2 - t1))\n\n return np.mean(times), np.std(times)\n\nif __name__ == '__main__':\n\n janus.fft.parallel.init()\n np.random.seed(20140121)\n\n params = [((128, 128, 128), 15000),\n ((256, 256, 256), 10000),\n ((512, 512, 512), 1000)]\n\n for shape, niter in params:\n mean, std = benchmark(shape, niter)\n if MPI.COMM_WORLD.rank == 0:\n args = map(str, shape + (niter, MPI.COMM_WORLD.size, mean, std))\n print(','.join(args), flush=True)\n\n 
MPI.Finalize()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":476192,"cells":{"repo_name":{"kind":"string","value":"snnn/tensorflow"},"path":{"kind":"string","value":"tensorflow/contrib/data/python/kernel_tests/assert_element_shape_test.py"},"copies":{"kind":"string","value":"8"},"size":{"kind":"string","value":"8680"},"content":{"kind":"string","value":"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the experimental input pipeline ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.data.python.ops import batching\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.platform import test\n\n\nclass AssertElementShapeTest(test_base.DatasetTestBase):\n\n def test_assert_element_shape(self):\n\n def create_dataset(_):\n return (array_ops.ones(2, dtype=dtypes.float32),\n array_ops.zeros((3, 4), dtype=dtypes.int32))\n\n dataset = 
dataset_ops.Dataset.range(5).map(create_dataset)\n expected_shapes = (tensor_shape.TensorShape(2),\n tensor_shape.TensorShape((3, 4)))\n self.assertEqual(expected_shapes, dataset.output_shapes)\n\n result = dataset.apply(batching.assert_element_shape(expected_shapes))\n self.assertEqual(expected_shapes, result.output_shapes)\n\n iterator = result.make_initializable_iterator()\n init_op = iterator.initializer\n get_next = iterator.get_next()\n with self.cached_session() as sess:\n sess.run(init_op)\n for _ in range(5):\n sess.run(get_next)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def test_assert_wrong_element_shape(self):\n\n def create_dataset(_):\n return (array_ops.ones(2, dtype=dtypes.float32),\n array_ops.zeros((3, 4), dtype=dtypes.int32))\n\n dataset = dataset_ops.Dataset.range(3).map(create_dataset)\n wrong_shapes = (tensor_shape.TensorShape(2),\n tensor_shape.TensorShape((3, 10)))\n with self.assertRaises(ValueError):\n dataset.apply(batching.assert_element_shape(wrong_shapes))\n\n def test_assert_element_shape_on_unknown_shape_dataset(self):\n\n def create_unknown_shape_dataset(x):\n return script_ops.py_func(\n lambda _: ( # pylint: disable=g-long-lambda\n np.ones(2, dtype=np.float32),\n np.zeros((3, 4), dtype=np.int32)),\n [x],\n [dtypes.float32, dtypes.int32])\n\n dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)\n unknown_shapes = (tensor_shape.TensorShape(None),\n tensor_shape.TensorShape(None))\n self.assertEqual(unknown_shapes, dataset.output_shapes)\n\n expected_shapes = (tensor_shape.TensorShape(2),\n tensor_shape.TensorShape((3, 4)))\n result = dataset.apply(batching.assert_element_shape(expected_shapes))\n self.assertEqual(expected_shapes, result.output_shapes)\n\n iterator = result.make_initializable_iterator()\n init_op = iterator.initializer\n get_next = iterator.get_next()\n with self.cached_session() as sess:\n sess.run(init_op)\n for _ in range(5):\n sess.run(get_next)\n with 
self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def test_assert_wrong_element_shape_on_unknown_shape_dataset(self):\n\n def create_unknown_shape_dataset(x):\n return script_ops.py_func(\n lambda _: ( # pylint: disable=g-long-lambda\n np.ones(2, dtype=np.float32),\n np.zeros((3, 4), dtype=np.int32)),\n [x],\n [dtypes.float32, dtypes.int32])\n\n dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)\n unknown_shapes = (tensor_shape.TensorShape(None),\n tensor_shape.TensorShape(None))\n self.assertEqual(unknown_shapes, dataset.output_shapes)\n\n wrong_shapes = (tensor_shape.TensorShape(2),\n tensor_shape.TensorShape((3, 10)))\n iterator = (\n dataset.apply(batching.assert_element_shape(wrong_shapes))\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n with self.cached_session() as sess:\n sess.run(init_op)\n with self.assertRaises(errors.InvalidArgumentError):\n sess.run(get_next)\n\n def test_assert_partial_element_shape(self):\n\n def create_dataset(_):\n return (array_ops.ones(2, dtype=dtypes.float32),\n array_ops.zeros((3, 4), dtype=dtypes.int32))\n\n dataset = dataset_ops.Dataset.range(5).map(create_dataset)\n partial_expected_shape = (\n tensor_shape.TensorShape(None), # Unknown shape\n tensor_shape.TensorShape((None, 4))) # Partial shape\n result = dataset.apply(\n batching.assert_element_shape(partial_expected_shape))\n # Partial shapes are merged with actual shapes:\n actual_shapes = (tensor_shape.TensorShape(2),\n tensor_shape.TensorShape((3, 4)))\n self.assertEqual(actual_shapes, result.output_shapes)\n\n iterator = result.make_initializable_iterator()\n init_op = iterator.initializer\n get_next = iterator.get_next()\n with self.cached_session() as sess:\n sess.run(init_op)\n for _ in range(5):\n sess.run(get_next)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def test_assert_wrong_partial_element_shape(self):\n\n def create_dataset(_):\n 
return (array_ops.ones(2, dtype=dtypes.float32),\n array_ops.zeros((3, 4), dtype=dtypes.int32))\n\n dataset = dataset_ops.Dataset.range(3).map(create_dataset)\n wrong_shapes = (tensor_shape.TensorShape(2),\n tensor_shape.TensorShape((None, 10)))\n with self.assertRaises(ValueError):\n dataset.apply(batching.assert_element_shape(wrong_shapes))\n\n def test_assert_partial_element_shape_on_unknown_shape_dataset(self):\n\n def create_unknown_shape_dataset(x):\n return script_ops.py_func(\n lambda _: ( # pylint: disable=g-long-lambda\n np.ones(2, dtype=np.float32),\n np.zeros((3, 4), dtype=np.int32)),\n [x],\n [dtypes.float32, dtypes.int32])\n\n dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)\n unknown_shapes = (tensor_shape.TensorShape(None),\n tensor_shape.TensorShape(None))\n self.assertEqual(unknown_shapes, dataset.output_shapes)\n\n expected_shapes = (tensor_shape.TensorShape(2),\n tensor_shape.TensorShape((None, 4)))\n result = dataset.apply(batching.assert_element_shape(expected_shapes))\n self.assertEqual(expected_shapes, result.output_shapes)\n\n iterator = result.make_initializable_iterator()\n init_op = iterator.initializer\n get_next = iterator.get_next()\n with self.cached_session() as sess:\n sess.run(init_op)\n for _ in range(5):\n sess.run(get_next)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n def test_assert_wrong_partial_element_shape_on_unknown_shape_dataset(self):\n\n def create_unknown_shape_dataset(x):\n return script_ops.py_func(\n lambda _: ( # pylint: disable=g-long-lambda\n np.ones(2, dtype=np.float32),\n np.zeros((3, 4), dtype=np.int32)),\n [x],\n [dtypes.float32, dtypes.int32])\n\n dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)\n unknown_shapes = (tensor_shape.TensorShape(None),\n tensor_shape.TensorShape(None))\n self.assertEqual(unknown_shapes, dataset.output_shapes)\n\n wrong_shapes = (tensor_shape.TensorShape(2),\n tensor_shape.TensorShape((None, 10)))\n 
iterator = (\n dataset.apply(batching.assert_element_shape(wrong_shapes))\n .make_initializable_iterator())\n init_op = iterator.initializer\n get_next = iterator.get_next()\n with self.cached_session() as sess:\n sess.run(init_op)\n with self.assertRaises(errors.InvalidArgumentError):\n sess.run(get_next)\n\n\nif __name__ == \"__main__\":\n test.main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":476193,"cells":{"repo_name":{"kind":"string","value":"alxgu/ansible"},"path":{"kind":"string","value":"lib/ansible/modules/network/f5/bigip_device_group_member.py"},"copies":{"kind":"string","value":"38"},"size":{"kind":"string","value":"8383"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright: (c) 2017, F5 Networks Inc.\n# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['stableinterface'],\n 'supported_by': 'certified'}\n\nDOCUMENTATION = r'''\n---\nmodule: bigip_device_group_member\nshort_description: Manages members in a device group\ndescription:\n - Manages members in a device group. Members in a device group can only\n be added or removed, never updated. This is because the members are\n identified by unique name values and changing that name would invalidate\n the uniqueness.\nversion_added: 2.5\noptions:\n name:\n description:\n - Specifies the name of the device that you want to add to the\n device group. Often this will be the hostname of the device.\n This member must be trusted by the device already. 
Trusting\n can be done with the C(bigip_device_trust) module and the\n C(peer_hostname) option to that module.\n type: str\n required: True\n device_group:\n description:\n - The device group that you want to add the member to.\n type: str\n required: True\n state:\n description:\n - When C(present), ensures that the device group member exists.\n - When C(absent), ensures the device group member is removed.\n type: str\n choices:\n - present\n - absent\n default: present\nextends_documentation_fragment: f5\nauthor:\n - Tim Rupp (@caphrim007)\n - Wojciech Wypior (@wojtek0806)\n'''\n\nEXAMPLES = r'''\n- name: Add the current device to the \"device_trust_group\" device group\n bigip_device_group_member:\n name: \"{{ inventory_hostname }}\"\n device_group: device_trust_group\n provider:\n password: secret\n server: lb.mydomain.com\n user: admin\n delegate_to: localhost\n\n- name: Add the hosts in the current scope to \"device_trust_group\"\n bigip_device_group_member:\n name: \"{{ item }}\"\n device_group: device_trust_group\n provider:\n password: secret\n server: lb.mydomain.com\n user: admin\n loop: \"{{ hostvars.keys() }}\"\n run_once: true\n delegate_to: localhost\n'''\n\nRETURN = r'''\n# only common fields returned\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\n\ntry:\n from library.module_utils.network.f5.bigip import F5RestClient\n from library.module_utils.network.f5.common import F5ModuleError\n from library.module_utils.network.f5.common import AnsibleF5Parameters\n from library.module_utils.network.f5.common import f5_argument_spec\nexcept ImportError:\n from ansible.module_utils.network.f5.bigip import F5RestClient\n from ansible.module_utils.network.f5.common import F5ModuleError\n from ansible.module_utils.network.f5.common import AnsibleF5Parameters\n from ansible.module_utils.network.f5.common import f5_argument_spec\n\n\nclass Parameters(AnsibleF5Parameters):\n api_map = {}\n\n api_attributes = []\n\n returnables = []\n\n updatables = 
[]\n\n\nclass ApiParameters(Parameters):\n pass\n\n\nclass ModuleParameters(Parameters):\n pass\n\n\nclass Changes(Parameters):\n def to_return(self):\n result = {}\n try:\n for returnable in self.returnables:\n change = getattr(self, returnable)\n if isinstance(change, dict):\n result.update(change)\n else:\n result[returnable] = change\n result = self._filter_params(result)\n except Exception:\n pass\n return result\n\n\nclass UsableChanges(Changes):\n pass\n\n\nclass ReportableChanges(Changes):\n pass\n\n\nclass Difference(object):\n pass\n\n\nclass ModuleManager(object):\n def __init__(self, *args, **kwargs):\n self.module = kwargs.get('module', None)\n self.client = F5RestClient(**self.module.params)\n self.want = Parameters(params=self.module.params)\n self.have = None\n self.changes = Changes()\n\n def _set_changed_options(self):\n changed = {}\n for key in Parameters.returnables:\n if getattr(self.want, key) is not None:\n changed[key] = getattr(self.want, key)\n if changed:\n self.changes = Changes(params=changed)\n\n def _announce_deprecations(self, result):\n warnings = result.pop('__warnings', [])\n for warning in warnings:\n self.module.deprecate(\n msg=warning['msg'],\n version=warning['version']\n )\n\n def exec_module(self):\n changed = False\n result = dict()\n state = self.want.state\n\n if state == \"present\":\n changed = self.present()\n elif state == \"absent\":\n changed = self.absent()\n\n reportable = ReportableChanges(params=self.changes.to_return())\n changes = reportable.to_return()\n result.update(**changes)\n result.update(dict(changed=changed))\n self._announce_deprecations(result)\n return result\n\n def present(self):\n if self.exists():\n return False\n else:\n return self.create()\n\n def absent(self):\n if self.exists():\n return self.remove()\n return False\n\n def create(self):\n self._set_changed_options()\n if self.module.check_mode:\n return True\n self.create_on_device()\n return True\n\n def remove(self):\n if 
self.module.check_mode:\n return True\n self.remove_from_device()\n if self.exists():\n raise F5ModuleError(\"Failed to remove the member from the device group.\")\n return True\n\n def exists(self):\n uri = \"https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/{3}\".format(\n self.client.provider['server'],\n self.client.provider['server_port'],\n self.want.device_group,\n self.want.name\n )\n resp = self.client.api.get(uri)\n try:\n response = resp.json()\n except ValueError:\n return False\n if resp.status == 404 or 'code' in response and response['code'] == 404:\n return False\n return True\n\n def create_on_device(self):\n params = self.changes.api_params()\n params['name'] = self.want.name\n params['partition'] = self.want.partition\n uri = \"https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/\".format(\n self.client.provider['server'],\n self.client.provider['server_port'],\n self.want.device_group\n )\n resp = self.client.api.post(uri, json=params)\n try:\n response = resp.json()\n except ValueError as ex:\n raise F5ModuleError(str(ex))\n\n if 'code' in response and response['code'] in [400, 403]:\n if 'message' in response:\n raise F5ModuleError(response['message'])\n else:\n raise F5ModuleError(resp.content)\n\n def remove_from_device(self):\n uri = \"https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/{3}\".format(\n self.client.provider['server'],\n self.client.provider['server_port'],\n self.want.device_group,\n self.want.name\n )\n response = self.client.api.delete(uri)\n if response.status == 200:\n return True\n raise F5ModuleError(response.content)\n\n\nclass ArgumentSpec(object):\n def __init__(self):\n self.supports_check_mode = True\n argument_spec = dict(\n name=dict(required=True),\n device_group=dict(required=True),\n state=dict(\n default='present',\n choices=['absent', 'present']\n ),\n )\n self.argument_spec = {}\n self.argument_spec.update(f5_argument_spec)\n self.argument_spec.update(argument_spec)\n\n\ndef main():\n spec = 
ArgumentSpec()\n\n module = AnsibleModule(\n argument_spec=spec.argument_spec,\n supports_check_mode=spec.supports_check_mode\n )\n\n try:\n mm = ModuleManager(module=module)\n results = mm.exec_module()\n module.exit_json(**results)\n except F5ModuleError as ex:\n module.fail_json(msg=str(ex))\n\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476194,"cells":{"repo_name":{"kind":"string","value":"Lyrositor/moul-scripts"},"path":{"kind":"string","value":"Python/system/random.py"},"copies":{"kind":"string","value":"10"},"size":{"kind":"string","value":"32008"},"content":{"kind":"string","value":"\"\"\"Random variable generators.\n\n integers\n --------\n uniform within range\n\n sequences\n ---------\n pick random element\n pick random sample\n generate random permutation\n\n distributions on the real line:\n ------------------------------\n uniform\n triangular\n normal (Gaussian)\n lognormal\n negative exponential\n gamma\n beta\n pareto\n Weibull\n\n distributions on the circle (angles 0 to 2pi)\n ---------------------------------------------\n circular uniform\n von Mises\n\nGeneral notes on the underlying Mersenne Twister core generator:\n\n* The period is 2**19937-1.\n* It is one of the most extensively tested generators in existence.\n* Without a direct way to compute N steps forward, the semantics of\n jumpahead(n) are weakened to simply jump to another distant state and rely\n on the large period to avoid overlapping sequences.\n* The random() method is implemented in C, executes in a single Python step,\n and is, therefore, threadsafe.\n\n\"\"\"\n\nfrom __future__ import division\nfrom warnings import warn as _warn\nfrom types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType\nfrom math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil\nfrom math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin\nfrom os import urandom as _urandom\nfrom binascii import 
hexlify as _hexlify\nimport hashlib as _hashlib\n\n__all__ = [\"Random\",\"seed\",\"random\",\"uniform\",\"randint\",\"choice\",\"sample\",\n \"randrange\",\"shuffle\",\"normalvariate\",\"lognormvariate\",\n \"expovariate\",\"vonmisesvariate\",\"gammavariate\",\"triangular\",\n \"gauss\",\"betavariate\",\"paretovariate\",\"weibullvariate\",\n \"getstate\",\"setstate\",\"jumpahead\", \"WichmannHill\", \"getrandbits\",\n \"SystemRandom\"]\n\nNV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)\nTWOPI = 2.0*_pi\nLOG4 = _log(4.0)\nSG_MAGICCONST = 1.0 + _log(4.5)\nBPF = 53 # Number of bits in a float\nRECIP_BPF = 2**-BPF\n\n\n# Translated by Guido van Rossum from C source provided by\n# Adrian Baddeley. Adapted by Raymond Hettinger for use with\n# the Mersenne Twister and os.urandom() core generators.\n\nimport _random\n\nclass Random(_random.Random):\n \"\"\"Random number generator base class used by bound module functions.\n\n Used to instantiate instances of Random to get generators that don't\n share state. 
Especially useful for multi-threaded programs, creating\n a different instance of Random for each thread, and using the jumpahead()\n method to ensure that the generated sequences seen by each thread don't\n overlap.\n\n Class Random can also be subclassed if you want to use a different basic\n generator of your own devising: in that case, override the following\n methods: random(), seed(), getstate(), setstate() and jumpahead().\n Optionally, implement a getrandbits() method so that randrange() can cover\n arbitrarily large ranges.\n\n \"\"\"\n\n VERSION = 3 # used by getstate/setstate\n\n def __init__(self, x=None):\n \"\"\"Initialize an instance.\n\n Optional argument x controls seeding, as for Random.seed().\n \"\"\"\n\n self.seed(x)\n self.gauss_next = None\n\n def seed(self, a=None):\n \"\"\"Initialize internal state from hashable object.\n\n None or no argument seeds from current time or from an operating\n system specific randomness source if available.\n\n If a is not None or an int or long, hash(a) is used instead.\n \"\"\"\n\n if a is None:\n try:\n a = long(_hexlify(_urandom(16)), 16)\n except NotImplementedError:\n import time\n a = long(time.time() * 256) # use fractional seconds\n\n super(Random, self).seed(a)\n self.gauss_next = None\n\n def getstate(self):\n \"\"\"Return internal state; can be passed to setstate() later.\"\"\"\n return self.VERSION, super(Random, self).getstate(), self.gauss_next\n\n def setstate(self, state):\n \"\"\"Restore internal state from object returned by getstate().\"\"\"\n version = state[0]\n if version == 3:\n version, internalstate, self.gauss_next = state\n super(Random, self).setstate(internalstate)\n elif version == 2:\n version, internalstate, self.gauss_next = state\n # In version 2, the state was saved as signed ints, which causes\n # inconsistencies between 32/64-bit systems. 
The state is\n # really unsigned 32-bit ints, so we convert negative ints from\n # version 2 to positive longs for version 3.\n try:\n internalstate = tuple( long(x) % (2**32) for x in internalstate )\n except ValueError, e:\n raise TypeError, e\n super(Random, self).setstate(internalstate)\n else:\n raise ValueError(\"state with version %s passed to \"\n \"Random.setstate() of version %s\" %\n (version, self.VERSION))\n\n def jumpahead(self, n):\n \"\"\"Change the internal state to one that is likely far away\n from the current state. This method will not be in Py3.x,\n so it is better to simply reseed.\n \"\"\"\n # The super.jumpahead() method uses shuffling to change state,\n # so it needs a large and \"interesting\" n to work with. Here,\n # we use hashing to create a large n for the shuffle.\n s = repr(n) + repr(self.getstate())\n n = int(_hashlib.new('sha512', s).hexdigest(), 16)\n super(Random, self).jumpahead(n)\n\n## ---- Methods below this point do not need to be overridden when\n## ---- subclassing for the purpose of using a different core generator.\n\n## -------------------- pickle support -------------------\n\n def __getstate__(self): # for pickle\n return self.getstate()\n\n def __setstate__(self, state): # for pickle\n self.setstate(state)\n\n def __reduce__(self):\n return self.__class__, (), self.getstate()\n\n## -------------------- integer methods -------------------\n\n def randrange(self, start, stop=None, step=1, int=int, default=None,\n maxwidth=1L< 0:\n if istart >= maxwidth:\n return self._randbelow(istart)\n return int(self.random() * istart)\n raise ValueError, \"empty range for randrange()\"\n\n # stop argument supplied.\n istop = int(stop)\n if istop != stop:\n raise ValueError, \"non-integer stop for randrange()\"\n width = istop - istart\n if step == 1 and width > 0:\n # Note that\n # int(istart + self.random()*width)\n # instead would be incorrect. For example, consider istart\n # = -2 and istop = 0. 
Then the guts would be in\n # -2.0 to 0.0 exclusive on both ends (ignoring that random()\n # might return 0.0), and because int() truncates toward 0, the\n # final result would be -1 or 0 (instead of -2 or -1).\n # istart + int(self.random()*width)\n # would also be incorrect, for a subtler reason: the RHS\n # can return a long, and then randrange() would also return\n # a long, but we're supposed to return an int (for backward\n # compatibility).\n\n if width >= maxwidth:\n return int(istart + self._randbelow(width))\n return int(istart + int(self.random()*width))\n if step == 1:\n raise ValueError, \"empty range for randrange() (%d,%d, %d)\" % (istart, istop, width)\n\n # Non-unit step argument supplied.\n istep = int(step)\n if istep != step:\n raise ValueError, \"non-integer step for randrange()\"\n if istep > 0:\n n = (width + istep - 1) // istep\n elif istep < 0:\n n = (width + istep + 1) // istep\n else:\n raise ValueError, \"zero step for randrange()\"\n\n if n <= 0:\n raise ValueError, \"empty range for randrange()\"\n\n if n >= maxwidth:\n return istart + istep*self._randbelow(n)\n return istart + istep*int(self.random() * n)\n\n def randint(self, a, b):\n \"\"\"Return random integer in range [a, b], including both end points.\n \"\"\"\n\n return self.randrange(a, b+1)\n\n def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L< n-1 > 2**(k-2)\n r = getrandbits(k)\n while r >= n:\n r = getrandbits(k)\n return r\n if n >= _maxwidth:\n _warn(\"Underlying random() generator does not supply \\n\"\n \"enough bits to choose from a population range this large\")\n return int(self.random() * n)\n\n## -------------------- sequence methods -------------------\n\n def choice(self, seq):\n \"\"\"Choose a random element from a non-empty sequence.\"\"\"\n return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty\n\n def shuffle(self, x, random=None, int=int):\n \"\"\"x, random=random.random -> shuffle list x in place; return None.\n\n Optional arg 
random is a 0-argument function returning a random\n float in [0.0, 1.0); by default, the standard random.random.\n \"\"\"\n\n if random is None:\n random = self.random\n for i in reversed(xrange(1, len(x))):\n # pick an element in x[:i+1] with which to exchange x[i]\n j = int(random() * (i+1))\n x[i], x[j] = x[j], x[i]\n\n def sample(self, population, k):\n \"\"\"Chooses k unique random elements from a population sequence.\n\n Returns a new list containing elements from the population while\n leaving the original population unchanged. The resulting list is\n in selection order so that all sub-slices will also be valid random\n samples. This allows raffle winners (the sample) to be partitioned\n into grand prize and second place winners (the subslices).\n\n Members of the population need not be hashable or unique. If the\n population contains repeats, then each occurrence is a possible\n selection in the sample.\n\n To choose a sample in a range of integers, use xrange as an argument.\n This is especially fast and space efficient for sampling from a\n large population: sample(xrange(10000000), 60)\n \"\"\"\n\n # Sampling without replacement entails tracking either potential\n # selections (the pool) in a list or previous selections in a set.\n\n # When the number of selections is small compared to the\n # population, then tracking selections is efficient, requiring\n # only a small set and an occasional reselection. 
For\n # a larger number of selections, the pool tracking method is\n # preferred since the list takes less space than the\n # set and it doesn't suffer from frequent reselections.\n\n n = len(population)\n if not 0 <= k <= n:\n raise ValueError, \"sample larger than population\"\n random = self.random\n _int = int\n result = [None] * k\n setsize = 21 # size of a small set minus size of an empty list\n if k > 5:\n setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets\n if n <= setsize or hasattr(population, \"keys\"):\n # An n-length list is smaller than a k-length set, or this is a\n # mapping type so the other algorithm wouldn't work.\n pool = list(population)\n for i in xrange(k): # invariant: non-selected at [0,n-i)\n j = _int(random() * (n-i))\n result[i] = pool[j]\n pool[j] = pool[n-i-1] # move non-selected item into vacancy\n else:\n try:\n selected = set()\n selected_add = selected.add\n for i in xrange(k):\n j = _int(random() * n)\n while j in selected:\n j = _int(random() * n)\n selected_add(j)\n result[i] = population[j]\n except (TypeError, KeyError): # handle (at least) sets\n if isinstance(population, list):\n raise\n return self.sample(tuple(population), k)\n return result\n\n## -------------------- real-valued distributions -------------------\n\n## -------------------- uniform distribution -------------------\n\n def uniform(self, a, b):\n \"Get a random number in the range [a, b) or [a, b] depending on rounding.\"\n return a + (b-a) * self.random()\n\n## -------------------- triangular --------------------\n\n def triangular(self, low=0.0, high=1.0, mode=None):\n \"\"\"Triangular distribution.\n\n Continuous distribution bounded by given lower and upper limits,\n and having a given mode value in-between.\n\n http://en.wikipedia.org/wiki/Triangular_distribution\n\n \"\"\"\n u = self.random()\n c = 0.5 if mode is None else (mode - low) / (high - low)\n if u > c:\n u = 1.0 - u\n c = 1.0 - c\n low, high = high, low\n return low + (high - low) 
* (u * c) ** 0.5\n\n## -------------------- normal distribution --------------------\n\n def normalvariate(self, mu, sigma):\n \"\"\"Normal distribution.\n\n mu is the mean, and sigma is the standard deviation.\n\n \"\"\"\n # mu = mean, sigma = standard deviation\n\n # Uses Kinderman and Monahan method. Reference: Kinderman,\n # A.J. and Monahan, J.F., \"Computer generation of random\n # variables using the ratio of uniform deviates\", ACM Trans\n # Math Software, 3, (1977), pp257-260.\n\n random = self.random\n while 1:\n u1 = random()\n u2 = 1.0 - random()\n z = NV_MAGICCONST*(u1-0.5)/u2\n zz = z*z/4.0\n if zz <= -_log(u2):\n break\n return mu + z*sigma\n\n## -------------------- lognormal distribution --------------------\n\n def lognormvariate(self, mu, sigma):\n \"\"\"Log normal distribution.\n\n If you take the natural logarithm of this distribution, you'll get a\n normal distribution with mean mu and standard deviation sigma.\n mu can have any value, and sigma must be greater than zero.\n\n \"\"\"\n return _exp(self.normalvariate(mu, sigma))\n\n## -------------------- exponential distribution --------------------\n\n def expovariate(self, lambd):\n \"\"\"Exponential distribution.\n\n lambd is 1.0 divided by the desired mean. It should be\n nonzero. (The parameter would be called \"lambda\", but that is\n a reserved word in Python.) Returned values range from 0 to\n positive infinity if lambd is positive, and from negative\n infinity to 0 if lambd is negative.\n\n \"\"\"\n # lambd: rate lambd = 1/mean\n # ('lambda' is a Python reserved word)\n\n random = self.random\n u = random()\n while u <= 1e-7:\n u = random()\n return -_log(u)/lambd\n\n## -------------------- von Mises distribution --------------------\n\n def vonmisesvariate(self, mu, kappa):\n \"\"\"Circular data distribution.\n\n mu is the mean angle, expressed in radians between 0 and 2*pi, and\n kappa is the concentration parameter, which must be greater than or\n equal to zero. 
If kappa is equal to zero, this distribution reduces\n to a uniform random angle over the range 0 to 2*pi.\n\n \"\"\"\n # mu: mean angle (in radians between 0 and 2*pi)\n # kappa: concentration parameter kappa (>= 0)\n # if kappa = 0 generate uniform random angle\n\n # Based upon an algorithm published in: Fisher, N.I.,\n # \"Statistical Analysis of Circular Data\", Cambridge\n # University Press, 1993.\n\n # Thanks to Magnus Kessler for a correction to the\n # implementation of step 4.\n\n random = self.random\n if kappa <= 1e-6:\n return TWOPI * random()\n\n a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)\n b = (a - _sqrt(2.0 * a))/(2.0 * kappa)\n r = (1.0 + b * b)/(2.0 * b)\n\n while 1:\n u1 = random()\n\n z = _cos(_pi * u1)\n f = (1.0 + r * z)/(r + z)\n c = kappa * (r - f)\n\n u2 = random()\n\n if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):\n break\n\n u3 = random()\n if u3 > 0.5:\n theta = (mu % TWOPI) + _acos(f)\n else:\n theta = (mu % TWOPI) - _acos(f)\n\n return theta\n\n## -------------------- gamma distribution --------------------\n\n def gammavariate(self, alpha, beta):\n \"\"\"Gamma distribution. Not the gamma function!\n\n Conditions on the parameters are alpha > 0 and beta > 0.\n\n \"\"\"\n\n # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2\n\n # Warning: a few older sources define the gamma distribution in terms\n # of alpha > -1.0\n if alpha <= 0.0 or beta <= 0.0:\n raise ValueError, 'gammavariate: alpha and beta must be > 0.0'\n\n random = self.random\n if alpha > 1.0:\n\n # Uses R.C.H. Cheng, \"The generation of Gamma\n # variables with non-integral shape parameters\",\n # Applied Statistics, (1977), 26, No. 
1, p71-74\n\n ainv = _sqrt(2.0 * alpha - 1.0)\n bbb = alpha - LOG4\n ccc = alpha + ainv\n\n while 1:\n u1 = random()\n if not 1e-7 < u1 < .9999999:\n continue\n u2 = 1.0 - random()\n v = _log(u1/(1.0-u1))/ainv\n x = alpha*_exp(v)\n z = u1*u1*u2\n r = bbb+ccc*v-x\n if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):\n return x * beta\n\n elif alpha == 1.0:\n # expovariate(1)\n u = random()\n while u <= 1e-7:\n u = random()\n return -_log(u) * beta\n\n else: # alpha is between 0 and 1 (exclusive)\n\n # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle\n\n while 1:\n u = random()\n b = (_e + alpha)/_e\n p = b*u\n if p <= 1.0:\n x = p ** (1.0/alpha)\n else:\n x = -_log((b-p)/alpha)\n u1 = random()\n if p > 1.0:\n if u1 <= x ** (alpha - 1.0):\n break\n elif u1 <= _exp(-x):\n break\n return x * beta\n\n## -------------------- Gauss (faster alternative) --------------------\n\n def gauss(self, mu, sigma):\n \"\"\"Gaussian distribution.\n\n mu is the mean, and sigma is the standard deviation. This is\n slightly faster than the normalvariate() function.\n\n Not thread-safe without a lock around calls.\n\n \"\"\"\n\n # When x and y are two variables from [0, 1), uniformly\n # distributed, then\n #\n # cos(2*pi*x)*sqrt(-2*log(1-y))\n # sin(2*pi*x)*sqrt(-2*log(1-y))\n #\n # are two *independent* variables with normal distribution\n # (mu = 0, sigma = 1).\n # (Lambert Meertens)\n # (corrected version; bug discovered by Mike Miller, fixed by LM)\n\n # Multithreading note: When two threads call this function\n # simultaneously, it is possible that they will receive the\n # same return value. The window is very small though. To\n # avoid this, you have to use a lock around all calls. 
(I\n # didn't want to slow this down in the serial case by using a\n # lock here.)\n\n random = self.random\n z = self.gauss_next\n self.gauss_next = None\n if z is None:\n x2pi = random() * TWOPI\n g2rad = _sqrt(-2.0 * _log(1.0 - random()))\n z = _cos(x2pi) * g2rad\n self.gauss_next = _sin(x2pi) * g2rad\n\n return mu + z*sigma\n\n## -------------------- beta --------------------\n## See\n## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470\n## for Ivan Frohne's insightful analysis of why the original implementation:\n##\n## def betavariate(self, alpha, beta):\n## # Discrete Event Simulation in C, pp 87-88.\n##\n## y = self.expovariate(alpha)\n## z = self.expovariate(1.0/beta)\n## return z/(y+z)\n##\n## was dead wrong, and how it probably got that way.\n\n def betavariate(self, alpha, beta):\n \"\"\"Beta distribution.\n\n Conditions on the parameters are alpha > 0 and beta > 0.\n Returned values range between 0 and 1.\n\n \"\"\"\n\n # This version due to Janne Sinkkonen, and matches all the std\n # texts (e.g., Knuth Vol 2 Ed 3 pg 134 \"the beta distribution\").\n y = self.gammavariate(alpha, 1.)\n if y == 0:\n return 0.0\n else:\n return y / (y + self.gammavariate(beta, 1.))\n\n## -------------------- Pareto --------------------\n\n def paretovariate(self, alpha):\n \"\"\"Pareto distribution. alpha is the shape parameter.\"\"\"\n # Jain, pg. 495\n\n u = 1.0 - self.random()\n return 1.0 / pow(u, 1.0/alpha)\n\n## -------------------- Weibull --------------------\n\n def weibullvariate(self, alpha, beta):\n \"\"\"Weibull distribution.\n\n alpha is the scale parameter and beta is the shape parameter.\n\n \"\"\"\n # Jain, pg. 
499; bug fix courtesy Bill Arms\n\n u = 1.0 - self.random()\n return alpha * pow(-_log(u), 1.0/beta)\n\n## -------------------- Wichmann-Hill -------------------\n\nclass WichmannHill(Random):\n\n VERSION = 1 # used by getstate/setstate\n\n def seed(self, a=None):\n \"\"\"Initialize internal state from hashable object.\n\n None or no argument seeds from current time or from an operating\n system specific randomness source if available.\n\n If a is not None or an int or long, hash(a) is used instead.\n\n If a is an int or long, a is used directly. Distinct values between\n 0 and 27814431486575L inclusive are guaranteed to yield distinct\n internal states (this guarantee is specific to the default\n Wichmann-Hill generator).\n \"\"\"\n\n if a is None:\n try:\n a = long(_hexlify(_urandom(16)), 16)\n except NotImplementedError:\n import time\n a = long(time.time() * 256) # use fractional seconds\n\n if not isinstance(a, (int, long)):\n a = hash(a)\n\n a, x = divmod(a, 30268)\n a, y = divmod(a, 30306)\n a, z = divmod(a, 30322)\n self._seed = int(x)+1, int(y)+1, int(z)+1\n\n self.gauss_next = None\n\n def random(self):\n \"\"\"Get the next random number in the range [0.0, 1.0).\"\"\"\n\n # Wichman-Hill random number generator.\n #\n # Wichmann, B. A. & Hill, I. D. (1982)\n # Algorithm AS 183:\n # An efficient and portable pseudo-random number generator\n # Applied Statistics 31 (1982) 188-190\n #\n # see also:\n # Correction to Algorithm AS 183\n # Applied Statistics 33 (1984) 123\n #\n # McLeod, A. I. 
(1985)\n # A remark on Algorithm AS 183\n # Applied Statistics 34 (1985),198-200\n\n # This part is thread-unsafe:\n # BEGIN CRITICAL SECTION\n x, y, z = self._seed\n x = (171 * x) % 30269\n y = (172 * y) % 30307\n z = (170 * z) % 30323\n self._seed = x, y, z\n # END CRITICAL SECTION\n\n # Note: on a platform using IEEE-754 double arithmetic, this can\n # never return 0.0 (asserted by Tim; proof too long for a comment).\n return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0\n\n def getstate(self):\n \"\"\"Return internal state; can be passed to setstate() later.\"\"\"\n return self.VERSION, self._seed, self.gauss_next\n\n def setstate(self, state):\n \"\"\"Restore internal state from object returned by getstate().\"\"\"\n version = state[0]\n if version == 1:\n version, self._seed, self.gauss_next = state\n else:\n raise ValueError(\"state with version %s passed to \"\n \"Random.setstate() of version %s\" %\n (version, self.VERSION))\n\n def jumpahead(self, n):\n \"\"\"Act as if n calls to random() were made, but quickly.\n\n n is an int, greater than or equal to 0.\n\n Example use: If you have 2 threads and know that each will\n consume no more than a million random numbers, create two Random\n objects r1 and r2, then do\n r2.setstate(r1.getstate())\n r2.jumpahead(1000000)\n Then r1 and r2 will use guaranteed-disjoint segments of the full\n period.\n \"\"\"\n\n if not n >= 0:\n raise ValueError(\"n must be >= 0\")\n x, y, z = self._seed\n x = int(x * pow(171, n, 30269)) % 30269\n y = int(y * pow(172, n, 30307)) % 30307\n z = int(z * pow(170, n, 30323)) % 30323\n self._seed = x, y, z\n\n def __whseed(self, x=0, y=0, z=0):\n \"\"\"Set the Wichmann-Hill seed from (x, y, z).\n\n These must be integers in the range [0, 256).\n \"\"\"\n\n if not type(x) == type(y) == type(z) == int:\n raise TypeError('seeds must be integers')\n if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):\n raise ValueError('seeds must be in range(0, 256)')\n if 0 == x == y == z:\n # Initialize 
from current time\n import time\n t = long(time.time() * 256)\n t = int((t&0xffffff) ^ (t>>24))\n t, x = divmod(t, 256)\n t, y = divmod(t, 256)\n t, z = divmod(t, 256)\n # Zero is a poor seed, so substitute 1\n self._seed = (x or 1, y or 1, z or 1)\n\n self.gauss_next = None\n\n def whseed(self, a=None):\n \"\"\"Seed from hashable object's hash code.\n\n None or no argument seeds from current time. It is not guaranteed\n that objects with distinct hash codes lead to distinct internal\n states.\n\n This is obsolete, provided for compatibility with the seed routine\n used prior to Python 2.1. Use the .seed() method instead.\n \"\"\"\n\n if a is None:\n self.__whseed()\n return\n a = hash(a)\n a, x = divmod(a, 256)\n a, y = divmod(a, 256)\n a, z = divmod(a, 256)\n x = (x + a) % 256 or 1\n y = (y + a) % 256 or 1\n z = (z + a) % 256 or 1\n self.__whseed(x, y, z)\n\n## --------------- Operating System Random Source ------------------\n\nclass SystemRandom(Random):\n \"\"\"Alternate random number generator using sources provided\n by the operating system (such as /dev/urandom on Unix or\n CryptGenRandom on Windows).\n\n Not available on all systems (see os.urandom() for details).\n \"\"\"\n\n def random(self):\n \"\"\"Get the next random number in the range [0.0, 1.0).\"\"\"\n return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF\n\n def getrandbits(self, k):\n \"\"\"getrandbits(k) -> x. Generates a long int with k random bits.\"\"\"\n if k <= 0:\n raise ValueError('number of bits must be greater than zero')\n if k != int(k):\n raise TypeError('number of bits should be an integer')\n bytes = (k + 7) // 8 # bits / 8 and rounded up\n x = long(_hexlify(_urandom(bytes)), 16)\n return x >> (bytes * 8 - k) # trim excess bits\n\n def _stub(self, *args, **kwds):\n \"Stub method. 
Not used for a system random number generator.\"\n return None\n seed = jumpahead = _stub\n\n def _notimplemented(self, *args, **kwds):\n \"Method should not be called for a system random number generator.\"\n raise NotImplementedError('System entropy source does not have state.')\n getstate = setstate = _notimplemented\n\n## -------------------- test program --------------------\n\ndef _test_generator(n, func, args):\n import time\n print n, 'times', func.__name__\n total = 0.0\n sqsum = 0.0\n smallest = 1e10\n largest = -1e10\n t0 = time.time()\n for i in range(n):\n x = func(*args)\n total += x\n sqsum = sqsum + x*x\n smallest = min(x, smallest)\n largest = max(x, largest)\n t1 = time.time()\n print round(t1-t0, 3), 'sec,',\n avg = total/n\n stddev = _sqrt(sqsum/n - avg*avg)\n print 'avg %g, stddev %g, min %g, max %g' % \\\n (avg, stddev, smallest, largest)\n\n\ndef _test(N=2000):\n _test_generator(N, random, ())\n _test_generator(N, normalvariate, (0.0, 1.0))\n _test_generator(N, lognormvariate, (0.0, 1.0))\n _test_generator(N, vonmisesvariate, (0.0, 1.0))\n _test_generator(N, gammavariate, (0.01, 1.0))\n _test_generator(N, gammavariate, (0.1, 1.0))\n _test_generator(N, gammavariate, (0.1, 2.0))\n _test_generator(N, gammavariate, (0.5, 1.0))\n _test_generator(N, gammavariate, (0.9, 1.0))\n _test_generator(N, gammavariate, (1.0, 1.0))\n _test_generator(N, gammavariate, (2.0, 1.0))\n _test_generator(N, gammavariate, (20.0, 1.0))\n _test_generator(N, gammavariate, (200.0, 1.0))\n _test_generator(N, gauss, (0.0, 1.0))\n _test_generator(N, betavariate, (3.0, 3.0))\n _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))\n\n# Create one instance, seeded from current time, and export its methods\n# as module-level functions. 
The functions share state across all uses\n#(both in the user's code and in the Python libraries), but that's fine\n# for most programs and is easier for the casual user than making them\n# instantiate their own Random() instance.\n\n_inst = Random()\nseed = _inst.seed\nrandom = _inst.random\nuniform = _inst.uniform\ntriangular = _inst.triangular\nrandint = _inst.randint\nchoice = _inst.choice\nrandrange = _inst.randrange\nsample = _inst.sample\nshuffle = _inst.shuffle\nnormalvariate = _inst.normalvariate\nlognormvariate = _inst.lognormvariate\nexpovariate = _inst.expovariate\nvonmisesvariate = _inst.vonmisesvariate\ngammavariate = _inst.gammavariate\ngauss = _inst.gauss\nbetavariate = _inst.betavariate\nparetovariate = _inst.paretovariate\nweibullvariate = _inst.weibullvariate\ngetstate = _inst.getstate\nsetstate = _inst.setstate\njumpahead = _inst.jumpahead\ngetrandbits = _inst.getrandbits\n\nif __name__ == '__main__':\n _test()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":476195,"cells":{"repo_name":{"kind":"string","value":"pllim/astropy"},"path":{"kind":"string","value":"astropy/io/fits/column.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"98069"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see PYFITS.rst\n\nimport copy\nimport operator\nimport re\nimport sys\nimport warnings\nimport weakref\nimport numbers\n\nfrom functools import reduce\nfrom collections import OrderedDict\nfrom contextlib import suppress\n\nimport numpy as np\nfrom numpy import char as chararray\n\nfrom .card import Card, CARD_LENGTH\nfrom .util import (pairwise, _is_int, _convert_array, encode_ascii, cmp,\n NotifierMixin)\nfrom .verify import VerifyError, VerifyWarning\n\nfrom astropy.utils import lazyproperty, isiterable, indent\nfrom astropy.utils.exceptions import AstropyUserWarning\n\n__all__ = ['Column', 'ColDefs', 'Delayed']\n\n\n# mapping from TFORM data type to numpy data type (code)\n# L: 
Logical (Boolean)\n# B: Unsigned Byte\n# I: 16-bit Integer\n# J: 32-bit Integer\n# K: 64-bit Integer\n# E: Single-precision Floating Point\n# D: Double-precision Floating Point\n# C: Single-precision Complex\n# M: Double-precision Complex\n# A: Character\nFITS2NUMPY = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8', 'E': 'f4',\n 'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'}\n\n# the inverse dictionary of the above\nNUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}\n# Normally booleans are represented as ints in Astropy, but if passed in a numpy\n# boolean array, that should be supported\nNUMPY2FITS['b1'] = 'L'\n# Add unsigned types, which will be stored as signed ints with a TZERO card.\nNUMPY2FITS['u2'] = 'I'\nNUMPY2FITS['u4'] = 'J'\nNUMPY2FITS['u8'] = 'K'\n# Add half precision floating point numbers which will be up-converted to\n# single precision.\nNUMPY2FITS['f2'] = 'E'\n\n# This is the order in which values are converted to FITS types\n# Note that only double precision floating point/complex are supported\nFORMATORDER = ['L', 'B', 'I', 'J', 'K', 'D', 'M', 'A']\n\n# Convert single precision floating point/complex to double precision.\nFITSUPCONVERTERS = {'E': 'D', 'C': 'M'}\n\n# mapping from ASCII table TFORM data type to numpy data type\n# A: Character\n# I: Integer (32-bit)\n# J: Integer (64-bit; non-standard)\n# F: Float (64-bit; fixed decimal notation)\n# E: Float (64-bit; exponential notation)\n# D: Float (64-bit; exponential notation, always 64-bit by convention)\nASCII2NUMPY = {'A': 'a', 'I': 'i4', 'J': 'i8', 'F': 'f8', 'E': 'f8', 'D': 'f8'}\n\n# Maps FITS ASCII column format codes to the appropriate Python string\n# formatting codes for that type.\nASCII2STR = {'A': '', 'I': 'd', 'J': 'd', 'F': 'f', 'E': 'E', 'D': 'E'}\n\n# For each ASCII table format code, provides a default width (and decimal\n# precision) for when one isn't given explicitly in the column format\nASCII_DEFAULT_WIDTHS = {'A': (1, 0), 'I': (10, 0), 'J': (15, 0),\n 'E': 
(15, 7), 'F': (16, 7), 'D': (25, 17)}\n\n# TDISPn for both ASCII and Binary tables\nTDISP_RE_DICT = {}\nTDISP_RE_DICT['F'] = re.compile(r'(?:(?P[F])(?:(?P[0-9]+)\\.{1}'\n r'(?P[0-9])+)+)|')\nTDISP_RE_DICT['A'] = TDISP_RE_DICT['L'] = \\\n re.compile(r'(?:(?P[AL])(?P[0-9]+)+)|')\nTDISP_RE_DICT['I'] = TDISP_RE_DICT['B'] = \\\n TDISP_RE_DICT['O'] = TDISP_RE_DICT['Z'] = \\\n re.compile(r'(?:(?P[IBOZ])(?:(?P[0-9]+)'\n r'(?:\\.{0,1}(?P[0-9]+))?))|')\nTDISP_RE_DICT['E'] = TDISP_RE_DICT['G'] = \\\n TDISP_RE_DICT['D'] = \\\n re.compile(r'(?:(?P[EGD])(?:(?P[0-9]+)\\.'\n r'(?P[0-9]+))+)'\n r'(?:E{0,1}(?P[0-9]+)?)|')\nTDISP_RE_DICT['EN'] = TDISP_RE_DICT['ES'] = \\\n re.compile(r'(?:(?PE[NS])(?:(?P[0-9]+)\\.{1}'\n r'(?P[0-9])+)+)')\n\n# mapping from TDISP format to python format\n# A: Character\n# L: Logical (Boolean)\n# I: 16-bit Integer\n# Can't predefine zero padding and space padding before hand without\n# knowing the value being formatted, so grabbing precision and using that\n# to zero pad, ignoring width. 
Same with B, O, and Z\n# B: Binary Integer\n# O: Octal Integer\n# Z: Hexadecimal Integer\n# F: Float (64-bit; fixed decimal notation)\n# EN: Float (engineering fortran format, exponential multiple of thee\n# ES: Float (scientific, same as EN but non-zero leading digit\n# E: Float, exponential notation\n# Can't get exponential restriction to work without knowing value\n# before hand, so just using width and precision, same with D, G, EN, and\n# ES formats\n# D: Double-precision Floating Point with exponential\n# (E but for double precision)\n# G: Double-precision Floating Point, may or may not show exponent\nTDISP_FMT_DICT = {\n 'I': '{{:{width}d}}',\n 'B': '{{:{width}b}}',\n 'O': '{{:{width}o}}',\n 'Z': '{{:{width}x}}',\n 'F': '{{:{width}.{precision}f}}',\n 'G': '{{:{width}.{precision}g}}'\n}\nTDISP_FMT_DICT['A'] = TDISP_FMT_DICT['L'] = '{{:>{width}}}'\nTDISP_FMT_DICT['E'] = TDISP_FMT_DICT['D'] = \\\n TDISP_FMT_DICT['EN'] = TDISP_FMT_DICT['ES'] = '{{:{width}.{precision}e}}'\n\n# tuple of column/field definition common names and keyword names, make\n# sure to preserve the one-to-one correspondence when updating the list(s).\n# Use lists, instead of dictionaries so the names can be displayed in a\n# preferred order.\nKEYWORD_NAMES = ('TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO',\n 'TDISP', 'TBCOL', 'TDIM', 'TCTYP', 'TCUNI', 'TCRPX',\n 'TCRVL', 'TCDLT', 'TRPOS')\nKEYWORD_ATTRIBUTES = ('name', 'format', 'unit', 'null', 'bscale', 'bzero',\n 'disp', 'start', 'dim', 'coord_type', 'coord_unit',\n 'coord_ref_point', 'coord_ref_value', 'coord_inc',\n 'time_ref_pos')\n\"\"\"This is a list of the attributes that can be set on `Column` objects.\"\"\"\n\n\nKEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))\n\nATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))\n\n\n# TODO: Define a list of default comments to associate with each table keyword\n\n# TFORMn regular expression\nTFORMAT_RE = 
re.compile(r'(?P^[0-9]*)(?P[LXBIJKAEDCMPQ])'\n r'(?P