{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); ','\\n%s\\n' % tables)\n\n # temporarily uncomment next two lines to baseline this test\n #with open('test_difflib_expect.html','w') as fp:\n # fp.write(actual)\n\n with open(findfile('test_difflib_expect.html')) as fp:\n self.assertEqual(actual, fp.read())\n\n def test_recursion_limit(self):\n # Check if the problem described in patch #1413711 exists.\n limit = sys.getrecursionlimit()\n old = [(i%2 and \"K:%d\" or \"V:A:%d\") % i for i in range(limit*2)]\n new = [(i%2 and \"K:%d\" or \"V:B:%d\") % i for i in range(limit*2)]\n difflib.SequenceMatcher(None, old, new).get_opcodes()\n\n\nclass TestOutputFormat(unittest.TestCase):\n def test_tab_delimiter(self):\n args = ['one', 'two', 'Original', 'Current',\n '2005-01-26 23:30:50', '2010-04-02 10:20:52']\n ud = difflib.unified_diff(*args, lineterm='')\n self.assertEqual(list(ud)[0:2], [\n \"--- Original\\t2005-01-26 23:30:50\",\n \"+++ Current\\t2010-04-02 10:20:52\"])\n cd = difflib.context_diff(*args, lineterm='')\n self.assertEqual(list(cd)[0:2], [\n \"*** Original\\t2005-01-26 23:30:50\",\n \"--- Current\\t2010-04-02 10:20:52\"])\n\n def test_no_trailing_tab_on_empty_filedate(self):\n args = ['one', 'two', 'Original', 'Current']\n ud = difflib.unified_diff(*args, lineterm='')\n self.assertEqual(list(ud)[0:2], [\"--- Original\", \"+++ Current\"])\n\n cd = difflib.context_diff(*args, lineterm='')\n self.assertEqual(list(cd)[0:2], [\"*** Original\", \"--- Current\"])\n\n def test_range_format_unified(self):\n # Per the diff spec at http://www.unix.org/single_unix_specification/\n spec = '''\\\n Each field shall be of the form:\n %1d\", if the range contains exactly one line,\n and:\n \"%1d,%1d\", , otherwise.\n If a range is empty, its beginning line number shall be the number of\n the line just before the range, or 0 if the empty range starts the file.\n '''\n fmt = difflib._format_range_unified\n self.assertEqual(fmt(3,3), '3,0')\n self.assertEqual(fmt(3,4), '4')\n self.assertEqual(fmt(3,5), '4,2')\n self.assertEqual(fmt(3,6), '4,3')\n self.assertEqual(fmt(0,0), '0,0')\n\n def test_range_format_context(self):\n # Per the diff spec at http://www.unix.org/single_unix_specification/\n spec = '''\\\n The range of lines in file1 shall be written in the following format\n if the range contains two or more lines:\n \"*** %d,%d ****\\n\", , \n and the following format otherwise:\n \"*** %d ****\\n\", \n The ending line number of an empty range shall be the number of the preceding line,\n or 0 if the range is at the start of the file.\n\n Next, the range of lines in file2 shall be written in the following format\n if the range contains two or more lines:\n \"--- %d,%d ----\\n\", , \n and the following format otherwise:\n \"--- %d ----\\n\", \n '''\n fmt = difflib._format_range_context\n self.assertEqual(fmt(3,3), '3')\n self.assertEqual(fmt(3,4), '4')\n self.assertEqual(fmt(3,5), '4,5')\n self.assertEqual(fmt(3,6), '4,6')\n self.assertEqual(fmt(0,0), '0')\n\n\ndef test_main():\n difflib.HtmlDiff._default_prefix = 0\n Doctests = doctest.DocTestSuite(difflib)\n run_unittest(\n TestWithAscii, TestAutojunk, TestSFpatches, TestSFbugs,\n TestOutputFormat, Doctests)\n\nif __name__ == '__main__':\n test_main()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203635,"cells":{"repo_name":{"kind":"string","value":"sergiohgz/incubator-airflow"},"path":{"kind":"string","value":"tests/contrib/operators/test_discord_webhook_operator.py"},"copies":{"kind":"string","value":"15"},"size":{"kind":"string","value":"2403"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\nimport unittest\n\nfrom airflow import DAG, configuration\n\nfrom airflow.contrib.operators.discord_webhook_operator import DiscordWebhookOperator\nfrom airflow.utils import timezone\n\nDEFAULT_DATE = timezone.datetime(2018, 1, 1)\n\n\nclass TestDiscordWebhookOperator(unittest.TestCase):\n _config = {\n 'http_conn_id': 'discord-webhook-default',\n 'webhook_endpoint': 'webhooks/11111/some-discord-token_111',\n 'message': 'your message here',\n 'username': 'Airflow Webhook',\n 'avatar_url': 'https://static-cdn.avatars.com/my-avatar-path',\n 'tts': False,\n 'proxy': 'https://proxy.proxy.com:8888'\n }\n\n def setUp(self):\n configuration.load_test_config()\n args = {\n 'owner': 'airflow',\n 'start_date': DEFAULT_DATE\n }\n self.dag = DAG('test_dag_id', default_args=args)\n\n def test_execute(self):\n operator = DiscordWebhookOperator(\n task_id='discord_webhook_task',\n dag=self.dag,\n **self._config\n )\n\n self.assertEqual(self._config['http_conn_id'], operator.http_conn_id)\n self.assertEqual(self._config['webhook_endpoint'], operator.webhook_endpoint)\n self.assertEqual(self._config['message'], operator.message)\n self.assertEqual(self._config['username'], operator.username)\n self.assertEqual(self._config['avatar_url'], operator.avatar_url)\n self.assertEqual(self._config['tts'], operator.tts)\n self.assertEqual(self._config['proxy'], operator.proxy)\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203636,"cells":{"repo_name":{"kind":"string","value":"Tesla-Redux-Devices/android_kernel_mediatek_sprout"},"path":{"kind":"string","value":"arch/ia64/scripts/unwcheck.py"},"copies":{"kind":"string","value":"13143"},"size":{"kind":"string","value":"1714"},"content":{"kind":"string","value":"#!/usr/bin/python\n#\n# Usage: unwcheck.py FILE\n#\n# This script checks the unwind info of each function in file FILE\n# and verifies that the sum of the region-lengths matches the total\n# length of the function.\n#\n# Based on a shell/awk script originally written by Harish Patil,\n# which was converted to Perl by Matthew Chapman, which was converted\n# to Python by David Mosberger.\n#\nimport os\nimport re\nimport sys\n\nif len(sys.argv) != 2:\n print \"Usage: %s FILE\" % sys.argv[0]\n sys.exit(2)\n\nreadelf = os.getenv(\"READELF\", \"readelf\")\n\nstart_pattern = re.compile(\"<([^>]*)>: \\[0x([0-9a-f]+)-0x([0-9a-f]+)\\]\")\nrlen_pattern = re.compile(\".*rlen=([0-9]+)\")\n\ndef check_func (func, slots, rlen_sum):\n if slots != rlen_sum:\n global num_errors\n num_errors += 1\n if not func: func = \"[%#x-%#x]\" % (start, end)\n print \"ERROR: %s: %lu slots, total region length = %lu\" % (func, slots, rlen_sum)\n return\n\nnum_funcs = 0\nnum_errors = 0\nfunc = False\nslots = 0\nrlen_sum = 0\nfor line in os.popen(\"%s -u %s\" % (readelf, sys.argv[1])):\n m = start_pattern.match(line)\n if m:\n check_func(func, slots, rlen_sum)\n\n func = m.group(1)\n start = long(m.group(2), 16)\n end = long(m.group(3), 16)\n slots = 3 * (end - start) / 16\n rlen_sum = 0L\n num_funcs += 1\n else:\n m = rlen_pattern.match(line)\n if m:\n rlen_sum += long(m.group(1))\ncheck_func(func, slots, rlen_sum)\n\nif num_errors == 0:\n print \"No errors detected in %u functions.\" % num_funcs\nelse:\n if num_errors > 1:\n err=\"errors\"\n else:\n err=\"error\"\n print \"%u %s detected in %u functions.\" % (num_errors, err, num_funcs)\n sys.exit(1)\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203637,"cells":{"repo_name":{"kind":"string","value":"rollenrolm/godot"},"path":{"kind":"string","value":"doc/tools/doc_merge.py"},"copies":{"kind":"string","value":"22"},"size":{"kind":"string","value":"5046"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport xml.etree.ElementTree as ET\n\n\ntree = ET.parse(sys.argv[1])\nold_doc=tree.getroot()\n\ntree = ET.parse(sys.argv[2])\nnew_doc=tree.getroot()\n\nf = file(sys.argv[3],\"wb\")\ntab=0\n\nold_classes={}\n\ndef write_string(_f, text,newline=True):\n\tfor t in range(tab):\n\t\t_f.write(\"\\t\")\n\t_f.write(text)\n\tif (newline):\n\t\t_f.write(\"\\n\")\n\ndef escape(ret):\n\tret=ret.replace(\"&\",\"&amp;\");\n\tret=ret.replace(\"<\",\"&gt;\");\n\tret=ret.replace(\">\",\"&lt;\");\n\tret=ret.replace(\"'\",\"&apos;\");\n\tret=ret.replace(\"\\\"\",\"&quot;\");\n\treturn ret\n\n\ndef inc_tab():\n\tglobal tab\n\ttab+=1\n\ndef dec_tab():\n\tglobal tab\n\ttab-=1\n\nwrite_string(f,'')\nwrite_string(f,'')\n\ndef get_tag(node,name):\n\ttag=\"\"\n\tif (name in node.attrib):\n\t\ttag=' '+name+'=\"'+escape(node.attrib[name])+'\" '\n\treturn tag\n\ndef find_method_descr(old_class,name):\n\n\tmethods = old_class.find(\"methods\")\n\tif(methods!=None and len(list(methods))>0):\n\t\tfor m in list(methods):\n\t\t\tif (m.attrib[\"name\"]==name):\n\t\t\t\tdescription=m.find(\"description\")\n\t\t\t\tif (description!=None and description.text.strip()!=\"\"):\n\t\t\t\t\treturn description.text\n\n\treturn None\n\ndef find_signal_descr(old_class,name):\n\n\tsignals = old_class.find(\"signals\")\n\tif(signals!=None and len(list(signals))>0):\n\t\tfor m in list(signals):\n\t\t\tif (m.attrib[\"name\"]==name):\n\t\t\t\tdescription=m.find(\"description\")\n\t\t\t\tif (description!=None and description.text.strip()!=\"\"):\n\t\t\t\t\treturn description.text\n\n\treturn None\n\ndef find_constant_descr(old_class,name):\n\n\tif (old_class==None):\n\t\treturn None\n\tconstants = old_class.find(\"constants\")\n\tif(constants!=None and len(list(constants))>0):\n\t\tfor m in list(constants):\n\t\t\tif (m.attrib[\"name\"]==name):\n\t\t\t\tif (m.text.strip()!=\"\"):\n\t\t\t\t\treturn m.text\n\treturn None\n\ndef write_class(c):\n\tclass_name = c.attrib[\"name\"]\n\tprint(\"Parsing Class: \"+class_name)\n\tif (class_name in old_classes):\n\t\told_class=old_classes[class_name]\n\telse:\n\t\told_class=None\n\n\n\tcategory=get_tag(c,\"category\")\n\tinherits=get_tag(c,\"inherits\")\n\twrite_string(f,'')\n\tinc_tab()\n\n\twrite_string(f,\"\")\n\n\tif (old_class!=None):\n\t\told_brief_descr=old_class.find(\"brief_description\")\n\t\tif (old_brief_descr!=None):\n\t\t\twrite_string(f,escape(old_brief_descr.text.strip()))\n\n\n\twrite_string(f,\"\")\n\n\twrite_string(f,\"\")\n\tif (old_class!=None):\n\t\told_descr=old_class.find(\"description\")\n\t\tif (old_descr!=None):\n\t\t\twrite_string(f,escape(old_descr.text.strip()))\n\n\twrite_string(f,\"\")\n\n\tmethods = c.find(\"methods\")\n\tif(methods!=None and len(list(methods))>0):\n\n\t\twrite_string(f,\"\")\n\t\tinc_tab()\n\n\t\tfor m in list(methods):\n\t\t\tqualifiers=get_tag(m,\"qualifiers\")\n\n\t\t\twrite_string(f,'')\n\t\t\tinc_tab()\n\n\t\t\tfor a in list(m):\n\t\t\t\tif (a.tag==\"return\"):\n\t\t\t\t\ttyp=get_tag(a,\"type\")\n\t\t\t\t\twrite_string(f,'');\n\t\t\t\t\twrite_string(f,'');\n\t\t\t\telif (a.tag==\"argument\"):\n\n\t\t\t\t\tdefault=get_tag(a,\"default\")\n\n\t\t\t\t\twrite_string(f,'');\n\t\t\t\t\twrite_string(f,'');\n\n\t\t\twrite_string(f,'');\n\t\t\tif (old_class!=None):\n\t\t\t\told_method_descr=find_method_descr(old_class,m.attrib[\"name\"])\n\t\t\t\tif (old_method_descr):\n\t\t\t\t\twrite_string(f,escape(escape(old_method_descr.strip())))\n\n\t\t\twrite_string(f,'');\n\t\t\tdec_tab()\n\t\t\twrite_string(f,\"\")\n\t\tdec_tab()\n\t\twrite_string(f,\"\")\n\n\tsignals = c.find(\"signals\")\n\tif(signals!=None and len(list(signals))>0):\n\n\t\twrite_string(f,\"\")\n\t\tinc_tab()\n\n\t\tfor m in list(signals):\n\n\t\t\twrite_string(f,'')\n\t\t\tinc_tab()\n\n\t\t\tfor a in list(m):\n\t\t\t\tif (a.tag==\"argument\"):\n\n\t\t\t\t\twrite_string(f,'');\n\t\t\t\t\twrite_string(f,'');\n\n\t\t\twrite_string(f,'');\n\t\t\tif (old_class!=None):\n\t\t\t\told_signal_descr=find_signal_descr(old_class,m.attrib[\"name\"])\n\t\t\t\tif (old_signal_descr):\n\t\t\t\t\twrite_string(f,escape(old_signal_descr.strip()))\n\t\t\twrite_string(f,'');\n\t\t\tdec_tab()\n\t\t\twrite_string(f,\"\")\n\t\tdec_tab()\n\t\twrite_string(f,\"\")\n\n\tconstants = c.find(\"constants\")\n\tif(constants!=None and len(list(constants))>0):\n\n\t\twrite_string(f,\"\")\n\t\tinc_tab()\n\n\t\tfor m in list(constants):\n\n\t\t\twrite_string(f,'')\n\t\t\told_constant_descr=find_constant_descr(old_class,m.attrib[\"name\"])\n\t\t\tif (old_constant_descr):\n\t\t\t\twrite_string(f,escape(old_constant_descr.strip()))\n\t\t\twrite_string(f,\"\")\n\n\t\tdec_tab()\n\t\twrite_string(f,\"\")\n\n\tdec_tab()\n\twrite_string(f,\"\")\n\nfor c in list(old_doc):\n\told_classes[c.attrib[\"name\"]]=c\n\nfor c in list(new_doc):\n\twrite_class(c)\nwrite_string(f,'\\n')\n\n\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203638,"cells":{"repo_name":{"kind":"string","value":"bobellis/ghost_blog"},"path":{"kind":"string","value":"node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/_openedgebuiltins.py"},"copies":{"kind":"string","value":"370"},"size":{"kind":"string","value":"40661"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\n pygments.lexers._openedgebuiltins\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Builtin list for the OpenEdgeLexer.\n\n :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nOPENEDGEKEYWORDS = [\n 'ABSOLUTE', 'ABS', 'ABSO', 'ABSOL', 'ABSOLU', 'ABSOLUT', 'ACCELERATOR',\n 'ACCUM', 'ACCUMULATE', 'ACCUM', 'ACCUMU', 'ACCUMUL', 'ACCUMULA',\n 'ACCUMULAT', 'ACTIVE-FORM', 'ACTIVE-WINDOW', 'ADD', 'ADD-BUFFER',\n 'ADD-CALC-COLUMN', 'ADD-COLUMNS-FROM', 'ADD-EVENTS-PROCEDURE',\n 'ADD-FIELDS-FROM', 'ADD-FIRST', 'ADD-INDEX-FIELD', 'ADD-LAST',\n 'ADD-LIKE-COLUMN', 'ADD-LIKE-FIELD', 'ADD-LIKE-INDEX', 'ADD-NEW-FIELD',\n 'ADD-NEW-INDEX', 'ADD-SCHEMA-LOCATION', 'ADD-SUPER-PROCEDURE', 'ADM-DATA',\n 'ADVISE', 'ALERT-BOX', 'ALIAS', 'ALL', 'ALLOW-COLUMN-SEARCHING',\n 'ALLOW-REPLICATION', 'ALTER', 'ALWAYS-ON-TOP', 'AMBIGUOUS', 'AMBIG',\n 'AMBIGU', 'AMBIGUO', 'AMBIGUOU', 'ANALYZE', 'ANALYZ', 'AND', 'ANSI-ONLY',\n 'ANY', 'ANYWHERE', 'APPEND', 'APPL-ALERT-BOXES', 'APPL-ALERT',\n 'APPL-ALERT-', 'APPL-ALERT-B', 'APPL-ALERT-BO', 'APPL-ALERT-BOX',\n 'APPL-ALERT-BOXE', 'APPL-CONTEXT-ID', 'APPLICATION', 'APPLY',\n 'APPSERVER-INFO', 'APPSERVER-PASSWORD', 'APPSERVER-USERID', 'ARRAY-MESSAGE',\n 'AS', 'ASC', 'ASCENDING', 'ASCE', 'ASCEN', 'ASCEND', 'ASCENDI', 'ASCENDIN',\n 'ASK-OVERWRITE', 'ASSEMBLY', 'ASSIGN', 'ASYNCHRONOUS',\n 'ASYNC-REQUEST-COUNT', 'ASYNC-REQUEST-HANDLE', 'AT', 'ATTACHED-PAIRLIST',\n 'ATTR-SPACE', 'ATTR', 'ATTRI', 'ATTRIB', 'ATTRIBU', 'ATTRIBUT',\n 'AUDIT-CONTROL', 'AUDIT-ENABLED', 'AUDIT-EVENT-CONTEXT', 'AUDIT-POLICY',\n 'AUTHENTICATION-FAILED', 'AUTHORIZATION', 'AUTO-COMPLETION', 'AUTO-COMP',\n 'AUTO-COMPL', 'AUTO-COMPLE', 'AUTO-COMPLET', 'AUTO-COMPLETI',\n 'AUTO-COMPLETIO', 'AUTO-ENDKEY', 'AUTO-END-KEY', 'AUTO-GO', 'AUTO-INDENT',\n 'AUTO-IND', 'AUTO-INDE', 'AUTO-INDEN', 'AUTOMATIC', 'AUTO-RESIZE',\n 'AUTO-RETURN', 'AUTO-RET', 'AUTO-RETU', 'AUTO-RETUR', 'AUTO-SYNCHRONIZE',\n 'AUTO-ZAP', 'AUTO-Z', 'AUTO-ZA', 'AVAILABLE', 'AVAIL', 'AVAILA', 'AVAILAB',\n 'AVAILABL', 'AVAILABLE-FORMATS', 'AVERAGE', 'AVE', 'AVER', 'AVERA',\n 'AVERAG', 'AVG', 'BACKGROUND', 'BACK', 'BACKG', 'BACKGR', 'BACKGRO',\n 'BACKGROU', 'BACKGROUN', 'BACKWARDS', 'BACKWARD', 'BASE64-DECODE',\n 'BASE64-ENCODE', 'BASE-ADE', 'BASE-KEY', 'BATCH-MODE', 'BATCH', 'BATCH-',\n 'BATCH-M', 'BATCH-MO', 'BATCH-MOD', 'BATCH-SIZE', 'BEFORE-HIDE', 'BEFORE-H',\n 'BEFORE-HI', 'BEFORE-HID', 'BEGIN-EVENT-GROUP', 'BEGINS', 'BELL', 'BETWEEN',\n 'BGCOLOR', 'BGC', 'BGCO', 'BGCOL', 'BGCOLO', 'BIG-ENDIAN', 'BINARY', 'BIND',\n 'BIND-WHERE', 'BLANK', 'BLOCK-ITERATION-DISPLAY', 'BORDER-BOTTOM-CHARS',\n 'BORDER-B', 'BORDER-BO', 'BORDER-BOT', 'BORDER-BOTT', 'BORDER-BOTTO',\n 'BORDER-BOTTOM-PIXELS', 'BORDER-BOTTOM-P', 'BORDER-BOTTOM-PI',\n 'BORDER-BOTTOM-PIX', 'BORDER-BOTTOM-PIXE', 'BORDER-BOTTOM-PIXEL',\n 'BORDER-LEFT-CHARS', 'BORDER-L', 'BORDER-LE', 'BORDER-LEF', 'BORDER-LEFT',\n 'BORDER-LEFT-', 'BORDER-LEFT-C', 'BORDER-LEFT-CH', 'BORDER-LEFT-CHA',\n 'BORDER-LEFT-CHAR', 'BORDER-LEFT-PIXELS', 'BORDER-LEFT-P', 'BORDER-LEFT-PI',\n 'BORDER-LEFT-PIX', 'BORDER-LEFT-PIXE', 'BORDER-LEFT-PIXEL',\n 'BORDER-RIGHT-CHARS', 'BORDER-R', 'BORDER-RI', 'BORDER-RIG', 'BORDER-RIGH',\n 'BORDER-RIGHT', 'BORDER-RIGHT-', 'BORDER-RIGHT-C', 'BORDER-RIGHT-CH',\n 'BORDER-RIGHT-CHA', 'BORDER-RIGHT-CHAR', 'BORDER-RIGHT-PIXELS',\n 'BORDER-RIGHT-P', 'BORDER-RIGHT-PI', 'BORDER-RIGHT-PIX',\n 'BORDER-RIGHT-PIXE', 'BORDER-RIGHT-PIXEL', 'BORDER-TOP-CHARS', 'BORDER-T',\n 'BORDER-TO', 'BORDER-TOP', 'BORDER-TOP-', 'BORDER-TOP-C', 'BORDER-TOP-CH',\n 'BORDER-TOP-CHA', 'BORDER-TOP-CHAR', 'BORDER-TOP-PIXELS', 'BORDER-TOP-P',\n 'BORDER-TOP-PI', 'BORDER-TOP-PIX', 'BORDER-TOP-PIXE', 'BORDER-TOP-PIXEL',\n 'BOX', 'BOX-SELECTABLE', 'BOX-SELECT', 'BOX-SELECTA', 'BOX-SELECTAB',\n 'BOX-SELECTABL', 'BREAK', 'BROWSE', 'BUFFER', 'BUFFER-CHARS',\n 'BUFFER-COMPARE', 'BUFFER-COPY', 'BUFFER-CREATE', 'BUFFER-DELETE',\n 'BUFFER-FIELD', 'BUFFER-HANDLE', 'BUFFER-LINES', 'BUFFER-NAME',\n 'BUFFER-RELEASE', 'BUFFER-VALUE', 'BUTTON', 'BUTTONS', 'BUTTON', 'BY',\n 'BY-POINTER', 'BY-VARIANT-POINTER', 'CACHE', 'CACHE-SIZE', 'CALL',\n 'CALL-NAME', 'CALL-TYPE', 'CANCEL-BREAK', 'CANCEL-BUTTON', 'CAN-CREATE',\n 'CAN-DELETE', 'CAN-DO', 'CAN-FIND', 'CAN-QUERY', 'CAN-READ', 'CAN-SET',\n 'CAN-WRITE', 'CAPS', 'CAREFUL-PAINT', 'CASE', 'CASE-SENSITIVE', 'CASE-SEN',\n 'CASE-SENS', 'CASE-SENSI', 'CASE-SENSIT', 'CASE-SENSITI', 'CASE-SENSITIV',\n 'CAST', 'CATCH', 'CDECL', 'CENTERED', 'CENTER', 'CENTERE', 'CHAINED',\n 'CHARACTER_LENGTH', 'CHARSET', 'CHECK', 'CHECKED', 'CHOOSE', 'CHR', 'CLASS',\n 'CLASS-TYPE', 'CLEAR', 'CLEAR-APPL-CONTEXT', 'CLEAR-LOG', 'CLEAR-SELECTION',\n 'CLEAR-SELECT', 'CLEAR-SELECTI', 'CLEAR-SELECTIO', 'CLEAR-SORT-ARROWS',\n 'CLEAR-SORT-ARROW', 'CLIENT-CONNECTION-ID', 'CLIENT-PRINCIPAL',\n 'CLIENT-TTY', 'CLIENT-TYPE', 'CLIENT-WORKSTATION', 'CLIPBOARD', 'CLOSE',\n 'CLOSE-LOG', 'CODE', 'CODEBASE-LOCATOR', 'CODEPAGE', 'CODEPAGE-CONVERT',\n 'COLLATE', 'COL-OF', 'COLON', 'COLON-ALIGNED', 'COLON-ALIGN',\n 'COLON-ALIGNE', 'COLOR', 'COLOR-TABLE', 'COLUMN', 'COL', 'COLU', 'COLUM',\n 'COLUMN-BGCOLOR', 'COLUMN-DCOLOR', 'COLUMN-FGCOLOR', 'COLUMN-FONT',\n 'COLUMN-LABEL', 'COLUMN-LAB', 'COLUMN-LABE', 'COLUMN-MOVABLE', 'COLUMN-OF',\n 'COLUMN-PFCOLOR', 'COLUMN-READ-ONLY', 'COLUMN-RESIZABLE', 'COLUMNS',\n 'COLUMN-SCROLLING', 'COMBO-BOX', 'COMMAND', 'COMPARES', 'COMPILE',\n 'COMPILER', 'COMPLETE', 'COM-SELF', 'CONFIG-NAME', 'CONNECT', 'CONNECTED',\n 'CONSTRUCTOR', 'CONTAINS', 'CONTENTS', 'CONTEXT', 'CONTEXT-HELP',\n 'CONTEXT-HELP-FILE', 'CONTEXT-HELP-ID', 'CONTEXT-POPUP', 'CONTROL',\n 'CONTROL-BOX', 'CONTROL-FRAME', 'CONVERT', 'CONVERT-3D-COLORS',\n 'CONVERT-TO-OFFSET', 'CONVERT-TO-OFFS', 'CONVERT-TO-OFFSE', 'COPY-DATASET',\n 'COPY-LOB', 'COPY-SAX-ATTRIBUTES', 'COPY-TEMP-TABLE', 'COUNT', 'COUNT-OF',\n 'CPCASE', 'CPCOLL', 'CPINTERNAL', 'CPLOG', 'CPPRINT', 'CPRCODEIN',\n 'CPRCODEOUT', 'CPSTREAM', 'CPTERM', 'CRC-VALUE', 'CREATE', 'CREATE-LIKE',\n 'CREATE-LIKE-SEQUENTIAL', 'CREATE-NODE-NAMESPACE',\n 'CREATE-RESULT-LIST-ENTRY', 'CREATE-TEST-FILE', 'CURRENT', 'CURRENT_DATE',\n 'CURRENT_DATE', 'CURRENT-CHANGED', 'CURRENT-COLUMN', 'CURRENT-ENVIRONMENT',\n 'CURRENT-ENV', 'CURRENT-ENVI', 'CURRENT-ENVIR', 'CURRENT-ENVIRO',\n 'CURRENT-ENVIRON', 'CURRENT-ENVIRONM', 'CURRENT-ENVIRONME',\n 'CURRENT-ENVIRONMEN', 'CURRENT-ITERATION', 'CURRENT-LANGUAGE',\n 'CURRENT-LANG', 'CURRENT-LANGU', 'CURRENT-LANGUA', 'CURRENT-LANGUAG',\n 'CURRENT-QUERY', 'CURRENT-RESULT-ROW', 'CURRENT-ROW-MODIFIED',\n 'CURRENT-VALUE', 'CURRENT-WINDOW', 'CURSOR', 'CURS', 'CURSO', 'CURSOR-CHAR',\n 'CURSOR-LINE', 'CURSOR-OFFSET', 'DATABASE', 'DATA-BIND',\n 'DATA-ENTRY-RETURN', 'DATA-ENTRY-RET', 'DATA-ENTRY-RETU',\n 'DATA-ENTRY-RETUR', 'DATA-RELATION', 'DATA-REL', 'DATA-RELA', 'DATA-RELAT',\n 'DATA-RELATI', 'DATA-RELATIO', 'DATASERVERS', 'DATASET', 'DATASET-HANDLE',\n 'DATA-SOURCE', 'DATA-SOURCE-COMPLETE-MAP', 'DATA-SOURCE-MODIFIED',\n 'DATA-SOURCE-ROWID', 'DATA-TYPE', 'DATA-T', 'DATA-TY', 'DATA-TYP',\n 'DATE-FORMAT', 'DATE-F', 'DATE-FO', 'DATE-FOR', 'DATE-FORM', 'DATE-FORMA',\n 'DAY', 'DBCODEPAGE', 'DBCOLLATION', 'DBNAME', 'DBPARAM', 'DB-REFERENCES',\n 'DBRESTRICTIONS', 'DBREST', 'DBRESTR', 'DBRESTRI', 'DBRESTRIC',\n 'DBRESTRICT', 'DBRESTRICTI', 'DBRESTRICTIO', 'DBRESTRICTION', 'DBTASKID',\n 'DBTYPE', 'DBVERSION', 'DBVERS', 'DBVERSI', 'DBVERSIO', 'DCOLOR', 'DDE',\n 'DDE-ERROR', 'DDE-ID', 'DDE-I', 'DDE-ITEM', 'DDE-NAME', 'DDE-TOPIC',\n 'DEBLANK', 'DEBUG', 'DEBU', 'DEBUG-ALERT', 'DEBUGGER', 'DEBUG-LIST',\n 'DECIMALS', 'DECLARE', 'DECLARE-NAMESPACE', 'DECRYPT', 'DEFAULT',\n 'DEFAULT-BUFFER-HANDLE', 'DEFAULT-BUTTON', 'DEFAUT-B', 'DEFAUT-BU',\n 'DEFAUT-BUT', 'DEFAUT-BUTT', 'DEFAUT-BUTTO', 'DEFAULT-COMMIT',\n 'DEFAULT-EXTENSION', 'DEFAULT-EX', 'DEFAULT-EXT', 'DEFAULT-EXTE',\n 'DEFAULT-EXTEN', 'DEFAULT-EXTENS', 'DEFAULT-EXTENSI', 'DEFAULT-EXTENSIO',\n 'DEFAULT-NOXLATE', 'DEFAULT-NOXL', 'DEFAULT-NOXLA', 'DEFAULT-NOXLAT',\n 'DEFAULT-VALUE', 'DEFAULT-WINDOW', 'DEFINED', 'DEFINE-USER-EVENT-MANAGER',\n 'DELETE', 'DEL', 'DELE', 'DELET', 'DELETE-CHARACTER', 'DELETE-CHAR',\n 'DELETE-CHARA', 'DELETE-CHARAC', 'DELETE-CHARACT', 'DELETE-CHARACTE',\n 'DELETE-CURRENT-ROW', 'DELETE-LINE', 'DELETE-RESULT-LIST-ENTRY',\n 'DELETE-SELECTED-ROW', 'DELETE-SELECTED-ROWS', 'DELIMITER', 'DESC',\n 'DESCENDING', 'DESC', 'DESCE', 'DESCEN', 'DESCEND', 'DESCENDI', 'DESCENDIN',\n 'DESELECT-FOCUSED-ROW', 'DESELECTION', 'DESELECT-ROWS',\n 'DESELECT-SELECTED-ROW', 'DESTRUCTOR', 'DIALOG-BOX', 'DICTIONARY', 'DICT',\n 'DICTI', 'DICTIO', 'DICTION', 'DICTIONA', 'DICTIONAR', 'DIR', 'DISABLE',\n 'DISABLE-AUTO-ZAP', 'DISABLED', 'DISABLE-DUMP-TRIGGERS',\n 'DISABLE-LOAD-TRIGGERS', 'DISCONNECT', 'DISCON', 'DISCONN', 'DISCONNE',\n 'DISCONNEC', 'DISP', 'DISPLAY', 'DISP', 'DISPL', 'DISPLA',\n 'DISPLAY-MESSAGE', 'DISPLAY-TYPE', 'DISPLAY-T', 'DISPLAY-TY', 'DISPLAY-TYP',\n 'DISTINCT', 'DO', 'DOMAIN-DESCRIPTION', 'DOMAIN-NAME', 'DOMAIN-TYPE', 'DOS',\n 'DOUBLE', 'DOWN', 'DRAG-ENABLED', 'DROP', 'DROP-DOWN', 'DROP-DOWN-LIST',\n 'DROP-FILE-NOTIFY', 'DROP-TARGET', 'DUMP', 'DYNAMIC', 'DYNAMIC-FUNCTION',\n 'EACH', 'ECHO', 'EDGE-CHARS', 'EDGE', 'EDGE-', 'EDGE-C', 'EDGE-CH',\n 'EDGE-CHA', 'EDGE-CHAR', 'EDGE-PIXELS', 'EDGE-P', 'EDGE-PI', 'EDGE-PIX',\n 'EDGE-PIXE', 'EDGE-PIXEL', 'EDIT-CAN-PASTE', 'EDIT-CAN-UNDO', 'EDIT-CLEAR',\n 'EDIT-COPY', 'EDIT-CUT', 'EDITING', 'EDITOR', 'EDIT-PASTE', 'EDIT-UNDO',\n 'ELSE', 'EMPTY', 'EMPTY-TEMP-TABLE', 'ENABLE', 'ENABLED-FIELDS', 'ENCODE',\n 'ENCRYPT', 'ENCRYPT-AUDIT-MAC-KEY', 'ENCRYPTION-SALT', 'END',\n 'END-DOCUMENT', 'END-ELEMENT', 'END-EVENT-GROUP', 'END-FILE-DROP', 'ENDKEY',\n 'END-KEY', 'END-MOVE', 'END-RESIZE', 'END-ROW-RESIZE', 'END-USER-PROMPT',\n 'ENTERED', 'ENTRY', 'EQ', 'ERROR', 'ERROR-COLUMN', 'ERROR-COL',\n 'ERROR-COLU', 'ERROR-COLUM', 'ERROR-ROW', 'ERROR-STACK-TRACE',\n 'ERROR-STATUS', 'ERROR-STAT', 'ERROR-STATU', 'ESCAPE', 'ETIME',\n 'EVENT-GROUP-ID', 'EVENT-PROCEDURE', 'EVENT-PROCEDURE-CONTEXT', 'EVENTS',\n 'EVENT', 'EVENT-TYPE', 'EVENT-T', 'EVENT-TY', 'EVENT-TYP', 'EXCEPT',\n 'EXCLUSIVE-ID', 'EXCLUSIVE-LOCK', 'EXCLUSIVE', 'EXCLUSIVE-', 'EXCLUSIVE-L',\n 'EXCLUSIVE-LO', 'EXCLUSIVE-LOC', 'EXCLUSIVE-WEB-USER', 'EXECUTE', 'EXISTS',\n 'EXP', 'EXPAND', 'EXPANDABLE', 'EXPLICIT', 'EXPORT', 'EXPORT-PRINCIPAL',\n 'EXTENDED', 'EXTENT', 'EXTERNAL', 'FALSE', 'FETCH', 'FETCH-SELECTED-ROW',\n 'FGCOLOR', 'FGC', 'FGCO', 'FGCOL', 'FGCOLO', 'FIELD', 'FIELDS', 'FIELD',\n 'FILE', 'FILE-CREATE-DATE', 'FILE-CREATE-TIME', 'FILE-INFORMATION',\n 'FILE-INFO', 'FILE-INFOR', 'FILE-INFORM', 'FILE-INFORMA', 'FILE-INFORMAT',\n 'FILE-INFORMATI', 'FILE-INFORMATIO', 'FILE-MOD-DATE', 'FILE-MOD-TIME',\n 'FILENAME', 'FILE-NAME', 'FILE-OFFSET', 'FILE-OFF', 'FILE-OFFS',\n 'FILE-OFFSE', 'FILE-SIZE', 'FILE-TYPE', 'FILL', 'FILLED', 'FILL-IN',\n 'FILTERS', 'FINAL', 'FINALLY', 'FIND', 'FIND-BY-ROWID',\n 'FIND-CASE-SENSITIVE', 'FIND-CURRENT', 'FINDER', 'FIND-FIRST',\n 'FIND-GLOBAL', 'FIND-LAST', 'FIND-NEXT-OCCURRENCE', 'FIND-PREV-OCCURRENCE',\n 'FIND-SELECT', 'FIND-UNIQUE', 'FIND-WRAP-AROUND', 'FIRST',\n 'FIRST-ASYNCH-REQUEST', 'FIRST-CHILD', 'FIRST-COLUMN', 'FIRST-FORM',\n 'FIRST-OBJECT', 'FIRST-OF', 'FIRST-PROCEDURE', 'FIRST-PROC', 'FIRST-PROCE',\n 'FIRST-PROCED', 'FIRST-PROCEDU', 'FIRST-PROCEDUR', 'FIRST-SERVER',\n 'FIRST-TAB-ITEM', 'FIRST-TAB-I', 'FIRST-TAB-IT', 'FIRST-TAB-ITE',\n 'FIT-LAST-COLUMN', 'FIXED-ONLY', 'FLAT-BUTTON', 'FLOAT', 'FOCUS',\n 'FOCUSED-ROW', 'FOCUSED-ROW-SELECTED', 'FONT', 'FONT-TABLE', 'FOR',\n 'FORCE-FILE', 'FOREGROUND', 'FORE', 'FOREG', 'FOREGR', 'FOREGRO',\n 'FOREGROU', 'FOREGROUN', 'FORM', 'FORMAT', 'FORM', 'FORMA', 'FORMATTED',\n 'FORMATTE', 'FORM-LONG-INPUT', 'FORWARD', 'FORWARDS', 'FORWARD', 'FRAGMENT',\n 'FRAGMEN', 'FRAME', 'FRAM', 'FRAME-COL', 'FRAME-DB', 'FRAME-DOWN',\n 'FRAME-FIELD', 'FRAME-FILE', 'FRAME-INDEX', 'FRAME-INDE', 'FRAME-LINE',\n 'FRAME-NAME', 'FRAME-ROW', 'FRAME-SPACING', 'FRAME-SPA', 'FRAME-SPAC',\n 'FRAME-SPACI', 'FRAME-SPACIN', 'FRAME-VALUE', 'FRAME-VAL', 'FRAME-VALU',\n 'FRAME-X', 'FRAME-Y', 'FREQUENCY', 'FROM', 'FROM-CHARS', 'FROM-C',\n 'FROM-CH', 'FROM-CHA', 'FROM-CHAR', 'FROM-CURRENT', 'FROM-CUR', 'FROM-CURR',\n 'FROM-CURRE', 'FROM-CURREN', 'FROM-PIXELS', 'FROM-P', 'FROM-PI', 'FROM-PIX',\n 'FROM-PIXE', 'FROM-PIXEL', 'FULL-HEIGHT-CHARS', 'FULL-HEIGHT',\n 'FULL-HEIGHT-', 'FULL-HEIGHT-C', 'FULL-HEIGHT-CH', 'FULL-HEIGHT-CHA',\n 'FULL-HEIGHT-CHAR', 'FULL-HEIGHT-PIXELS', 'FULL-HEIGHT-P', 'FULL-HEIGHT-PI',\n 'FULL-HEIGHT-PIX', 'FULL-HEIGHT-PIXE', 'FULL-HEIGHT-PIXEL', 'FULL-PATHNAME',\n 'FULL-PATHN', 'FULL-PATHNA', 'FULL-PATHNAM', 'FULL-WIDTH-CHARS',\n 'FULL-WIDTH', 'FULL-WIDTH-', 'FULL-WIDTH-C', 'FULL-WIDTH-CH',\n 'FULL-WIDTH-CHA', 'FULL-WIDTH-CHAR', 'FULL-WIDTH-PIXELS', 'FULL-WIDTH-P',\n 'FULL-WIDTH-PI', 'FULL-WIDTH-PIX', 'FULL-WIDTH-PIXE', 'FULL-WIDTH-PIXEL',\n 'FUNCTION', 'FUNCTION-CALL-TYPE', 'GATEWAYS', 'GATEWAY', 'GE',\n 'GENERATE-MD5', 'GENERATE-PBE-KEY', 'GENERATE-PBE-SALT',\n 'GENERATE-RANDOM-KEY', 'GENERATE-UUID', 'GET', 'GET-ATTR-CALL-TYPE',\n 'GET-ATTRIBUTE-NODE', 'GET-BINARY-DATA', 'GET-BLUE-VALUE', 'GET-BLUE',\n 'GET-BLUE-', 'GET-BLUE-V', 'GET-BLUE-VA', 'GET-BLUE-VAL', 'GET-BLUE-VALU',\n 'GET-BROWSE-COLUMN', 'GET-BUFFER-HANDLEGETBYTE', 'GET-BYTE',\n 'GET-CALLBACK-PROC-CONTEXT', 'GET-CALLBACK-PROC-NAME', 'GET-CGI-LIST',\n 'GET-CGI-LONG-VALUE', 'GET-CGI-VALUE', 'GET-CODEPAGES', 'GET-COLLATIONS',\n 'GET-CONFIG-VALUE', 'GET-CURRENT', 'GET-DOUBLE', 'GET-DROPPED-FILE',\n 'GET-DYNAMIC', 'GET-ERROR-COLUMN', 'GET-ERROR-ROW', 'GET-FILE',\n 'GET-FILE-NAME', 'GET-FILE-OFFSET', 'GET-FILE-OFFSE', 'GET-FIRST',\n 'GET-FLOAT', 'GET-GREEN-VALUE', 'GET-GREEN', 'GET-GREEN-', 'GET-GREEN-V',\n 'GET-GREEN-VA', 'GET-GREEN-VAL', 'GET-GREEN-VALU',\n 'GET-INDEX-BY-NAMESPACE-NAME', 'GET-INDEX-BY-QNAME', 'GET-INT64',\n 'GET-ITERATION', 'GET-KEY-VALUE', 'GET-KEY-VAL', 'GET-KEY-VALU', 'GET-LAST',\n 'GET-LOCALNAME-BY-INDEX', 'GET-LONG', 'GET-MESSAGE', 'GET-NEXT',\n 'GET-NUMBER', 'GET-POINTER-VALUE', 'GET-PREV', 'GET-PRINTERS',\n 'GET-PROPERTY', 'GET-QNAME-BY-INDEX', 'GET-RED-VALUE', 'GET-RED',\n 'GET-RED-', 'GET-RED-V', 'GET-RED-VA', 'GET-RED-VAL', 'GET-RED-VALU',\n 'GET-REPOSITIONED-ROW', 'GET-RGB-VALUE', 'GET-SELECTED-WIDGET',\n 'GET-SELECTED', 'GET-SELECTED-', 'GET-SELECTED-W', 'GET-SELECTED-WI',\n 'GET-SELECTED-WID', 'GET-SELECTED-WIDG', 'GET-SELECTED-WIDGE', 'GET-SHORT',\n 'GET-SIGNATURE', 'GET-SIZE', 'GET-STRING', 'GET-TAB-ITEM',\n 'GET-TEXT-HEIGHT-CHARS', 'GET-TEXT-HEIGHT', 'GET-TEXT-HEIGHT-',\n 'GET-TEXT-HEIGHT-C', 'GET-TEXT-HEIGHT-CH', 'GET-TEXT-HEIGHT-CHA',\n 'GET-TEXT-HEIGHT-CHAR', 'GET-TEXT-HEIGHT-PIXELS', 'GET-TEXT-HEIGHT-P',\n 'GET-TEXT-HEIGHT-PI', 'GET-TEXT-HEIGHT-PIX', 'GET-TEXT-HEIGHT-PIXE',\n 'GET-TEXT-HEIGHT-PIXEL', 'GET-TEXT-WIDTH-CHARS', 'GET-TEXT-WIDTH',\n 'GET-TEXT-WIDTH-', 'GET-TEXT-WIDTH-C', 'GET-TEXT-WIDTH-CH',\n 'GET-TEXT-WIDTH-CHA', 'GET-TEXT-WIDTH-CHAR', 'GET-TEXT-WIDTH-PIXELS',\n 'GET-TEXT-WIDTH-P', 'GET-TEXT-WIDTH-PI', 'GET-TEXT-WIDTH-PIX',\n 'GET-TEXT-WIDTH-PIXE', 'GET-TEXT-WIDTH-PIXEL', 'GET-TYPE-BY-INDEX',\n 'GET-TYPE-BY-NAMESPACE-NAME', 'GET-TYPE-BY-QNAME', 'GET-UNSIGNED-LONG',\n 'GET-UNSIGNED-SHORT', 'GET-URI-BY-INDEX', 'GET-VALUE-BY-INDEX',\n 'GET-VALUE-BY-NAMESPACE-NAME', 'GET-VALUE-BY-QNAME', 'GET-WAIT-STATE',\n 'GLOBAL', 'GO-ON', 'GO-PENDING', 'GO-PEND', 'GO-PENDI', 'GO-PENDIN',\n 'GRANT', 'GRAPHIC-EDGE', 'GRAPHIC-E', 'GRAPHIC-ED', 'GRAPHIC-EDG',\n 'GRID-FACTOR-HORIZONTAL', 'GRID-FACTOR-H', 'GRID-FACTOR-HO',\n 'GRID-FACTOR-HOR', 'GRID-FACTOR-HORI', 'GRID-FACTOR-HORIZ',\n 'GRID-FACTOR-HORIZO', 'GRID-FACTOR-HORIZON', 'GRID-FACTOR-HORIZONT',\n 'GRID-FACTOR-HORIZONTA', 'GRID-FACTOR-VERTICAL', 'GRID-FACTOR-V',\n 'GRID-FACTOR-VE', 'GRID-FACTOR-VER', 'GRID-FACTOR-VERT', 'GRID-FACTOR-VERT',\n 'GRID-FACTOR-VERTI', 'GRID-FACTOR-VERTIC', 'GRID-FACTOR-VERTICA',\n 'GRID-SNAP', 'GRID-UNIT-HEIGHT-CHARS', 'GRID-UNIT-HEIGHT',\n 'GRID-UNIT-HEIGHT-', 'GRID-UNIT-HEIGHT-C', 'GRID-UNIT-HEIGHT-CH',\n 'GRID-UNIT-HEIGHT-CHA', 'GRID-UNIT-HEIGHT-PIXELS', 'GRID-UNIT-HEIGHT-P',\n 'GRID-UNIT-HEIGHT-PI', 'GRID-UNIT-HEIGHT-PIX', 'GRID-UNIT-HEIGHT-PIXE',\n 'GRID-UNIT-HEIGHT-PIXEL', 'GRID-UNIT-WIDTH-CHARS', 'GRID-UNIT-WIDTH',\n 'GRID-UNIT-WIDTH-', 'GRID-UNIT-WIDTH-C', 'GRID-UNIT-WIDTH-CH',\n 'GRID-UNIT-WIDTH-CHA', 'GRID-UNIT-WIDTH-CHAR', 'GRID-UNIT-WIDTH-PIXELS',\n 'GRID-UNIT-WIDTH-P', 'GRID-UNIT-WIDTH-PI', 'GRID-UNIT-WIDTH-PIX',\n 'GRID-UNIT-WIDTH-PIXE', 'GRID-UNIT-WIDTH-PIXEL', 'GRID-VISIBLE', 'GROUP',\n 'GT', 'GUID', 'HANDLER', 'HAS-RECORDS', 'HAVING', 'HEADER', 'HEIGHT-CHARS',\n 'HEIGHT', 'HEIGHT-', 'HEIGHT-C', 'HEIGHT-CH', 'HEIGHT-CHA', 'HEIGHT-CHAR',\n 'HEIGHT-PIXELS', 'HEIGHT-P', 'HEIGHT-PI', 'HEIGHT-PIX', 'HEIGHT-PIXE',\n 'HEIGHT-PIXEL', 'HELP', 'HEX-DECODE', 'HEX-ENCODE', 'HIDDEN', 'HIDE',\n 'HORIZONTAL', 'HORI', 'HORIZ', 'HORIZO', 'HORIZON', 'HORIZONT', 'HORIZONTA',\n 'HOST-BYTE-ORDER', 'HTML-CHARSET', 'HTML-END-OF-LINE', 'HTML-END-OF-PAGE',\n 'HTML-FRAME-BEGIN', 'HTML-FRAME-END', 'HTML-HEADER-BEGIN',\n 'HTML-HEADER-END', 'HTML-TITLE-BEGIN', 'HTML-TITLE-END', 'HWND', 'ICON',\n 'IF', 'IMAGE', 'IMAGE-DOWN', 'IMAGE-INSENSITIVE', 'IMAGE-SIZE',\n 'IMAGE-SIZE-CHARS', 'IMAGE-SIZE-C', 'IMAGE-SIZE-CH', 'IMAGE-SIZE-CHA',\n 'IMAGE-SIZE-CHAR', 'IMAGE-SIZE-PIXELS', 'IMAGE-SIZE-P', 'IMAGE-SIZE-PI',\n 'IMAGE-SIZE-PIX', 'IMAGE-SIZE-PIXE', 'IMAGE-SIZE-PIXEL', 'IMAGE-UP',\n 'IMMEDIATE-DISPLAY', 'IMPLEMENTS', 'IMPORT', 'IMPORT-PRINCIPAL', 'IN',\n 'INCREMENT-EXCLUSIVE-ID', 'INDEX', 'INDEXED-REPOSITION', 'INDEX-HINT',\n 'INDEX-INFORMATION', 'INDICATOR', 'INFORMATION', 'INFO', 'INFOR', 'INFORM',\n 'INFORMA', 'INFORMAT', 'INFORMATI', 'INFORMATIO', 'IN-HANDLE',\n 'INHERIT-BGCOLOR', 'INHERIT-BGC', 'INHERIT-BGCO', 'INHERIT-BGCOL',\n 'INHERIT-BGCOLO', 'INHERIT-FGCOLOR', 'INHERIT-FGC', 'INHERIT-FGCO',\n 'INHERIT-FGCOL', 'INHERIT-FGCOLO', 'INHERITS', 'INITIAL', 'INIT', 'INITI',\n 'INITIA', 'INITIAL-DIR', 'INITIAL-FILTER', 'INITIALIZE-DOCUMENT-TYPE',\n 'INITIATE', 'INNER-CHARS', 'INNER-LINES', 'INPUT', 'INPUT-OUTPUT',\n 'INPUT-O', 'INPUT-OU', 'INPUT-OUT', 'INPUT-OUTP', 'INPUT-OUTPU',\n 'INPUT-VALUE', 'INSERT', 'INSERT-ATTRIBUTE', 'INSERT-BACKTAB', 'INSERT-B',\n 'INSERT-BA', 'INSERT-BAC', 'INSERT-BACK', 'INSERT-BACKT', 'INSERT-BACKTA',\n 'INSERT-FILE', 'INSERT-ROW', 'INSERT-STRING', 'INSERT-TAB', 'INSERT-T',\n 'INSERT-TA', 'INTERFACE', 'INTERNAL-ENTRIES', 'INTO', 'INVOKE', 'IS',\n 'IS-ATTR-SPACE', 'IS-ATTR', 'IS-ATTR-', 'IS-ATTR-S', 'IS-ATTR-SP',\n 'IS-ATTR-SPA', 'IS-ATTR-SPAC', 'IS-CLASS', 'IS-CLAS', 'IS-LEAD-BYTE',\n 'IS-ATTR', 'IS-OPEN', 'IS-PARAMETER-SET', 'IS-ROW-SELECTED', 'IS-SELECTED',\n 'ITEM', 'ITEMS-PER-ROW', 'JOIN', 'JOIN-BY-SQLDB', 'KBLABEL',\n 'KEEP-CONNECTION-OPEN', 'KEEP-FRAME-Z-ORDER', 'KEEP-FRAME-Z',\n 'KEEP-FRAME-Z-', 'KEEP-FRAME-Z-O', 'KEEP-FRAME-Z-OR', 'KEEP-FRAME-Z-ORD',\n 'KEEP-FRAME-Z-ORDE', 'KEEP-MESSAGES', 'KEEP-SECURITY-CACHE',\n 'KEEP-TAB-ORDER', 'KEY', 'KEYCODE', 'KEY-CODE', 'KEYFUNCTION', 'KEYFUNC',\n 'KEYFUNCT', 'KEYFUNCTI', 'KEYFUNCTIO', 'KEY-FUNCTION', 'KEY-FUNC',\n 'KEY-FUNCT', 'KEY-FUNCTI', 'KEY-FUNCTIO', 'KEYLABEL', 'KEY-LABEL', 'KEYS',\n 'KEYWORD', 'KEYWORD-ALL', 'LABEL', 'LABEL-BGCOLOR', 'LABEL-BGC',\n 'LABEL-BGCO', 'LABEL-BGCOL', 'LABEL-BGCOLO', 'LABEL-DCOLOR', 'LABEL-DC',\n 'LABEL-DCO', 'LABEL-DCOL', 'LABEL-DCOLO', 'LABEL-FGCOLOR', 'LABEL-FGC',\n 'LABEL-FGCO', 'LABEL-FGCOL', 'LABEL-FGCOLO', 'LABEL-FONT', 'LABEL-PFCOLOR',\n 'LABEL-PFC', 'LABEL-PFCO', 'LABEL-PFCOL', 'LABEL-PFCOLO', 'LABELS',\n 'LANDSCAPE', 'LANGUAGES', 'LANGUAGE', 'LARGE', 'LARGE-TO-SMALL', 'LAST',\n 'LAST-ASYNCH-REQUEST', 'LAST-BATCH', 'LAST-CHILD', 'LAST-EVENT',\n 'LAST-EVEN', 'LAST-FORM', 'LASTKEY', 'LAST-KEY', 'LAST-OBJECT', 'LAST-OF',\n 'LAST-PROCEDURE', 'LAST-PROCE', 'LAST-PROCED', 'LAST-PROCEDU',\n 'LAST-PROCEDUR', 'LAST-SERVER', 'LAST-TAB-ITEM', 'LAST-TAB-I',\n 'LAST-TAB-IT', 'LAST-TAB-ITE', 'LC', 'LDBNAME', 'LE', 'LEAVE',\n 'LEFT-ALIGNED', 'LEFT-ALIGN', 'LEFT-ALIGNE', 'LEFT-TRIM', 'LENGTH',\n 'LIBRARY', 'LIKE', 'LIKE-SEQUENTIAL', 'LINE', 'LINE-COUNTER', 'LINE-COUNT',\n 'LINE-COUNTE', 'LIST-EVENTS', 'LISTING', 'LISTI', 'LISTIN',\n 'LIST-ITEM-PAIRS', 'LIST-ITEMS', 'LIST-PROPERTY-NAMES', 'LIST-QUERY-ATTRS',\n 'LIST-SET-ATTRS', 'LIST-WIDGETS', 'LITERAL-QUESTION', 'LITTLE-ENDIAN',\n 'LOAD', 'LOAD-DOMAINS', 'LOAD-ICON', 'LOAD-IMAGE', 'LOAD-IMAGE-DOWN',\n 'LOAD-IMAGE-INSENSITIVE', 'LOAD-IMAGE-UP', 'LOAD-MOUSE-POINTER',\n 'LOAD-MOUSE-P', 'LOAD-MOUSE-PO', 'LOAD-MOUSE-POI', 'LOAD-MOUSE-POIN',\n 'LOAD-MOUSE-POINT', 'LOAD-MOUSE-POINTE', 'LOAD-PICTURE', 'LOAD-SMALL-ICON',\n 'LOCAL-NAME', 'LOCATOR-COLUMN-NUMBER', 'LOCATOR-LINE-NUMBER',\n 'LOCATOR-PUBLIC-ID', 'LOCATOR-SYSTEM-ID', 'LOCATOR-TYPE', 'LOCKED',\n 'LOCK-REGISTRATION', 'LOG', 'LOG-AUDIT-EVENT', 'LOGIN-EXPIRATION-TIMESTAMP',\n 'LOGIN-HOST', 'LOGIN-STATE', 'LOG-MANAGER', 'LOGOUT', 'LOOKAHEAD', 'LOOKUP',\n 'LT', 'MACHINE-CLASS', 'MANDATORY', 'MANUAL-HIGHLIGHT', 'MAP',\n 'MARGIN-EXTRA', 'MARGIN-HEIGHT-CHARS', 'MARGIN-HEIGHT', 'MARGIN-HEIGHT-',\n 'MARGIN-HEIGHT-C', 'MARGIN-HEIGHT-CH', 'MARGIN-HEIGHT-CHA',\n 'MARGIN-HEIGHT-CHAR', 'MARGIN-HEIGHT-PIXELS', 'MARGIN-HEIGHT-P',\n 'MARGIN-HEIGHT-PI', 'MARGIN-HEIGHT-PIX', 'MARGIN-HEIGHT-PIXE',\n 'MARGIN-HEIGHT-PIXEL', 'MARGIN-WIDTH-CHARS', 'MARGIN-WIDTH',\n 'MARGIN-WIDTH-', 'MARGIN-WIDTH-C', 'MARGIN-WIDTH-CH', 'MARGIN-WIDTH-CHA',\n 'MARGIN-WIDTH-CHAR', 'MARGIN-WIDTH-PIXELS', 'MARGIN-WIDTH-P',\n 'MARGIN-WIDTH-PI', 'MARGIN-WIDTH-PIX', 'MARGIN-WIDTH-PIXE',\n 'MARGIN-WIDTH-PIXEL', 'MARK-NEW', 'MARK-ROW-STATE', 'MATCHES', 'MAX',\n 'MAX-BUTTON', 'MAX-CHARS', 'MAX-DATA-GUESS', 'MAX-HEIGHT',\n 'MAX-HEIGHT-CHARS', 'MAX-HEIGHT-C', 'MAX-HEIGHT-CH', 'MAX-HEIGHT-CHA',\n 'MAX-HEIGHT-CHAR', 'MAX-HEIGHT-PIXELS', 'MAX-HEIGHT-P', 'MAX-HEIGHT-PI',\n 'MAX-HEIGHT-PIX', 'MAX-HEIGHT-PIXE', 'MAX-HEIGHT-PIXEL', 'MAXIMIZE',\n 'MAXIMUM', 'MAX', 'MAXI', 'MAXIM', 'MAXIMU', 'MAXIMUM-LEVEL', 'MAX-ROWS',\n 'MAX-SIZE', 'MAX-VALUE', 'MAX-VAL', 'MAX-VALU', 'MAX-WIDTH',\n 'MAX-WIDTH-CHARS', 'MAX-WIDTH', 'MAX-WIDTH-', 'MAX-WIDTH-C', 'MAX-WIDTH-CH',\n 'MAX-WIDTH-CHA', 'MAX-WIDTH-CHAR', 'MAX-WIDTH-PIXELS', 'MAX-WIDTH-P',\n 'MAX-WIDTH-PI', 'MAX-WIDTH-PIX', 'MAX-WIDTH-PIXE', 'MAX-WIDTH-PIXEL',\n 'MD5-DIGEST', 'MEMBER', 'MEMPTR-TO-NODE-VALUE', 'MENU', 'MENUBAR',\n 'MENU-BAR', 'MENU-ITEM', 'MENU-KEY', 'MENU-K', 'MENU-KE', 'MENU-MOUSE',\n 'MENU-M', 'MENU-MO', 'MENU-MOU', 'MENU-MOUS', 'MERGE-BY-FIELD', 'MESSAGE',\n 'MESSAGE-AREA', 'MESSAGE-AREA-FONT', 'MESSAGE-LINES', 'METHOD', 'MIN',\n 'MIN-BUTTON', 'MIN-COLUMN-WIDTH-CHARS', 'MIN-COLUMN-WIDTH-C',\n 'MIN-COLUMN-WIDTH-CH', 'MIN-COLUMN-WIDTH-CHA', 'MIN-COLUMN-WIDTH-CHAR',\n 'MIN-COLUMN-WIDTH-PIXELS', 'MIN-COLUMN-WIDTH-P', 'MIN-COLUMN-WIDTH-PI',\n 'MIN-COLUMN-WIDTH-PIX', 'MIN-COLUMN-WIDTH-PIXE', 'MIN-COLUMN-WIDTH-PIXEL',\n 'MIN-HEIGHT-CHARS', 'MIN-HEIGHT', 'MIN-HEIGHT-', 'MIN-HEIGHT-C',\n 'MIN-HEIGHT-CH', 'MIN-HEIGHT-CHA', 'MIN-HEIGHT-CHAR', 'MIN-HEIGHT-PIXELS',\n 'MIN-HEIGHT-P', 'MIN-HEIGHT-PI', 'MIN-HEIGHT-PIX', 'MIN-HEIGHT-PIXE',\n 'MIN-HEIGHT-PIXEL', 'MINIMUM', 'MIN', 'MINI', 'MINIM', 'MINIMU', 'MIN-SIZE',\n 'MIN-VALUE', 'MIN-VAL', 'MIN-VALU', 'MIN-WIDTH-CHARS', 'MIN-WIDTH',\n 'MIN-WIDTH-', 'MIN-WIDTH-C', 'MIN-WIDTH-CH', 'MIN-WIDTH-CHA',\n 'MIN-WIDTH-CHAR', 'MIN-WIDTH-PIXELS', 'MIN-WIDTH-P', 'MIN-WIDTH-PI',\n 'MIN-WIDTH-PIX', 'MIN-WIDTH-PIXE', 'MIN-WIDTH-PIXEL', 'MODIFIED', 'MODULO',\n 'MOD', 'MODU', 'MODUL', 'MONTH', 'MOUSE', 'MOUSE-POINTER', 'MOUSE-P',\n 'MOUSE-PO', 'MOUSE-POI', 'MOUSE-POIN', 'MOUSE-POINT', 'MOUSE-POINTE',\n 'MOVABLE', 'MOVE-AFTER-TAB-ITEM', 'MOVE-AFTER', 'MOVE-AFTER-',\n 'MOVE-AFTER-T', 'MOVE-AFTER-TA', 'MOVE-AFTER-TAB', 'MOVE-AFTER-TAB-',\n 'MOVE-AFTER-TAB-I', 'MOVE-AFTER-TAB-IT', 'MOVE-AFTER-TAB-ITE',\n 'MOVE-BEFORE-TAB-ITEM', 'MOVE-BEFOR', 'MOVE-BEFORE', 'MOVE-BEFORE-',\n 'MOVE-BEFORE-T', 'MOVE-BEFORE-TA', 'MOVE-BEFORE-TAB', 'MOVE-BEFORE-TAB-',\n 'MOVE-BEFORE-TAB-I', 'MOVE-BEFORE-TAB-IT', 'MOVE-BEFORE-TAB-ITE',\n 'MOVE-COLUMN', 'MOVE-COL', 'MOVE-COLU', 'MOVE-COLUM', 'MOVE-TO-BOTTOM',\n 'MOVE-TO-B', 'MOVE-TO-BO', 'MOVE-TO-BOT', 'MOVE-TO-BOTT', 'MOVE-TO-BOTTO',\n 'MOVE-TO-EOF', 'MOVE-TO-TOP', 'MOVE-TO-T', 'MOVE-TO-TO', 'MPE',\n 'MULTI-COMPILE', 'MULTIPLE', 'MULTIPLE-KEY', 'MULTITASKING-INTERVAL',\n 'MUST-EXIST', 'NAME', 'NAMESPACE-PREFIX', 'NAMESPACE-URI', 'NATIVE', 'NE',\n 'NEEDS-APPSERVER-PROMPT', 'NEEDS-PROMPT', 'NEW', 'NEW-INSTANCE', 'NEW-ROW',\n 'NEXT', 'NEXT-COLUMN', 'NEXT-PROMPT', 'NEXT-ROWID', 'NEXT-SIBLING',\n 'NEXT-TAB-ITEM', 'NEXT-TAB-I', 'NEXT-TAB-IT', 'NEXT-TAB-ITE', 'NEXT-VALUE',\n 'NO', 'NO-APPLY', 'NO-ARRAY-MESSAGE', 'NO-ASSIGN', 'NO-ATTR-LIST',\n 'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-L', 'NO-ATTR-LI', 'NO-ATTR-LIS',\n 'NO-ATTR-SPACE', 'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-S', 'NO-ATTR-SP',\n 'NO-ATTR-SPA', 'NO-ATTR-SPAC', 'NO-AUTO-VALIDATE', 'NO-BIND-WHERE',\n 'NO-BOX', 'NO-CONSOLE', 'NO-CONVERT', 'NO-CONVERT-3D-COLORS',\n 'NO-CURRENT-VALUE', 'NO-DEBUG', 'NODE-VALUE-TO-MEMPTR', 'NO-DRAG',\n 'NO-ECHO', 'NO-EMPTY-SPACE', 'NO-ERROR', 'NO-FILL', 'NO-F', 'NO-FI',\n 'NO-FIL', 'NO-FOCUS', 'NO-HELP', 'NO-HIDE', 'NO-INDEX-HINT',\n 'NO-INHERIT-BGCOLOR', 'NO-INHERIT-BGC', 'NO-INHERIT-BGCO', 'LABEL-BGCOL',\n 'LABEL-BGCOLO', 'NO-INHERIT-FGCOLOR', 'NO-INHERIT-FGC', 'NO-INHERIT-FGCO',\n 'NO-INHERIT-FGCOL', 'NO-INHERIT-FGCOLO', 'NO-JOIN-BY-SQLDB', 'NO-LABELS',\n 'NO-LABE', 'NO-LOBS', 'NO-LOCK', 'NO-LOOKAHEAD', 'NO-MAP', 'NO-MESSAGE',\n 'NO-MES', 'NO-MESS', 'NO-MESSA', 'NO-MESSAG', 'NONAMESPACE-SCHEMA-LOCATION',\n 'NONE', 'NO-PAUSE', 'NO-PREFETCH', 'NO-PREFE', 'NO-PREFET', 'NO-PREFETC',\n 'NORMALIZE', 'NO-ROW-MARKERS', 'NO-SCROLLBAR-VERTICAL',\n 'NO-SEPARATE-CONNECTION', 'NO-SEPARATORS', 'NOT', 'NO-TAB-STOP',\n 'NOT-ACTIVE', 'NO-UNDERLINE', 'NO-UND', 'NO-UNDE', 'NO-UNDER', 'NO-UNDERL',\n 'NO-UNDERLI', 'NO-UNDERLIN', 'NO-UNDO', 'NO-VALIDATE', 'NO-VAL', 'NO-VALI',\n 'NO-VALID', 'NO-VALIDA', 'NO-VALIDAT', 'NOW', 'NO-WAIT', 'NO-WORD-WRAP',\n 'NULL', 'NUM-ALIASES', 'NUM-ALI', 'NUM-ALIA', 'NUM-ALIAS', 'NUM-ALIASE',\n 'NUM-BUFFERS', 'NUM-BUTTONS', 'NUM-BUT', 'NUM-BUTT', 'NUM-BUTTO',\n 'NUM-BUTTON', 'NUM-COLUMNS', 'NUM-COL', 'NUM-COLU', 'NUM-COLUM',\n 'NUM-COLUMN', 'NUM-COPIES', 'NUM-DBS', 'NUM-DROPPED-FILES', 'NUM-ENTRIES',\n 'NUMERIC', 'NUMERIC-FORMAT', 'NUMERIC-F', 'NUMERIC-FO', 'NUMERIC-FOR',\n 'NUMERIC-FORM', 'NUMERIC-FORMA', 'NUM-FIELDS', 'NUM-FORMATS', 'NUM-ITEMS',\n 'NUM-ITERATIONS', 'NUM-LINES', 'NUM-LOCKED-COLUMNS', 'NUM-LOCKED-COL',\n 'NUM-LOCKED-COLU', 'NUM-LOCKED-COLUM', 'NUM-LOCKED-COLUMN', 'NUM-MESSAGES',\n 'NUM-PARAMETERS', 'NUM-REFERENCES', 'NUM-REPLACED', 'NUM-RESULTS',\n 'NUM-SELECTED-ROWS', 'NUM-SELECTED-WIDGETS', 'NUM-SELECTED',\n 'NUM-SELECTED-', 'NUM-SELECTED-W', 'NUM-SELECTED-WI', 'NUM-SELECTED-WID',\n 'NUM-SELECTED-WIDG', 'NUM-SELECTED-WIDGE', 'NUM-SELECTED-WIDGET',\n 'NUM-TABS', 'NUM-TO-RETAIN', 'NUM-VISIBLE-COLUMNS', 'OCTET-LENGTH', 'OF',\n 'OFF', 'OK', 'OK-CANCEL', 'OLD', 'ON', 'ON-FRAME-BORDER', 'ON-FRAME',\n 'ON-FRAME-', 'ON-FRAME-B', 'ON-FRAME-BO', 'ON-FRAME-BOR', 'ON-FRAME-BORD',\n 'ON-FRAME-BORDE', 'OPEN', 'OPSYS', 'OPTION', 'OR', 'ORDERED-JOIN',\n 'ORDINAL', 'OS-APPEND', 'OS-COMMAND', 'OS-COPY', 'OS-CREATE-DIR',\n 'OS-DELETE', 'OS-DIR', 'OS-DRIVES', 'OS-DRIVE', 'OS-ERROR', 'OS-GETENV',\n 'OS-RENAME', 'OTHERWISE', 'OUTPUT', 'OVERLAY', 'OVERRIDE', 'OWNER', 'PAGE',\n 'PAGE-BOTTOM', 'PAGE-BOT', 'PAGE-BOTT', 'PAGE-BOTTO', 'PAGED',\n 'PAGE-NUMBER', 'PAGE-NUM', 'PAGE-NUMB', 'PAGE-NUMBE', 'PAGE-SIZE',\n 'PAGE-TOP', 'PAGE-WIDTH', 'PAGE-WID', 'PAGE-WIDT', 'PARAMETER', 'PARAM',\n 'PARAME', 'PARAMET', 'PARAMETE', 'PARENT', 'PARSE-STATUS', 'PARTIAL-KEY',\n 'PASCAL', 'PASSWORD-FIELD', 'PATHNAME', 'PAUSE', 'PBE-HASH-ALGORITHM',\n 'PBE-HASH-ALG', 'PBE-HASH-ALGO', 'PBE-HASH-ALGOR', 'PBE-HASH-ALGORI',\n 'PBE-HASH-ALGORIT', 'PBE-HASH-ALGORITH', 'PBE-KEY-ROUNDS', 'PDBNAME',\n 'PERSISTENT', 'PERSIST', 'PERSISTE', 'PERSISTEN',\n 'PERSISTENT-CACHE-DISABLED', 'PFCOLOR', 'PFC', 'PFCO', 'PFCOL', 'PFCOLO',\n 'PIXELS', 'PIXELS-PER-COLUMN', 'PIXELS-PER-COL', 'PIXELS-PER-COLU',\n 'PIXELS-PER-COLUM', 'PIXELS-PER-ROW', 'POPUP-MENU', 'POPUP-M', 'POPUP-ME',\n 'POPUP-MEN', 'POPUP-ONLY', 'POPUP-O', 'POPUP-ON', 'POPUP-ONL', 'PORTRAIT',\n 'POSITION', 'PRECISION', 'PREFER-DATASET', 'PREPARED', 'PREPARE-STRING',\n 'PREPROCESS', 'PREPROC', 'PREPROCE', 'PREPROCES', 'PRESELECT', 'PRESEL',\n 'PRESELE', 'PRESELEC', 'PREV', 'PREV-COLUMN', 'PREV-SIBLING',\n 'PREV-TAB-ITEM', 'PREV-TAB-I', 'PREV-TAB-IT', 'PREV-TAB-ITE', 'PRIMARY',\n 'PRINTER', 'PRINTER-CONTROL-HANDLE', 'PRINTER-HDC', 'PRINTER-NAME',\n 'PRINTER-PORT', 'PRINTER-SETUP', 'PRIVATE', 'PRIVATE-DATA', 'PRIVATE-D',\n 'PRIVATE-DA', 'PRIVATE-DAT', 'PRIVILEGES', 'PROCEDURE', 'PROCE', 'PROCED',\n 'PROCEDU', 'PROCEDUR', 'PROCEDURE-CALL-TYPE', 'PROCESS', 'PROC-HANDLE',\n 'PROC-HA', 'PROC-HAN', 'PROC-HAND', 'PROC-HANDL', 'PROC-STATUS', 'PROC-ST',\n 'PROC-STA', 'PROC-STAT', 'PROC-STATU', 'proc-text', 'proc-text-buffe',\n 'PROFILER', 'PROGRAM-NAME', 'PROGRESS', 'PROGRESS-SOURCE', 'PROGRESS-S',\n 'PROGRESS-SO', 'PROGRESS-SOU', 'PROGRESS-SOUR', 'PROGRESS-SOURC', 'PROMPT',\n 'PROMPT-FOR', 'PROMPT-F', 'PROMPT-FO', 'PROMSGS', 'PROPATH', 'PROPERTY',\n 'PROTECTED', 'PROVERSION', 'PROVERS', 'PROVERSI', 'PROVERSIO', 'PROXY',\n 'PROXY-PASSWORD', 'PROXY-USERID', 'PUBLIC', 'PUBLIC-ID', 'PUBLISH',\n 'PUBLISHED-EVENTS', 'PUT', 'PUTBYTE', 'PUT-BYTE', 'PUT-DOUBLE', 'PUT-FLOAT',\n 'PUT-INT64', 'PUT-KEY-VALUE', 'PUT-KEY-VAL', 'PUT-KEY-VALU', 'PUT-LONG',\n 'PUT-SHORT', 'PUT-STRING', 'PUT-UNSIGNED-LONG', 'QUERY', 'QUERY-CLOSE',\n 'QUERY-OFF-END', 'QUERY-OPEN', 'QUERY-PREPARE', 'QUERY-TUNING', 'QUESTION',\n 'QUIT', 'QUOTER', 'RADIO-BUTTONS', 'RADIO-SET', 'RANDOM', 'RAW-TRANSFER',\n 'RCODE-INFORMATION', 'RCODE-INFO', 'RCODE-INFOR', 'RCODE-INFORM',\n 'RCODE-INFORMA', 'RCODE-INFORMAT', 'RCODE-INFORMATI', 'RCODE-INFORMATIO',\n 'READ-AVAILABLE', 'READ-EXACT-NUM', 'READ-FILE', 'READKEY', 'READ-ONLY',\n 'READ-XML', 'READ-XMLSCHEMA', 'REAL', 'RECORD-LENGTH', 'RECTANGLE', 'RECT',\n 'RECTA', 'RECTAN', 'RECTANG', 'RECTANGL', 'RECURSIVE', 'REFERENCE-ONLY',\n 'REFRESH', 'REFRESHABLE', 'REFRESH-AUDIT-POLICY', 'REGISTER-DOMAIN',\n 'RELEASE', 'REMOTE', 'REMOVE-EVENTS-PROCEDURE', 'REMOVE-SUPER-PROCEDURE',\n 'REPEAT', 'REPLACE', 'REPLACE-SELECTION-TEXT', 'REPOSITION',\n 'REPOSITION-BACKWARD', 'REPOSITION-FORWARD', 'REPOSITION-MODE',\n 'REPOSITION-TO-ROW', 'REPOSITION-TO-ROWID', 'REQUEST', 'RESET', 'RESIZABLE',\n 'RESIZA', 'RESIZAB', 'RESIZABL', 'RESIZE', 'RESTART-ROW', 'RESTART-ROWID',\n 'RETAIN', 'RETAIN-SHAPE', 'RETRY', 'RETRY-CANCEL', 'RETURN',\n 'RETURN-INSERTED', 'RETURN-INS', 'RETURN-INSE', 'RETURN-INSER',\n 'RETURN-INSERT', 'RETURN-INSERTE', 'RETURNS', 'RETURN-TO-START-DIR',\n 'RETURN-TO-START-DI', 'RETURN-VALUE', 'RETURN-VAL', 'RETURN-VALU',\n 'RETURN-VALUE-DATA-TYPE', 'REVERSE-FROM', 'REVERT', 'REVOKE', 'RGB-VALUE',\n 'RIGHT-ALIGNED', 'RETURN-ALIGN', 'RETURN-ALIGNE', 'RIGHT-TRIM', 'R-INDEX',\n 'ROLES', 'ROUND', 'ROUTINE-LEVEL', 'ROW', 'ROW-HEIGHT-CHARS', 'HEIGHT',\n 'ROW-HEIGHT-PIXELS', 'HEIGHT-P', 'ROW-MARKERS', 'ROW-OF', 'ROW-RESIZABLE',\n 'RULE', 'RUN', 'RUN-PROCEDURE', 'SAVE', 'SAVE-AS', 'SAVE-FILE',\n 'SAX-COMPLETE', 'SAX-COMPLE', 'SAX-COMPLET', 'SAX-PARSE', 'SAX-PARSE-FIRST',\n 'SAX-PARSE-NEXT', 'SAX-PARSER-ERROR', 'SAX-RUNNING', 'SAX-UNINITIALIZED',\n 'SAX-WRITE-BEGIN', 'SAX-WRITE-COMPLETE', 'SAX-WRITE-CONTENT',\n 'SAX-WRITE-ELEMENT', 'SAX-WRITE-ERROR', 'SAX-WRITE-IDLE', 'SAX-WRITER',\n 'SAX-WRITE-TAG', 'SCHEMA', 'SCHEMA-LOCATION', 'SCHEMA-MARSHAL',\n 'SCHEMA-PATH', 'SCREEN', 'SCREEN-IO', 'SCREEN-LINES', 'SCREEN-VALUE',\n 'SCREEN-VAL', 'SCREEN-VALU', 'SCROLL', 'SCROLLABLE', 'SCROLLBAR-HORIZONTAL',\n 'SCROLLBAR-H', 'SCROLLBAR-HO', 'SCROLLBAR-HOR', 'SCROLLBAR-HORI',\n 'SCROLLBAR-HORIZ', 'SCROLLBAR-HORIZO', 'SCROLLBAR-HORIZON',\n 'SCROLLBAR-HORIZONT', 'SCROLLBAR-HORIZONTA', 'SCROLL-BARS',\n 'SCROLLBAR-VERTICAL', 'SCROLLBAR-V', 'SCROLLBAR-VE', 'SCROLLBAR-VER',\n 'SCROLLBAR-VERT', 'SCROLLBAR-VERTI', 'SCROLLBAR-VERTIC',\n 'SCROLLBAR-VERTICA', 'SCROLL-DELTA', 'SCROLLED-ROW-POSITION',\n 'SCROLLED-ROW-POS', 'SCROLLED-ROW-POSI', 'SCROLLED-ROW-POSIT',\n 'SCROLLED-ROW-POSITI', 'SCROLLED-ROW-POSITIO', 'SCROLLING', 'SCROLL-OFFSET',\n 'SCROLL-TO-CURRENT-ROW', 'SCROLL-TO-ITEM', 'SCROLL-TO-I', 'SCROLL-TO-IT',\n 'SCROLL-TO-ITE', 'SCROLL-TO-SELECTED-ROW', 'SDBNAME', 'SEAL',\n 'SEAL-TIMESTAMP', 'SEARCH', 'SEARCH-SELF', 'SEARCH-TARGET', 'SECTION',\n 'SECURITY-POLICY', 'SEEK', 'SELECT', 'SELECTABLE', 'SELECT-ALL', 'SELECTED',\n 'SELECT-FOCUSED-ROW', 'SELECTION', 'SELECTION-END', 'SELECTION-LIST',\n 'SELECTION-START', 'SELECTION-TEXT', 'SELECT-NEXT-ROW', 'SELECT-PREV-ROW',\n 'SELECT-ROW', 'SELF', 'SEND', 'send-sql-statement', 'send-sql', 'SENSITIVE',\n 'SEPARATE-CONNECTION', 'SEPARATOR-FGCOLOR', 'SEPARATORS', 'SERVER',\n 'SERVER-CONNECTION-BOUND', 'SERVER-CONNECTION-BOUND-REQUEST',\n 'SERVER-CONNECTION-CONTEXT', 'SERVER-CONNECTION-ID',\n 'SERVER-OPERATING-MODE', 'SESSION', 'SESSION-ID', 'SET', 'SET-APPL-CONTEXT',\n 'SET-ATTR-CALL-TYPE', 'SET-ATTRIBUTE-NODE', 'SET-BLUE-VALUE', 'SET-BLUE',\n 'SET-BLUE-', 'SET-BLUE-V', 'SET-BLUE-VA', 'SET-BLUE-VAL', 'SET-BLUE-VALU',\n 'SET-BREAK', 'SET-BUFFERS', 'SET-CALLBACK', 'SET-CLIENT', 'SET-COMMIT',\n 'SET-CONTENTS', 'SET-CURRENT-VALUE', 'SET-DB-CLIENT', 'SET-DYNAMIC',\n 'SET-EVENT-MANAGER-OPTION', 'SET-GREEN-VALUE', 'SET-GREEN', 'SET-GREEN-',\n 'SET-GREEN-V', 'SET-GREEN-VA', 'SET-GREEN-VAL', 'SET-GREEN-VALU',\n 'SET-INPUT-SOURCE', 'SET-OPTION', 'SET-OUTPUT-DESTINATION', 'SET-PARAMETER',\n 'SET-POINTER-VALUE', 'SET-PROPERTY', 'SET-RED-VALUE', 'SET-RED', 'SET-RED-',\n 'SET-RED-V', 'SET-RED-VA', 'SET-RED-VAL', 'SET-RED-VALU',\n 'SET-REPOSITIONED-ROW', 'SET-RGB-VALUE', 'SET-ROLLBACK', 'SET-SELECTION',\n 'SET-SIZE', 'SET-SORT-ARROW', 'SETUSERID', 'SETUSER', 'SETUSERI',\n 'SET-WAIT-STATE', 'SHA1-DIGEST', 'SHARED', 'SHARE-LOCK', 'SHARE', 'SHARE-',\n 'SHARE-L', 'SHARE-LO', 'SHARE-LOC', 'SHOW-IN-TASKBAR', 'SHOW-STATS',\n 'SHOW-STAT', 'SIDE-LABEL-HANDLE', 'SIDE-LABEL-H', 'SIDE-LABEL-HA',\n 'SIDE-LABEL-HAN', 'SIDE-LABEL-HAND', 'SIDE-LABEL-HANDL', 'SIDE-LABELS',\n 'SIDE-LAB', 'SIDE-LABE', 'SIDE-LABEL', 'SILENT', 'SIMPLE', 'SINGLE', 'SIZE',\n 'SIZE-CHARS', 'SIZE-C', 'SIZE-CH', 'SIZE-CHA', 'SIZE-CHAR', 'SIZE-PIXELS',\n 'SIZE-P', 'SIZE-PI', 'SIZE-PIX', 'SIZE-PIXE', 'SIZE-PIXEL', 'SKIP',\n 'SKIP-DELETED-RECORD', 'SLIDER', 'SMALL-ICON', 'SMALLINT', 'SMALL-TITLE',\n 'SOME', 'SORT', 'SORT-ASCENDING', 'SORT-NUMBER', 'SOURCE',\n 'SOURCE-PROCEDURE', 'SPACE', 'SQL', 'SQRT', 'SSL-SERVER-NAME', 'STANDALONE',\n 'START', 'START-DOCUMENT', 'START-ELEMENT', 'START-MOVE', 'START-RESIZE',\n 'START-ROW-RESIZE', 'STATE-DETAIL', 'STATIC', 'STATUS', 'STATUS-AREA',\n 'STATUS-AREA-FONT', 'STDCALL', 'STOP', 'STOP-PARSING', 'STOPPED', 'STOPPE',\n 'STORED-PROCEDURE', 'STORED-PROC', 'STORED-PROCE', 'STORED-PROCED',\n 'STORED-PROCEDU', 'STORED-PROCEDUR', 'STREAM', 'STREAM-HANDLE', 'STREAM-IO',\n 'STRETCH-TO-FIT', 'STRICT', 'STRING', 'STRING-VALUE', 'STRING-XREF',\n 'SUB-AVERAGE', 'SUB-AVE', 'SUB-AVER', 'SUB-AVERA', 'SUB-AVERAG',\n 'SUB-COUNT', 'SUB-MAXIMUM', 'SUM-MAX', 'SUM-MAXI', 'SUM-MAXIM',\n 'SUM-MAXIMU', 'SUB-MENU', 'SUBSUB-', 'MINIMUM', 'SUB-MIN', 'SUBSCRIBE',\n 'SUBSTITUTE', 'SUBST', 'SUBSTI', 'SUBSTIT', 'SUBSTITU', 'SUBSTITUT',\n 'SUBSTRING', 'SUBSTR', 'SUBSTRI', 'SUBSTRIN', 'SUB-TOTAL', 'SUBTYPE', 'SUM',\n 'SUPER', 'SUPER-PROCEDURES', 'SUPPRESS-NAMESPACE-PROCESSING',\n 'SUPPRESS-WARNINGS', 'SUPPRESS-W', 'SUPPRESS-WA', 'SUPPRESS-WAR',\n 'SUPPRESS-WARN', 'SUPPRESS-WARNI', 'SUPPRESS-WARNIN', 'SUPPRESS-WARNING',\n 'SYMMETRIC-ENCRYPTION-ALGORITHM', 'SYMMETRIC-ENCRYPTION-IV',\n 'SYMMETRIC-ENCRYPTION-KEY', 'SYMMETRIC-SUPPORT', 'SYSTEM-ALERT-BOXES',\n 'SYSTEM-ALERT', 'SYSTEM-ALERT-', 'SYSTEM-ALERT-B', 'SYSTEM-ALERT-BO',\n 'SYSTEM-ALERT-BOX', 'SYSTEM-ALERT-BOXE', 'SYSTEM-DIALOG', 'SYSTEM-HELP',\n 'SYSTEM-ID', 'TABLE', 'TABLE-HANDLE', 'TABLE-NUMBER', 'TAB-POSITION',\n 'TAB-STOP', 'TARGET', 'TARGET-PROCEDURE', 'TEMP-DIRECTORY', 'TEMP-DIR',\n 'TEMP-DIRE', 'TEMP-DIREC', 'TEMP-DIRECT', 'TEMP-DIRECTO', 'TEMP-DIRECTOR',\n 'TEMP-TABLE', 'TEMP-TABLE-PREPARE', 'TERM', 'TERMINAL', 'TERM', 'TERMI',\n 'TERMIN', 'TERMINA', 'TERMINATE', 'TEXT', 'TEXT-CURSOR', 'TEXT-SEG-GROW',\n 'TEXT-SELECTED', 'THEN', 'THIS-OBJECT', 'THIS-PROCEDURE', 'THREE-D',\n 'THROW', 'THROUGH', 'THRU', 'TIC-MARKS', 'TIME', 'TIME-SOURCE', 'TITLE',\n 'TITLE-BGCOLOR', 'TITLE-BGC', 'TITLE-BGCO', 'TITLE-BGCOL', 'TITLE-BGCOLO',\n 'TITLE-DCOLOR', 'TITLE-DC', 'TITLE-DCO', 'TITLE-DCOL', 'TITLE-DCOLO',\n 'TITLE-FGCOLOR', 'TITLE-FGC', 'TITLE-FGCO', 'TITLE-FGCOL', 'TITLE-FGCOLO',\n 'TITLE-FONT', 'TITLE-FO', 'TITLE-FON', 'TO', 'TODAY', 'TOGGLE-BOX',\n 'TOOLTIP', 'TOOLTIPS', 'TOPIC', 'TOP-NAV-QUERY', 'TOP-ONLY', 'TO-ROWID',\n 'TOTAL', 'TRAILING', 'TRANS', 'TRANSACTION', 'TRANSACTION-MODE',\n 'TRANS-INIT-PROCEDURE', 'TRANSPARENT', 'TRIGGER', 'TRIGGERS', 'TRIM',\n 'TRUE', 'TRUNCATE', 'TRUNC', 'TRUNCA', 'TRUNCAT', 'TYPE', 'TYPE-OF',\n 'UNBOX', 'UNBUFFERED', 'UNBUFF', 'UNBUFFE', 'UNBUFFER', 'UNBUFFERE',\n 'UNDERLINE', 'UNDERL', 'UNDERLI', 'UNDERLIN', 'UNDO', 'UNFORMATTED',\n 'UNFORM', 'UNFORMA', 'UNFORMAT', 'UNFORMATT', 'UNFORMATTE', 'UNION',\n 'UNIQUE', 'UNIQUE-ID', 'UNIQUE-MATCH', 'UNIX', 'UNLESS-HIDDEN', 'UNLOAD',\n 'UNSIGNED-LONG', 'UNSUBSCRIBE', 'UP', 'UPDATE', 'UPDATE-ATTRIBUTE', 'URL',\n 'URL-DECODE', 'URL-ENCODE', 'URL-PASSWORD', 'URL-USERID', 'USE',\n 'USE-DICT-EXPS', 'USE-FILENAME', 'USE-INDEX', 'USER', 'USE-REVVIDEO',\n 'USERID', 'USER-ID', 'USE-TEXT', 'USE-UNDERLINE', 'USE-WIDGET-POOL',\n 'USING', 'V6DISPLAY', 'V6FRAME', 'VALIDATE', 'VALIDATE-EXPRESSION',\n 'VALIDATE-MESSAGE', 'VALIDATE-SEAL', 'VALIDATION-ENABLED', 'VALID-EVENT',\n 'VALID-HANDLE', 'VALID-OBJECT', 'VALUE', 'VALUE-CHANGED', 'VALUES',\n 'VARIABLE', 'VAR', 'VARI', 'VARIA', 'VARIAB', 'VARIABL', 'VERBOSE',\n 'VERSION', 'VERTICAL', 'VERT', 'VERTI', 'VERTIC', 'VERTICA', 'VIEW',\n 'VIEW-AS', 'VIEW-FIRST-COLUMN-ON-REOPEN', 'VIRTUAL-HEIGHT-CHARS',\n 'VIRTUAL-HEIGHT', 'VIRTUAL-HEIGHT-', 'VIRTUAL-HEIGHT-C',\n 'VIRTUAL-HEIGHT-CH', 'VIRTUAL-HEIGHT-CHA', 'VIRTUAL-HEIGHT-CHAR',\n 'VIRTUAL-HEIGHT-PIXELS', 'VIRTUAL-HEIGHT-P', 'VIRTUAL-HEIGHT-PI',\n 'VIRTUAL-HEIGHT-PIX', 'VIRTUAL-HEIGHT-PIXE', 'VIRTUAL-HEIGHT-PIXEL',\n 'VIRTUAL-WIDTH-CHARS', 'VIRTUAL-WIDTH', 'VIRTUAL-WIDTH-', 'VIRTUAL-WIDTH-C',\n 'VIRTUAL-WIDTH-CH', 'VIRTUAL-WIDTH-CHA', 'VIRTUAL-WIDTH-CHAR',\n 'VIRTUAL-WIDTH-PIXELS', 'VIRTUAL-WIDTH-P', 'VIRTUAL-WIDTH-PI',\n 'VIRTUAL-WIDTH-PIX', 'VIRTUAL-WIDTH-PIXE', 'VIRTUAL-WIDTH-PIXEL', 'VISIBLE',\n 'VOID', 'WAIT', 'WAIT-FOR', 'WARNING', 'WEB-CONTEXT', 'WEEKDAY', 'WHEN',\n 'WHERE', 'WHILE', 'WIDGET', 'WIDGET-ENTER', 'WIDGET-E', 'WIDGET-EN',\n 'WIDGET-ENT', 'WIDGET-ENTE', 'WIDGET-ID', 'WIDGET-LEAVE', 'WIDGET-L',\n 'WIDGET-LE', 'WIDGET-LEA', 'WIDGET-LEAV', 'WIDGET-POOL', 'WIDTH',\n 'WIDTH-CHARS', 'WIDTH', 'WIDTH-', 'WIDTH-C', 'WIDTH-CH', 'WIDTH-CHA',\n 'WIDTH-CHAR', 'WIDTH-PIXELS', 'WIDTH-P', 'WIDTH-PI', 'WIDTH-PIX',\n 'WIDTH-PIXE', 'WIDTH-PIXEL', 'WINDOW', 'WINDOW-MAXIMIZED', 'WINDOW-MAXIM',\n 'WINDOW-MAXIMI', 'WINDOW-MAXIMIZ', 'WINDOW-MAXIMIZE', 'WINDOW-MINIMIZED',\n 'WINDOW-MINIM', 'WINDOW-MINIMI', 'WINDOW-MINIMIZ', 'WINDOW-MINIMIZE',\n 'WINDOW-NAME', 'WINDOW-NORMAL', 'WINDOW-STATE', 'WINDOW-STA', 'WINDOW-STAT',\n 'WINDOW-SYSTEM', 'WITH', 'WORD-INDEX', 'WORD-WRAP',\n 'WORK-AREA-HEIGHT-PIXELS', 'WORK-AREA-WIDTH-PIXELS', 'WORK-AREA-X',\n 'WORK-AREA-Y', 'WORKFILE', 'WORK-TABLE', 'WORK-TAB', 'WORK-TABL', 'WRITE',\n 'WRITE-CDATA', 'WRITE-CHARACTERS', 'WRITE-COMMENT', 'WRITE-DATA-ELEMENT',\n 'WRITE-EMPTY-ELEMENT', 'WRITE-ENTITY-REF', 'WRITE-EXTERNAL-DTD',\n 'WRITE-FRAGMENT', 'WRITE-MESSAGE', 'WRITE-PROCESSING-INSTRUCTION',\n 'WRITE-STATUS', 'WRITE-XML', 'WRITE-XMLSCHEMA', 'X', 'XCODE',\n 'XML-DATA-TYPE', 'XML-NODE-TYPE', 'XML-SCHEMA-PATH',\n 'XML-SUPPRESS-NAMESPACE-PROCESSING', 'X-OF', 'XREF', 'XREF-XML', 'Y',\n 'YEAR', 'YEAR-OFFSET', 'YES', 'YES-NO', 'YES-NO-CANCEL', 'Y-OF'\n]\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203639,"cells":{"repo_name":{"kind":"string","value":"AndreasWilliams/BotGravindo"},"path":{"kind":"string","value":"src/scons-local-2.0.1/SCons/Variables/__init__.py"},"copies":{"kind":"string","value":"61"},"size":{"kind":"string","value":"11095"},"content":{"kind":"string","value":"\"\"\"engine.SCons.Variables\n\nThis file defines the Variables class that is used to add user-friendly\ncustomizable variables to an SCons build.\n\"\"\"\n\n#\n# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n__revision__ = \"src/engine/SCons/Variables/__init__.py 5134 2010/08/16 23:02:40 bdeegan\"\n\nimport os.path\nimport sys\n\nimport SCons.Environment\nimport SCons.Errors\nimport SCons.Util\nimport SCons.Warnings\n\nfrom BoolVariable import BoolVariable # okay\nfrom EnumVariable import EnumVariable # okay\nfrom ListVariable import ListVariable # naja\nfrom PackageVariable import PackageVariable # naja\nfrom PathVariable import PathVariable # okay\n\n\nclass Variables(object):\n instance=None\n\n \"\"\"\n Holds all the options, updates the environment with the variables,\n and renders the help text.\n \"\"\"\n def __init__(self, files=[], args={}, is_global=1):\n \"\"\"\n files - [optional] List of option configuration files to load\n (backward compatibility) If a single string is passed it is\n automatically placed in a file list\n \"\"\"\n self.options = []\n self.args = args\n if not SCons.Util.is_List(files):\n if files:\n files = [ files ]\n else:\n files = []\n self.files = files\n self.unknown = {}\n\n # create the singleton instance\n if is_global:\n self=Variables.instance\n\n if not Variables.instance:\n Variables.instance=self\n\n def _do_add(self, key, help=\"\", default=None, validator=None, converter=None):\n class Variable(object):\n pass\n\n option = Variable()\n\n # if we get a list or a tuple, we take the first element as the\n # option key and store the remaining in aliases.\n if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key):\n option.key = key[0]\n option.aliases = key[1:]\n else:\n option.key = key\n option.aliases = [ key ]\n option.help = help\n option.default = default\n option.validator = validator\n option.converter = converter\n\n self.options.append(option)\n \n # options might be added after the 'unknown' dict has been set up,\n # so we remove the key and all its aliases from that dict\n for alias in list(option.aliases) + [ option.key ]:\n if alias in self.unknown:\n del self.unknown[alias]\n\n def keys(self):\n \"\"\"\n Returns the keywords for the options\n \"\"\"\n return [o.key for o in self.options]\n\n def Add(self, key, help=\"\", default=None, validator=None, converter=None, **kw):\n \"\"\"\n Add an option.\n\n key - the name of the variable, or a list or tuple of arguments\n help - optional help text for the options\n default - optional default value\n validator - optional function that is called to validate the option's value\n Called with (key, value, environment)\n converter - optional function that is called to convert the option's value before\n putting it in the environment.\n \"\"\"\n\n if SCons.Util.is_List(key) or isinstance(key, tuple):\n self._do_add(*key)\n return\n\n if not SCons.Util.is_String(key) or \\\n not SCons.Environment.is_valid_construction_var(key):\n raise SCons.Errors.UserError(\"Illegal Variables.Add() key `%s'\" % str(key))\n\n self._do_add(key, help, default, validator, converter)\n\n def AddVariables(self, *optlist):\n \"\"\"\n Add a list of options.\n\n Each list element is a tuple/list of arguments to be passed on\n to the underlying method for adding options.\n\n Example:\n opt.AddVariables(\n ('debug', '', 0),\n ('CC', 'The C compiler'),\n ('VALIDATE', 'An option for testing validation', 'notset',\n validator, None),\n )\n \"\"\"\n for o in optlist:\n self._do_add(*o)\n\n\n def Update(self, env, args=None):\n \"\"\"\n Update an environment with the option variables.\n\n env - the environment to update.\n \"\"\"\n\n values = {}\n\n # first set the defaults:\n for option in self.options:\n if not option.default is None:\n values[option.key] = option.default\n\n # next set the value specified in the options file\n for filename in self.files:\n if os.path.exists(filename):\n dir = os.path.split(os.path.abspath(filename))[0]\n if dir:\n sys.path.insert(0, dir)\n try:\n values['__name__'] = filename\n exec open(filename, 'rU').read() in {}, values\n finally:\n if dir:\n del sys.path[0]\n del values['__name__']\n\n # set the values specified on the command line\n if args is None:\n args = self.args\n\n for arg, value in args.items():\n added = False\n for option in self.options:\n if arg in list(option.aliases) + [ option.key ]:\n values[option.key] = value\n added = True\n if not added:\n self.unknown[arg] = value\n\n # put the variables in the environment:\n # (don't copy over variables that are not declared as options)\n for option in self.options:\n try:\n env[option.key] = values[option.key]\n except KeyError:\n pass\n\n # Call the convert functions:\n for option in self.options:\n if option.converter and option.key in values:\n value = env.subst('${%s}'%option.key)\n try:\n try:\n env[option.key] = option.converter(value)\n except TypeError:\n env[option.key] = option.converter(value, env)\n except ValueError, x:\n raise SCons.Errors.UserError('Error converting option: %s\\n%s'%(option.key, x))\n\n\n # Finally validate the values:\n for option in self.options:\n if option.validator and option.key in values:\n option.validator(option.key, env.subst('${%s}'%option.key), env)\n\n def UnknownVariables(self):\n \"\"\"\n Returns any options in the specified arguments lists that\n were not known, declared options in this object.\n \"\"\"\n return self.unknown\n\n def Save(self, filename, env):\n \"\"\"\n Saves all the options in the given file. This file can\n then be used to load the options next run. This can be used\n to create an option cache file.\n\n filename - Name of the file to save into\n env - the environment get the option values from\n \"\"\"\n\n # Create the file and write out the header\n try:\n fh = open(filename, 'w')\n\n try:\n # Make an assignment in the file for each option\n # within the environment that was assigned a value\n # other than the default.\n for option in self.options:\n try:\n value = env[option.key]\n try:\n prepare = value.prepare_to_store\n except AttributeError:\n try:\n eval(repr(value))\n except KeyboardInterrupt:\n raise\n except:\n # Convert stuff that has a repr() that\n # cannot be evaluated into a string\n value = SCons.Util.to_String(value)\n else:\n value = prepare()\n\n defaultVal = env.subst(SCons.Util.to_String(option.default))\n if option.converter:\n defaultVal = option.converter(defaultVal)\n\n if str(env.subst('${%s}' % option.key)) != str(defaultVal):\n fh.write('%s = %s\\n' % (option.key, repr(value)))\n except KeyError:\n pass\n finally:\n fh.close()\n\n except IOError, x:\n raise SCons.Errors.UserError('Error writing options to file: %s\\n%s' % (filename, x))\n\n def GenerateHelpText(self, env, sort=None):\n \"\"\"\n Generate the help text for the options.\n\n env - an environment that is used to get the current values\n of the options.\n \"\"\"\n\n if sort:\n options = sorted(self.options, key=lambda x: x.key)\n else:\n options = self.options\n\n def format(opt, self=self, env=env):\n if opt.key in env:\n actual = env.subst('${%s}' % opt.key)\n else:\n actual = None\n return self.FormatVariableHelpText(env, opt.key, opt.help, opt.default, actual, opt.aliases)\n lines = [_f for _f in map(format, options) if _f]\n\n return ''.join(lines)\n\n format = '\\n%s: %s\\n default: %s\\n actual: %s\\n'\n format_ = '\\n%s: %s\\n default: %s\\n actual: %s\\n aliases: %s\\n'\n\n def FormatVariableHelpText(self, env, key, help, default, actual, aliases=[]):\n # Don't display the key name itself as an alias.\n aliases = [a for a in aliases if a != key]\n if len(aliases)==0:\n return self.format % (key, help, default, actual)\n else:\n return self.format_ % (key, help, default, actual, aliases)\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab tabstop=4 shiftwidth=4:\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203640,"cells":{"repo_name":{"kind":"string","value":"amenonsen/ansible"},"path":{"kind":"string","value":"test/units/modules/network/f5/test_bigip_monitor_external.py"},"copies":{"kind":"string","value":"22"},"size":{"kind":"string","value":"3318"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# Copyright: (c) 2017, F5 Networks Inc.\n# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\nimport json\nimport pytest\nimport sys\n\nif sys.version_info < (2, 7):\n pytestmark = pytest.mark.skip(\"F5 Ansible modules require Python >= 2.7\")\n\nfrom ansible.module_utils.basic import AnsibleModule\n\ntry:\n from library.modules.bigip_monitor_external import ModuleParameters\n from library.modules.bigip_monitor_external import ModuleManager\n from library.modules.bigip_monitor_external import ArgumentSpec\n\n # In Ansible 2.8, Ansible changed import paths.\n from test.units.compat import unittest\n from test.units.compat.mock import Mock\n\n from test.units.modules.utils import set_module_args\nexcept ImportError:\n from ansible.modules.network.f5.bigip_monitor_external import ModuleParameters\n from ansible.modules.network.f5.bigip_monitor_external import ModuleManager\n from ansible.modules.network.f5.bigip_monitor_external import ArgumentSpec\n\n # Ansible 2.8 imports\n from units.compat import unittest\n from units.compat.mock import Mock\n\n from units.modules.utils import set_module_args\n\n\nfixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')\nfixture_data = {}\n\n\ndef load_fixture(name):\n path = os.path.join(fixture_path, name)\n\n if path in fixture_data:\n return fixture_data[path]\n\n with open(path) as f:\n data = f.read()\n\n try:\n data = json.loads(data)\n except Exception:\n pass\n\n fixture_data[path] = data\n return data\n\n\nclass TestParameters(unittest.TestCase):\n def test_module_parameters(self):\n args = dict(\n name='foo',\n parent='parent',\n ip='10.10.10.10',\n port=80,\n interval=20,\n timeout=30,\n partition='Common'\n )\n\n p = ModuleParameters(params=args)\n assert p.name == 'foo'\n assert p.parent == '/Common/parent'\n assert p.ip == '10.10.10.10'\n assert p.type == 'external'\n assert p.port == 80\n assert p.destination == '10.10.10.10:80'\n assert p.interval == 20\n assert p.timeout == 30\n\n\nclass TestManager(unittest.TestCase):\n\n def setUp(self):\n self.spec = ArgumentSpec()\n\n def test_create_monitor(self, *args):\n set_module_args(dict(\n name='foo',\n parent='parent',\n ip='10.10.10.10',\n port=80,\n interval=20,\n timeout=30,\n partition='Common',\n provider=dict(\n server='localhost',\n password='password',\n user='admin'\n )\n ))\n\n module = AnsibleModule(\n argument_spec=self.spec.argument_spec,\n supports_check_mode=self.spec.supports_check_mode\n )\n\n # Override methods in the specific type of manager\n mm = ModuleManager(module=module)\n mm.exists = Mock(side_effect=[False, True])\n mm.create_on_device = Mock(return_value=True)\n\n results = mm.exec_module()\n\n assert results['changed'] is True\n assert results['parent'] == '/Common/parent'\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203641,"cells":{"repo_name":{"kind":"string","value":"jplusui/jplusui.github.com"},"path":{"kind":"string","value":"apps/node/node_modules/npm/node_modules/node-gyp/gyp/test/subdirectory/gyptest-SYMROOT-all.py"},"copies":{"kind":"string","value":"399"},"size":{"kind":"string","value":"1269"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# Copyright (c) 2009 Google Inc. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"\nVerifies building a target and a subsidiary dependent target from a\n.gyp file in a subdirectory, without specifying an explicit output build\ndirectory, and using the generated solution or project file at the top\nof the tree as the entry point.\n \nThe configuration sets the Xcode SYMROOT variable and uses --depth=\nto make Xcode behave like the other build tools--that is, put all\nbuilt targets in a single output build directory at the top of the tree.\n\"\"\"\n\nimport TestGyp\n\ntest = TestGyp.TestGyp()\n\ntest.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src')\n\ntest.relocate('src', 'relocate/src')\n\n# Suppress the test infrastructure's setting SYMROOT on the command line.\ntest.build('prog1.gyp', test.ALL, SYMROOT=None, chdir='relocate/src')\n\ntest.run_built_executable('prog1',\n stdout=\"Hello from prog1.c\\n\",\n chdir='relocate/src')\ntest.run_built_executable('prog2',\n stdout=\"Hello from prog2.c\\n\",\n chdir='relocate/src')\n\ntest.pass_test()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203642,"cells":{"repo_name":{"kind":"string","value":"CiviWiki/OpenCiviWiki"},"path":{"kind":"string","value":"project/api/migrations/0005_auto_20170109_1813.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1092"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"api\", \"0004_auto_20161230_0412\"),\n ]\n\n operations = [\n migrations.AddField(\n model_name=\"civi\",\n name=\"related_civis\",\n field=models.ManyToManyField(\n related_name=\"_related_civis_+\", to=\"api.Civi\"\n ),\n ),\n migrations.AlterField(\n model_name=\"civi\",\n name=\"c_type\",\n field=models.CharField(\n default=b\"problem\",\n max_length=31,\n choices=[\n (b\"problem\", b\"Problem\"),\n (b\"cause\", b\"Cause\"),\n (b\"solution\", b\"Solution\"),\n (b\"response\", b\"Response\"),\n ],\n ),\n ),\n migrations.AddField(\n model_name=\"civi\",\n name=\"links\",\n field=models.ManyToManyField(related_name=\"link\", to=\"api.Civi\"),\n ),\n ]\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":203643,"cells":{"repo_name":{"kind":"string","value":"jonghough/SimpleWebsocket"},"path":{"kind":"string","value":"socket/websocket.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"11128"},"content":{"kind":"string","value":"# -- coding: utf-8 --\n\nimport socket\nimport os\nimport base64\nimport hashlib\nimport struct\nfrom array import array\nfrom backports.ssl_match_hostname import match_hostname, CertificateError\n\n'''\nSimple python implementation of the Websocket protocol (RFC6455) for\nwebsocket clients.\nThe goal is to keep the source as compact and readable as possible while\nconforming to the protocol.\n\nauthor: Jonathan Hough\n'''\n\n\n\nclass WebsocketClient ( object ):\n\t''' Websocket client class. '''\n\t\n\t#Globally unique identifier, see RFC6454\n\tGUID = \"258EAFA5-E914-47DA-95CA-C5AB0DC85B11\"\n\n\t#Closing frame status code \n\t#see RFC 6455 Section 4.1 (Status codes)\n\tNORMAL_CLOSURE \t\t= 1000\n\tGOING_AWAY\t\t= 1001\n\tPROTOCOL_ERROR\t\t= 1002\n\tUNSUPPORTED_DATA\t= 1003\n\tRESERVED\t\t= 1004\n\tNO_STATUS_RECEIVED\t= 1005\n\tABNORMAl_CLOSURE\t= 1006\n\tINVALID_DATA\t\t= 1007\n\tPOLICY_VIOLATION\t= 1008\n\tMESSAGE_TOO_BIG\t\t= 1009\n\tMANDATORY_EXT\t\t= 1010\n\tINTERNAL_SERVER_ERR\t= 1011\n\tTLS_HANDSHAKE\t\t= 1015\n\n\n\t#Websocket op-codes\n\t#see RFC 6455 Section 11.8 (Opcodes)\n\tCONTINUATION_FRAME \t= 0x0\n\tTEXT_FRAME\t\t= 0x1\n\tBINARY_FRAME\t\t= 0x2\n\tCLOSE_FRAME\t\t= 0x8\n\tPING_FRAME\t\t= 0x9\n\tPONG_FRAME\t\t= 0xA\n\t#Frame bits\n\tfin_bits\t\t= 0x80 #final bit\n\tres1_bits\t\t= 0x40 #reserved bit 1\n\tres2_bits\t\t= 0x20 #reserved bit 2\n\tres3_bits\t\t= 0x10 #reserved bit 3\n\topcode\t\t\t= 0xF #opcode\n\tlen_mask\t\t= 0x80 #payload length \n\n\n\t#frame payload length bytes \n\tMAX_DATA_NO_EXTENSION \t= 126\n\tDATA_2_BYTE_EXTENSION \t= 1 << 16\n\tDATA_8_BYTE_EXTENSION \t= 1 << 63\n\n\n\t@staticmethod\n\tdef expected_value(val):\n\t\t'''Returns expected base 64 encoded Sha1 hash value of val concatenated with GUID.\n\t\tThis should be the same as the header field Sec-Websocket-Accept, returned from server.'''\n\t\tsha1 = base64.b64encode(hashlib.sha1(val + WebsocketClient.GUID).digest())\n\t\treturn sha1\n\n\t\n\t@staticmethod\n\tdef make_frame(data, opcode):\n\t\t'''Creates text frame to send to websocket server. \n\t\t see RFC6455 Section 5.2.\n\t\t\tFor Python struct.pack formats see: https://docs.python.org/2/library/struct.html#struct.pack\n\t\t'''\n\t\t#Assumes reserved bits are 0\n\t\t#final bit and opcode (first byte)\n\t\tframe = chr(1 << 7 | opcode)\n\t\t#mask bit, payload data length, mask key, paylod data (second + bytes) \n\t\tmask_bit = 1 << 7\n\t\tdatalen = len(data)\t\t\n\t\tif datalen < WebsocketClient.MAX_DATA_NO_EXTENSION:\n\t\t\tframe += chr( mask_bit | datalen )\n\t\telif datalen < WebsocketClient.DATA_2_BYTE_EXTENSION:\n\t\t\tframe += struct.pack('!B', mask_bit | 0x7e) +struct.pack(\"!H\", datalen)\n\t\telse:\n\t\t\tframe += struct.pack('!B', mask_bit | 0x7f) + struct.pack(\"!Q\", datalen) \n\n\t\tprint str(frame)\n\n\t\tkey = os.urandom(4)\n\t\tframe = frame + key + WebsocketClient.mask(key, data)\n\t\treturn frame\n\t\t\n\n\t@staticmethod\n\tdef mask(key, data):\n\t\t''' Masks the data with the given key using the \n\t\t masking method defined in RFC 6455 Section 5.3 '''\n\t\tmasked = []\n\t\tkeybytes = array(\"B\", key)\n\t\tdatabytes = array(\"B\", data) \n\t\tfor i in range(len(databytes)):\n\t\t\tdatabytes[i] ^= keybytes[i % 4]\n\t\treturn databytes.tostring()\n\n\n\n\ndef create_header(socketkey, test, hosturi, port, **kwargs):\n\t'''\n\tCreates the initial websocket creation header.\n\ttest parameter is used for testing, (with echo.websocket.org).\n\t\n\t'''\n\t\n\tif test is True:\n\t\n\t\theader = \"GET /echo HTTP/1.1\\r\\n\"\\\n\t\t\t+\"Upgrade: websocket\\r\\n\"\\\n\t\t\t+\"Connection: Upgrade\\r\\n\"\\\n\t\t\t+\"Host: echo.websocket.org\\r\\n\"\\\n\t\t\t+\"Origin: null\\r\\n\"\\\n\t\t\t+\"Sec-WebSocket-Key: \"+socketkey+\"\\r\\n\"\\\n\t\t\t+\"Sec-WebSocket-Protocol: chat, superchat\\r\\n\"\\\n\t\t\t+\"Sec-WebSocket-Version: 13\\r\\n\\r\\n\"\n\t\treturn header\n\telse:\n\t\tresource = \"/\"\n\t\torigin = \"null\"\n\t\tif kwargs is not None:\n\t\t\tfor key, value in kwargs.iteritems():\n\t\t\t\tif key is \"resource\":\n\t\t\t\t\tresource = value\n\t\t\t\telif key is \"host\":\n\t\t\t\t\thost = value\n\t\t\t\telif key is origin:\n\t\t\t\t\torigin = value\n\t\t\t\t\n\t\theader = \"GET \"+resource+\" HTTP/1.1\\r\\n\"\\\n\t\t\t+\"Upgrade: websocket\\r\\n\"\\\n\t\t\t+\"Connection: Upgrade\\r\\n\"\\\n\t\t\t+\"Host: \"+hosturi+str(port)+\" \\r\\n\"\\\n\t\t\t+\"Origin: \"+origin+\" \\r\\n\"\\\n\t\t\t+\"Sec-WebSocket-Key: \"+socketkey+\"\\r\\n\"\\\n\t\t\t+\"Sec-WebSocket-Version: 13\\r\\n\\r\\n\"\n\t\treturn header\n\n\n\ndef create_header_key():\n\t'''16 bytes. 16 random bytes, base 64 encoded.'''\n\trand \t= os.urandom(16)\n\tencoded = base64.b64encode(rand)\n\treturn encoded\n\ndef expected_value(val):\n\t'''Returns expected base 64 encoded Sha1 hash value of val concatenated with GUID.\n\t This should be the same as the header field Sec-Websocket-Accept, \n\t returned from server.'''\n\tsha1 = base64.b64encode(hashlib.sha1(val+WebsocketClient.GUID).digest())\n\treturn sha1\n\ndef keys_match(headers, key):\n\t'''\n\tChecks whether the key returned by the websocket server in opening handshake is the\n\tsame as the expected value.\n\tsee RFC 6455 Section 4.2 \n\t'''\n\tkvp = {}\n\tfor h in headers:\n\t\tsplit = h.split(':')\n\t\tif len(split) == 1:\n\t\t\tsplit.append(\" \")\n\t\tfor item in split :\n\t\t\titem.strip()\n\t\t\titem.lstrip()\n\t\tkvp[split[0]] = split[1]\n\t\t\t\t\t\n\treturnedkey = kvp['Sec-WebSocket-Accept']\n\tprint returnedkey \n\texpect = expected_value(key)\n\tprint expect\n\tif returnedkey.strip() == expect.strip():\n\t\treturn True\n\telse: return False\n\t\t\t\t\t\t\t\n\t\ndef get_cert(sock, path):\n\tca_certs_path = os.path.join(os.path.dirname(path), 'certfiles.crt')\n\tsslsock = ssl.wrap_socket(sock, ssl.PROTOCOL_SSLv3, ssl.CERT_REQUIRED, ca_certs_path)\n\treturn sslsock\n\nclass WebsocketController(object):\n\t'''\n\tController for websocket functionality. Needs to be passed\n\ton_error, on_close, on_message functions.\n\t'''\n\tdef __init__(self, onerror, onclose, onmessage):\n\t\t'''\n\t\tCreates instance of WebsocketController.\n\t\t'''\n\t\t#callbacks\n\t\tself.on_error \t= onerror\n\t\tself.on_close \t= onclose\n\t\tself.on_message = onmessage\n\t\t#the socket!\n\t\tself.sock \t= None\n\t\t#opening and closing flags\n\t\tself.handshake \t= False\t\t#true if complete opening handshake\n\t\tself.closing \t= False \t#true if begin closing handshake\n\t\tself.response_buffer = []\n\t\tself.cont_frame = \"\"\n\t\tself.fragment \t= False \t#flag for fragmented message expectation.\n\t\tself.is_closed \t= True\t\t#true prior to connection and after connection closed by either endpoint.\n\t\tself.protocol \t= \"ws\"\n\t\tself.uri \t= \"\"\n\t\tself.port \t= \"80\"\t\t#default value\n\tdef process_frame(self, sock, buf):\n\t\t''' Processes recieved frame. '''\n\t\tframeholder = FrameHolder(buf)\n\t\tmsg = frameholder.message\n\t\tif frameholder.valid_frame is False:\n\t\t\tpass\n\t\telse:\n\t\t\tif frameholder.finbit == 0 and self.fragment is False and frameholder.opcode != 0x0: #RFC6455 section 5.4 (fragmentation)\n\t\t\t\tself.fragment = True\n\t\t\t\tself.cont_frame = \"\"\n\t\t\t\tself.cont_frame += frameholder.message\n\t\t\telif frameholder.opcode == 0x8 and self.closing is False: # closing frame. Remote endpoint closed connection.\n\t\t\t\tcls = WebsocketClient.make_frame('1000', 0x8)\n\t\t\t\tsock.sendall(cls)\n\t\t\t\tself.is_closed = True\n\t\t\t\tif self.on_close is not None:\n\t\t\t\t\tself.on_close()\n\t\t\telif frameholder.opcode == 0xA: #ping frame, reply pong and vice versa: RFC 6455 5.5.2 and 5.5.3\n\t\t\t\tframe = WebsocketClient.make_frame(frameholder.message, 0x9)\n\t\t\t\tsock.sendall(frame)\n\t\t\telif frameholder.opcode == 0x9:\n\t\t\t\tframe = WebsocketClient.make_frame(frameholder.message, 0xA)\n\t\t\t\tsock.sendall(frame)\n\t\t\telif frameholder.opcode == 0x1: #message\n\t\t\t\tself.response_buffer.append(msg)\n\t\t\telif frameholder.opcode == 0x0: #continuation fragment\n\t\t\t\tif self.fragment is False:\n\t\t\t\t\tpass #TODO throw an error\n\t\t\t\telif frameholder.finbit == 0:\n\t\t\t\t\tself.cont_frame += frameholder.message\n\t\t\t\telif frameholder.finbit == 1:\n\t\t\t\t\tself.cont_frame += frameholder.message\n\t\t\t\t\tmsg = self.cont_frame\n\t\t\t\t\tself.response_buffer.append(msg)\n\t\t\t\t\tself.fragment = False #reset, fragmented message finished.\n\t\tif self.fragment is False:\t\n\t\t\treturn msg\n\t\telse: return None\n\t\n\tdef begin_connection(self, uri, port = None):\n\t\t''' Starts the websocket connection with initial handshake.\n\t\tIf not port number is given the default value of 80 will\n \t\tbe used.'''\n\t\tif uri is not None:\n\t\t\tprtl = uri.split('://')\n\t\t\tself.protocol = prtl[0]\n\t\t\tprint \"protocol is \"+str(self.protocol)\n\t\t\tself.uri = ''.join(prtl[1:])\n\t\tif port is not None:\n\t\t\tself.port = port\n\t\tkey \t= create_header_key()\n\t\tself.is_closed = False;\n\t\ttry:\n\t\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\texcept socket.error, e:\n\t\t\tprint \"Error \"+str(e)\n\n\t\ttry:\n\t\t\thost = socket.gethostbyname(self.uri)\n\t\t\tif self.protocol is \"wss\" and port is None:\n\t\t\t\tself.port = 443\n\t\t\taddr = (self.uri, self.port)\n\t\t\tprint self.protocol\n\t\t\tself.sock.connect(addr)\n\t\texcept socket.gaierror, e:\n\t\t\tprint \"Error \"+str(e)\n\n\t\texcept socket.error, e:\n\t\t\tprint \"Error \"+str(e)\n\n\n\t\ttry:\n\t\t\tself.sock.sendall(create_header(key,False, self.uri, self.port))\n\t\t\t\n\t\texcept socket.error, e:\n\t\t\tprint \"Error \"+str(e)\n\n\t\twhile self.is_closed is False:\n\n\t\t\ttry:\n\t\t\t\tbuf = self.sock.recv(4096)\n\t\t\t\tif buf is not None and len(buf) > 0:\n\t\t\t\t\tif self.handshake is True:\n\t\t\t\t\t\tmsg = self.process_frame(self.sock, buf)\n\t\t\t\t\t\t\n\t\t\t\t\t\tprint \"Returned message: \"+msg\n\t\t\t\t\t\t\n\t\t\t\t\t#for handshake frame from server\t\t\t\t\n\t\t\t\t\tif self.handshake is False:\n\t\t\t\t\t\theaders = buf.split('\\r\\n')\n\n\t\t\t\t\t\tkeymatch = keys_match(headers, key)\n\t\t\t\t\t\tif keymatch is True:\n\t\t\t\t\t\t\tself.handshake = True # handshake complete\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.sock.close()\n\t\t\t\t\t\t\tself.is_closing = True\n\t\t\t\t\t\t\tself.is_closed = True\n\t\t\t\t\t\t\tif self.on_close is not None:\n\t\t\t\t\t\t\t\tself.on_close()\n\t\t\t\t\t\t\t#TODO throw error. Keys didn't match\n\t\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\texcept socket.error, e:\n\t\t\t\tprint \"Error: \"+str(e)\n\n\tdef close(self, reason =''):\n\t\t'''Send closing frame. Connection close initiated by local endpoint.'''\n\t\tif self.closing is False:\n\t\t\tself.sock.sendall(WebsocketClient.make_frame(reason, 0x8))\n\t\t\tself.closing is True\n\n\tdef send_message(self, message):\n\t\t'''Sends message string to remote endpoint'''\n\t\tif self.closing is False and self.is_closed is False:\n\t\t\tself.sock.sendall(WebsocketClient.make_frame(unicode(message).encode(\"unicode_escape\"), 0x1))\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef error_in_frame(self, error):\n\t\tif on_frame_error is not None:\n\t\t\ton_frame_error(error)\n\n\nclass FrameHolder(object):\n\t''' Convenient holder class for received frames. \n\t Validates and gets info from raw frame bytes\n\t'''\n\tdef __init__(self, rawframe):\n\t\tself.valid_frame \t= True\n\t\tself.finbit \t\t= None\n\t\tself.opcode \t\t= None\n\t\tself.msg_length \t= None\n\t\tself.message \t\t= None\n\n\t\tframe = array('B', rawframe)\n\t\tfirst_byte = frame[0]\n\t\t\n\t\tself.finbit = first_byte >> 7 \n\t\tself.finbit = self.finbit & 0xFF\n\t\t\n\t\t#opcode is final 4 bits of first byte.\n\t\tself.opcode = frame[0] & 0xF\n\n\t\tlength = frame[1] #first bit (masking bit) must be zero so don't bother to bit shift. \n\t\tself.msg_length = length & 0xFF #get length of payload\n\t\t\n\t\tself.message = 0\n\t\t# get the payload length\n\t\t# extension\n\t\tif length == 126: \t\t\t\n\t\t\tself.message = frame[4:]\n\t\t# 8 byte extension\n\t\telif length == 127: \t\t\n\t\t\tself.message = frame[10:]\n\t\t# standard\n\t\telse:\t\t\t\t\t\n\t\t\tself.message = frame[2:] \n\t\t#payload message.\n\t\tself.message = self.message.tostring().encode('unicode_escape').decode('unicode_escape')\n\t\tprint \"msg = \"+self.message\n\n\n# below is retained for some tests.\n'''\ndef main():\n\twc = WebsocketController(None, None, None)\n\twc.begin_connection()\n\n\n\nif __name__ == '__main__':\n\tmain()'''\n"},"license":{"kind":"string","value":"bsd-2-clause"}}},{"rowIdx":203644,"cells":{"repo_name":{"kind":"string","value":"googleinterns/connectivity-test"},"path":{"kind":"string","value":"src/derivation_declarations/generators/BGP_generators.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2030"},"content":{"kind":"string","value":"# Copyright 2020 Google LLC\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# https://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any\n\nimport proto.cloud_network_model_pb2 as entities\nimport proto.derivation_rules_pb2 as derivation\nimport proto.rules_pb2 as rules\nfrom src.utils.derivation_utils import findNetwork, listBgpPeers, REGION_LIST, clearNextHopsInRoute\nfrom src.utils.ip_utils import ipv4RangeToStr\n\nDestination = derivation.DestinationAndGeneration.Destination\nDestinationContext = derivation.DestinationAndGeneration.DestinationContext\n\n\ndef BgpPeersGeneratorCommon(derived: rules.Route, context: DestinationContext,\n model: entities.Model) -> rules.Route:\n clearNextHopsInRoute(derived)\n derived.next_hop_tunnel = context.peer_info\n # derived.region currently contains the original route's region\n derived.from_local = derived.region == context.region\n derived.region = context.region\n derived.route_type = rules.Route.DYNAMIC\n derived.url = \"dynamic-route-\" + ipv4RangeToStr(derived.dest_range)\n return derived\n\n\ndef BgpPeersGenerator(derived: rules.Route, context: DestinationContext,\n model: entities.Model) -> rules.Route:\n derived = BgpPeersGeneratorCommon(derived, context, model)\n derived.from_local = True\n return derived\n\n\ndef OtherRegionsWhenGlobalRoutingGenerator(derived: rules.Route, context: DestinationContext,\n model: entities.Model) -> rules.Route:\n return BgpPeersGeneratorCommon(derived, context, model)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203645,"cells":{"repo_name":{"kind":"string","value":"bunnyitvn/webptn"},"path":{"kind":"string","value":"build/lib.linux-i686-2.7/django/contrib/gis/geos/point.py"},"copies":{"kind":"string","value":"224"},"size":{"kind":"string","value":"4351"},"content":{"kind":"string","value":"from ctypes import c_uint\nfrom django.contrib.gis.geos.error import GEOSException\nfrom django.contrib.gis.geos.geometry import GEOSGeometry\nfrom django.contrib.gis.geos import prototypes as capi\nfrom django.utils import six\nfrom django.utils.six.moves import xrange\n\nclass Point(GEOSGeometry):\n _minlength = 2\n _maxlength = 3\n\n def __init__(self, x, y=None, z=None, srid=None):\n \"\"\"\n The Point object may be initialized with either a tuple, or individual\n parameters.\n\n For Example:\n >>> p = Point((5, 23)) # 2D point, passed in as a tuple\n >>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters\n \"\"\"\n if isinstance(x, (tuple, list)):\n # Here a tuple or list was passed in under the `x` parameter.\n ndim = len(x)\n coords = x\n elif isinstance(x, six.integer_types + (float,)) and isinstance(y, six.integer_types + (float,)):\n # Here X, Y, and (optionally) Z were passed in individually, as parameters.\n if isinstance(z, six.integer_types + (float,)):\n ndim = 3\n coords = [x, y, z]\n else:\n ndim = 2\n coords = [x, y]\n else:\n raise TypeError('Invalid parameters given for Point initialization.')\n\n point = self._create_point(ndim, coords)\n\n # Initializing using the address returned from the GEOS\n # createPoint factory.\n super(Point, self).__init__(point, srid=srid)\n\n def _create_point(self, ndim, coords):\n \"\"\"\n Create a coordinate sequence, set X, Y, [Z], and create point\n \"\"\"\n if ndim < 2 or ndim > 3:\n raise TypeError('Invalid point dimension: %s' % str(ndim))\n\n cs = capi.create_cs(c_uint(1), c_uint(ndim))\n i = iter(coords)\n capi.cs_setx(cs, 0, next(i))\n capi.cs_sety(cs, 0, next(i))\n if ndim == 3: capi.cs_setz(cs, 0, next(i))\n\n return capi.create_point(cs)\n\n def _set_list(self, length, items):\n ptr = self._create_point(length, items)\n if ptr:\n capi.destroy_geom(self.ptr)\n self._ptr = ptr\n self._set_cs()\n else:\n # can this happen?\n raise GEOSException('Geometry resulting from slice deletion was invalid.')\n\n def _set_single(self, index, value):\n self._cs.setOrdinate(index, 0, value)\n\n def __iter__(self):\n \"Allows iteration over coordinates of this Point.\"\n for i in xrange(len(self)):\n yield self[i]\n\n def __len__(self):\n \"Returns the number of dimensions for this Point (either 0, 2 or 3).\"\n if self.empty: return 0\n if self.hasz: return 3\n else: return 2\n\n def _get_single_external(self, index):\n if index == 0:\n return self.x\n elif index == 1:\n return self.y\n elif index == 2:\n return self.z\n\n _get_single_internal = _get_single_external\n\n def get_x(self):\n \"Returns the X component of the Point.\"\n return self._cs.getOrdinate(0, 0)\n\n def set_x(self, value):\n \"Sets the X component of the Point.\"\n self._cs.setOrdinate(0, 0, value)\n\n def get_y(self):\n \"Returns the Y component of the Point.\"\n return self._cs.getOrdinate(1, 0)\n\n def set_y(self, value):\n \"Sets the Y component of the Point.\"\n self._cs.setOrdinate(1, 0, value)\n\n def get_z(self):\n \"Returns the Z component of the Point.\"\n if self.hasz:\n return self._cs.getOrdinate(2, 0)\n else:\n return None\n\n def set_z(self, value):\n \"Sets the Z component of the Point.\"\n if self.hasz:\n self._cs.setOrdinate(2, 0, value)\n else:\n raise GEOSException('Cannot set Z on 2D Point.')\n\n # X, Y, Z properties\n x = property(get_x, set_x)\n y = property(get_y, set_y)\n z = property(get_z, set_z)\n\n ### Tuple setting and retrieval routines. ###\n def get_coords(self):\n \"Returns a tuple of the point.\"\n return self._cs.tuple\n\n def set_coords(self, tup):\n \"Sets the coordinates of the point with the given tuple.\"\n self._cs[0] = tup\n\n # The tuple and coords properties\n tuple = property(get_coords, set_coords)\n coords = tuple\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203646,"cells":{"repo_name":{"kind":"string","value":"Jimaklas/grd2poly"},"path":{"kind":"string","value":"grd2poly.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1764"},"content":{"kind":"string","value":"import win32com.client\nfrom input import GRD_FILE\n\n# # Generate modules of necessary typelibs (AutoCAD Civil 3D 2008)\n# comtypes.client.GetModule(\"C:\\\\Program Files\\\\Common Files\\\\Autodesk Shared\\\\acax17enu.tlb\")\n# comtypes.client.GetModule(\"C:\\\\Program Files\\\\AutoCAD Civil 3D 2008\\\\AecXBase.tlb\")\n# comtypes.client.GetModule(\"C:\\\\Program Files\\\\AutoCAD Civil 3D 2008\\\\AecXUIBase.tlb\")\n# comtypes.client.GetModule(\"C:\\\\Program Files\\\\AutoCAD Civil 3D 2008\\\\Civil\\\\AeccXLand.tlb\")\n# comtypes.client.GetModule(\"C:\\\\Program Files\\\\AutoCAD Civil 3D 2008\\\\Civil\\\\AeccXUiLand.tlb\")\n# raise SystemExit\n\n# Get running instance of the AutoCAD application\nacadApp = win32com.client.Dispatch(\"AutoCAD.Application\")\naeccApp = acadApp.GetInterfaceObject(\"AeccXUiLand.AeccApplication.5.0\")\n\n# Document object\ndoc = aeccApp.ActiveDocument\nalignment, point_clicked = doc.Utility.GetEntity(None, None, Prompt=\"Select an alignment:\")\n\ncommand = \"pl \"\nf = open(GRD_FILE, \"r\")\nline = f.readline()\nwhile 1:\n try:\n line = f.readline()\n section, station = line.strip().split()\n station = float(station)\n line = f.readline()\n while line[0] != \"*\":\n offset, h = line.strip().split()\n offset = float(offset)\n h = float(h)\n # draw the next polyline vertex\n print \"Point at station %s (section %s) - offset %s\" % (station, section, offset)\n x, y = alignment.PointLocation(station, offset)\n command = command + \"%s,%s \" % (x, y)\n line = f.readline()\n except ValueError: # raised when trying to read past EOF (why not IOError? - need to think on it)\n break\n\ndoc.SendCommand(command + \" \")\nf.close()\n# x, y = alignment.PointLocation(0.0, 10.0)\n# print x, y\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203647,"cells":{"repo_name":{"kind":"string","value":"Bakterija/mmplayer"},"path":{"kind":"string","value":"mmplayer/media_player/providers/audio_gstplayer_modified.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1047"},"content":{"kind":"string","value":"from kivy.core.audio.audio_gstplayer import SoundGstplayer\nfrom kivy.logger import Logger\nfrom kivy.compat import PY2\nfrom os.path import realpath\nfrom kivy.lib.gstplayer import GstPlayer, get_gst_version\nif PY2:\n from urllib import pathname2url\nelse:\n from urllib.request import pathname2url\n\n\ndef _on_gstplayer_message(mtype, message):\n if mtype == 'error':\n Logger.error('AudioGstplayer: {}'.format(message))\n elif mtype == 'warning':\n Logger.warning('AudioGstplayer: {}'.format(message))\n elif mtype == 'info':\n Logger.info('AudioGstplayer: {}'.format(message))\n\n\nclass SoundGstplayerModified(SoundGstplayer):\n '''This is a modified SoundGstplayer that works'''\n\n def load(self, uri):\n self.unload()\n\n if not uri:\n return\n if not '://' in uri:\n uri = 'file:' + pathname2url(realpath(uri))\n\n self.player = GstPlayer(uri, None, self._on_gst_eos_sync,\n _on_gstplayer_message)\n self.player.load()\n return self\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203648,"cells":{"repo_name":{"kind":"string","value":"PaulWay/spacewalk"},"path":{"kind":"string","value":"search-server/spacewalk-search/scripts/search.py"},"copies":{"kind":"string","value":"19"},"size":{"kind":"string","value":"2581"},"content":{"kind":"string","value":"#!/usr/bin/python\n\nimport xmlrpclib\nfrom optparse import OptionParser\n\nindexName = \"package\"\nusage = \"usage: %prog [options] search term\"\ndesc = \"%prog searches for package (default) or systems with the given \\\nsearch criteria\"\n\nparser = OptionParser(usage=usage, description=desc)\nparser.add_option(\"--sessionid\", dest=\"sessionid\", type=\"int\", help=\"PXT sessionid\")\nparser.add_option(\"--package\", action=\"store_true\", dest=\"package\",\n help=\"search packages\", default=True)\nparser.add_option(\"--system\", action=\"store_true\", dest=\"system\",\n help=\"search systems\", default=False)\nparser.add_option(\"--indexName\", dest=\"indexName\", type=\"string\",\n help=\"lucene index name to search ex: package server hwdevice snapshotTag errata\")\nparser.add_option(\"--serverAddr\", dest=\"serverAddr\", type=\"string\", default=\"localhost\",\n help=\"Server to authenticate to, NOT WHERE SEARCH SERVER RUNS\")\nparser.add_option(\"--username\", dest=\"username\", type=\"string\", help=\"username\")\nparser.add_option(\"--password\", dest=\"password\", type=\"string\", help=\"password\")\nparser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\", default=False,\n help=\"enable debug output\")\n\n(options, terms) = parser.parse_args()\nif len(terms) < 1:\n parser.error(\"please supply a search term\\n\" + str(parser.print_help()))\n\nif not options.sessionid and (not options.username or not options.password):\n print parser.print_help()\n parser.exit()\n\n\nif options.package:\n indexName = \"package\"\n\nif options.system:\n indexName = \"server\"\n\nif options.indexName:\n indexName = options.indexName\n\n\nsessionid = None\nif options.sessionid:\n sessionid = options.sessionid\n print \"Using passed in authentication info, sessionid = %s\" % (sessionid)\nelse:\n xmlrpcURL = \"http://%s/rhn/rpc/api\" % (options.serverAddr)\n print \"Getting authentication information from: %s\" % (xmlrpcURL)\n rhnclient = xmlrpclib.Server(xmlrpcURL)\n authSessionId = rhnclient.auth.login(options.username, options.password)\n sessionid = int(authSessionId.split('x')[0])\n\nurl = \"http://localhost:2828/RPC2\"\nprint \"Connecting to SearchServer: (%s)\" % url\nclient = xmlrpclib.Server(url, verbose=options.debug)\n\nterm = \" \".join(terms)\nprint \"searching for (%s) matching criteria: (%s)\" % (indexName, str(term))\nitems = client.index.search(sessionid, indexName, term)\n\nprint \"We got (%d) items back.\" % len(items)\nprint items\n\n#Remember to logout if the user didn't supply the sessionid\nif not options.sessionid:\n rhnclient.auth.logout(authSessionId)\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203649,"cells":{"repo_name":{"kind":"string","value":"MITHyperloopTeam/software_core"},"path":{"kind":"string","value":"software/simulation/sysid/brake_actuator_sysid.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2012"},"content":{"kind":"string","value":"import numpy as np\nfrom scipy.optimize import minimize\n\ninput_file = \"/home/gizatt/mit-hyperloop-es/logs/20161014/velocity_steps_extract.csv\"\n\ndataset_raw = np.loadtxt(input_file, skiprows=1, delimiter=\",\")\n# indexes into that\nj_recv_time = 0\nj_utime = 1\nj_dist = 2 \nj_vel = 3\nj_pressure = 4\nj_m = 5\nj_po = 6\nj_pv = 7\n\ni_p_res = 0 # reservoir pressure\ni_A_cyl = 1 # cylinder area\ni_K_spring = 2 # spring constant\ni_B_cyl = 3 # cylinder movement constant\ni_C_mtr = 4 # motor pressure increase rate\ni_K_pv = 5 \ni_K_po = 6 \n\nx0 = np.array([\n 80, # reservoir pressure\n 100, # cylinder area\n 1000, # spring constant\n 1, # cylinder movement constant\n 100, # motor pressure increase rate\n 100, \n 10000\n ])\n\ndef brake_actuator_fitting_function(x):\n \"\"\"Using x as brake system parameters, calculates error over brake testing dataset\"\"\"\n err = 0\n for i in range(0, dataset_raw.shape[0] - 1):\n # predict next point from current one\n dt = (dataset_raw[i+1, j_utime] - dataset_raw[i, j_utime]) / 1000.0 / 1000.0\n d_vel = x[i_A_cyl]*dataset_raw[i, j_pressure] - x[i_K_spring]\n pred_vel = dataset_raw[i, j_vel] + dt * d_vel\n\n d_pos = dt * d_vel + dataset_raw[i, j_vel]\n pred_pos = dataset_raw[i, j_dist] + dt * d_pos\n\n d_pcyl = -x[i_B_cyl]*dataset_raw[i, j_vel] + \\\n x[i_C_mtr]*dataset_raw[i, j_m] + \\\n -x[i_K_pv]*np.sqrt(dataset_raw[i, j_pv] * np.abs(dataset_raw[i, j_pressure] - x[i_p_res])) * np.sign(dataset_raw[i, j_pressure] - x[i_p_res]) + \\\n -x[i_K_po]*np.sqrt(dataset_raw[i, j_po] * np.abs(dataset_raw[i, j_pressure] - x[i_p_res])) * np.sign(dataset_raw[i, j_pressure] - x[i_p_res])\n pred_pcyl = dataset_raw[i, j_pressure] + dt*d_pcyl\n\n err += (pred_vel - dataset_raw[i+1, j_vel])**2 + (pred_pos - dataset_raw[i+1, j_dist])**2 + (pred_pcyl - dataset_raw[i+1, j_pressure])**2\n print x, \", err \", err\n return err\n\nres = minimize(brake_actuator_fitting_function, x0, method='SLSQP',\n options={'disp': True})\n\nprint \"final: \", res "},"license":{"kind":"string","value":"lgpl-3.0"}}},{"rowIdx":203650,"cells":{"repo_name":{"kind":"string","value":"devendermishrajio/nova_test_latest"},"path":{"kind":"string","value":"nova/cells/filters/__init__.py"},"copies":{"kind":"string","value":"61"},"size":{"kind":"string","value":"2105"},"content":{"kind":"string","value":"# Copyright (c) 2012-2013 Rackspace Hosting\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nCell scheduler filters\n\"\"\"\n\nfrom oslo_log import log as logging\n\nfrom nova import filters\nfrom nova import policy\n\nLOG = logging.getLogger(__name__)\n\n\nclass BaseCellFilter(filters.BaseFilter):\n \"\"\"Base class for cell filters.\"\"\"\n\n def authorized(self, ctxt):\n \"\"\"Return whether or not the context is authorized for this filter\n based on policy.\n The policy action is \"cells_scheduler_filter:\" where \n is the name of the filter class.\n \"\"\"\n name = 'cells_scheduler_filter:' + self.__class__.__name__\n target = {'project_id': ctxt.project_id,\n 'user_id': ctxt.user_id}\n return policy.enforce(ctxt, name, target, do_raise=False)\n\n def _filter_one(self, cell, filter_properties):\n return self.cell_passes(cell, filter_properties)\n\n def cell_passes(self, cell, filter_properties):\n \"\"\"Return True if the CellState passes the filter, otherwise False.\n Override this in a subclass.\n \"\"\"\n raise NotImplementedError()\n\n\nclass CellFilterHandler(filters.BaseFilterHandler):\n def __init__(self):\n super(CellFilterHandler, self).__init__(BaseCellFilter)\n\n\ndef all_filters():\n \"\"\"Return a list of filter classes found in this directory.\n\n This method is used as the default for available scheduler filters\n and should return a list of all filter classes available.\n \"\"\"\n return CellFilterHandler().get_all_classes()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203651,"cells":{"repo_name":{"kind":"string","value":"ph4r05/plyprotobuf"},"path":{"kind":"string","value":"plyproto/parser.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"14479"},"content":{"kind":"string","value":"__author__ = \"Dusan (Ph4r05) Klinec\"\n__copyright__ = \"Copyright (C) 2014 Dusan (ph4r05) Klinec\"\n__license__ = \"Apache License, Version 2.0\"\n__version__ = \"1.0\"\n\nimport ply.lex as lex\nimport ply.yacc as yacc\nfrom .model import *\n\nclass ProtobufLexer(object):\n keywords = ('double', 'float', 'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',\n 'fixed32', 'fixed64', 'sfixed32', 'sfixed64', 'bool', 'string', 'bytes',\n 'message', 'required', 'optional', 'repeated', 'enum', 'extensions', 'max', 'extends', 'extend',\n 'to', 'package', 'service', 'rpc', 'returns', 'true', 'false', 'option', 'import')\n\n tokens = [\n 'NAME',\n 'NUM',\n 'STRING_LITERAL',\n 'LINE_COMMENT', 'BLOCK_COMMENT',\n\n 'LBRACE', 'RBRACE', 'LBRACK', 'RBRACK',\n 'LPAR', 'RPAR', 'EQ', 'SEMI', 'DOT',\n 'STARTTOKEN'\n\n ] + [k.upper() for k in keywords]\n literals = '()+-*/=?:,.^|&~!=[]{};<>@%'\n\n t_NUM = r'[+-]?\\d+'\n t_STRING_LITERAL = r'\\\"([^\\\\\\n]|(\\\\.))*?\\\"'\n\n t_ignore_LINE_COMMENT = '//.*'\n def t_BLOCK_COMMENT(self, t):\n r'/\\*(.|\\n)*?\\*/'\n t.lexer.lineno += t.value.count('\\n')\n\n t_LBRACE = '{'\n t_RBRACE = '}'\n t_LBRACK = '\\\\['\n t_RBRACK = '\\\\]'\n t_LPAR = '\\\\('\n t_RPAR = '\\\\)'\n t_EQ = '='\n t_SEMI = ';'\n t_DOT = '\\\\.'\n t_ignore = ' \\t\\f'\n t_STARTTOKEN = '\\\\+'\n\n def t_NAME(self, t):\n '[A-Za-z_$][A-Za-z0-9_$]*'\n if t.value in ProtobufLexer.keywords:\n #print \"type: %s val %s t %s\" % (t.type, t.value, t)\n t.type = t.value.upper()\n return t\n\n def t_newline(self, t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\n def t_newline2(self, t):\n r'(\\r\\n)+'\n t.lexer.lineno += len(t.value) / 2\n\n def t_error(self, t):\n print(\"Illegal character '{}' ({}) in line {}\".format(t.value[0], hex(ord(t.value[0])), t.lexer.lineno))\n t.lexer.skip(1)\n\nclass LexHelper:\n offset = 0\n def get_max_linespan(self, p):\n defSpan=[1e60, -1]\n mSpan=[1e60, -1]\n for sp in range(0, len(p)):\n csp = p.linespan(sp)\n if csp[0] == 0 and csp[1] == 0:\n if hasattr(p[sp], \"linespan\"):\n csp = p[sp].linespan\n else:\n continue\n if csp == None or len(csp) != 2: continue\n if csp[0] == 0 and csp[1] == 0: continue\n if csp[0] < mSpan[0]: mSpan[0] = csp[0]\n if csp[1] > mSpan[1]: mSpan[1] = csp[1]\n if defSpan == mSpan: return (0,0)\n return tuple([mSpan[0]-self.offset, mSpan[1]-self.offset])\n\n def get_max_lexspan(self, p):\n defSpan=[1e60, -1]\n mSpan=[1e60, -1]\n for sp in range(0, len(p)):\n csp = p.lexspan(sp)\n if csp[0] == 0 and csp[1] == 0:\n if hasattr(p[sp], \"lexspan\"):\n csp = p[sp].lexspan\n else:\n continue\n if csp == None or len(csp) != 2: continue\n if csp[0] == 0 and csp[1] == 0: continue\n if csp[0] < mSpan[0]: mSpan[0] = csp[0]\n if csp[1] > mSpan[1]: mSpan[1] = csp[1]\n if defSpan == mSpan: return (0,0)\n return tuple([mSpan[0]-self.offset, mSpan[1]-self.offset])\n\n def set_parse_object(self, dst, p):\n dst.setLexData(linespan=self.get_max_linespan(p), lexspan=self.get_max_lexspan(p))\n dst.setLexObj(p)\n\nclass ProtobufParser(object):\n tokens = ProtobufLexer.tokens\n offset = 0\n lh = LexHelper()\n\n def setOffset(self, of):\n self.offset = of\n self.lh.offset = of\n\n def p_empty(self, p):\n '''empty :'''\n pass\n\n def p_field_modifier(self,p):\n '''field_modifier : REQUIRED\n | OPTIONAL\n | REPEATED'''\n p[0] = LU.i(p,1)\n\n def p_primitive_type(self, p):\n '''primitive_type : DOUBLE\n | FLOAT\n | INT32\n | INT64\n | UINT32\n | UINT64\n | SINT32\n | SINT64\n | FIXED32\n | FIXED64\n | SFIXED32\n | SFIXED64\n | BOOL\n | STRING\n | BYTES'''\n p[0] = LU.i(p,1)\n\n def p_field_id(self, p):\n '''field_id : NUM'''\n p[0] = LU.i(p,1)\n\n def p_rvalue(self, p):\n '''rvalue : NUM\n | TRUE\n | FALSE'''\n p[0] = LU.i(p,1)\n\n def p_rvalue2(self, p):\n '''rvalue : NAME'''\n p[0] = Name(LU.i(p, 1))\n self.lh.set_parse_object(p[0], p)\n p[0].deriveLex()\n\n def p_field_directive(self, p):\n '''field_directive : LBRACK NAME EQ rvalue RBRACK'''\n p[0] = FieldDirective(Name(LU.i(p, 2)), LU.i(p,4))\n self.lh.set_parse_object(p[0], p)\n\n def p_field_directive_times(self, p):\n '''field_directive_times : field_directive_plus'''\n p[0] = p[1]\n\n def p_field_directive_times2(self, p):\n '''field_directive_times : empty'''\n p[0] = []\n\n def p_field_directive_plus(self, p):\n '''field_directive_plus : field_directive\n | field_directive_plus field_directive'''\n if len(p) == 2:\n p[0] = [LU(p,1)]\n else:\n p[0] = p[1] + [LU(p,2)]\n\n def p_dotname(self, p):\n '''dotname : NAME\n | dotname DOT NAME'''\n if len(p) == 2:\n p[0] = [LU(p,1)]\n else:\n p[0] = p[1] + [LU(p,3)]\n\n # Hack for cases when there is a field named 'message' or 'max'\n def p_fieldName(self, p):\n '''field_name : NAME\n | MESSAGE\n | MAX'''\n p[0] = Name(LU.i(p,1))\n self.lh.set_parse_object(p[0], p)\n p[0].deriveLex()\n\n def p_field_type(self, p):\n '''field_type : primitive_type'''\n p[0] = FieldType(LU.i(p,1))\n self.lh.set_parse_object(p[0], p)\n\n def p_field_type2(self, p):\n '''field_type : dotname'''\n p[0] = DotName(LU.i(p, 1))\n self.lh.set_parse_object(p[0], p)\n p[0].deriveLex()\n\n # Root of the field declaration.\n def p_field_definition(self, p):\n '''field_definition : field_modifier field_type field_name EQ field_id field_directive_times SEMI'''\n p[0] = FieldDefinition(LU.i(p,1), LU.i(p,2), LU.i(p, 3), LU.i(p,5), LU.i(p,6))\n self.lh.set_parse_object(p[0], p)\n\n # Root of the enum field declaration.\n def p_enum_field(self, p):\n '''enum_field : field_name EQ NUM SEMI'''\n p[0] = EnumFieldDefinition(LU.i(p, 1), LU.i(p,3))\n self.lh.set_parse_object(p[0], p)\n\n def p_enum_body_part(self, p):\n '''enum_body_part : enum_field\n | option_directive'''\n p[0] = p[1]\n\n def p_enum_body(self, p):\n '''enum_body : enum_body_part\n | enum_body enum_body_part'''\n if len(p) == 2:\n p[0] = [p[1]]\n else:\n p[0] = p[1] + [p[2]]\n\n def p_enum_body_opt(self, p):\n '''enum_body_opt : empty'''\n p[0] = []\n\n def p_enum_body_opt2(self, p):\n '''enum_body_opt : enum_body'''\n p[0] = p[1]\n\n # Root of the enum declaration.\n # enum_definition ::= 'enum' ident '{' { ident '=' integer ';' }* '}'\n def p_enum_definition(self, p):\n '''enum_definition : ENUM NAME LBRACE enum_body_opt RBRACE'''\n p[0] = EnumDefinition(Name(LU.i(p, 2)), LU.i(p,4))\n self.lh.set_parse_object(p[0], p)\n\n def p_extensions_to(self, p):\n '''extensions_to : MAX'''\n p[0] = ExtensionsMax()\n self.lh.set_parse_object(p[0], p)\n\n def p_extensions_to2(self, p):\n '''extensions_to : NUM'''\n p[0] = LU.i(p, 1)\n\n # extensions_definition ::= 'extensions' integer 'to' integer ';'\n def p_extensions_definition(self, p):\n '''extensions_definition : EXTENSIONS NUM TO extensions_to SEMI'''\n p[0] = ExtensionsDirective(LU.i(p,2), LU.i(p,4))\n self.lh.set_parse_object(p[0], p)\n\n # message_extension ::= 'extend' ident '{' message_body '}'\n def p_message_extension(self, p):\n '''message_extension : EXTEND NAME LBRACE message_body RBRACE'''\n p[0] = MessageExtension(Name(LU.i(p, 2)), LU.i(p,4))\n self.lh.set_parse_object(p[0], p)\n\n def p_message_body_part(self, p):\n '''message_body_part : field_definition\n | enum_definition\n | message_definition\n | extensions_definition\n | message_extension'''\n p[0] = p[1]\n\n # message_body ::= { field_definition | enum_definition | message_definition | extensions_definition | message_extension }*\n def p_message_body(self, p):\n '''message_body : empty'''\n p[0] = []\n\n # message_body ::= { field_definition | enum_definition | message_definition | extensions_definition | message_extension }*\n def p_message_body2(self, p):\n '''message_body : message_body_part\n | message_body message_body_part'''\n if len(p) == 2:\n p[0] = [p[1]]\n else:\n p[0] = p[1] + [p[2]]\n\n # Root of the message declaration.\n # message_definition = MESSAGE_ - ident(\"messageId\") + LBRACE + message_body(\"body\") + RBRACE\n def p_message_definition(self, p):\n '''message_definition : MESSAGE NAME LBRACE message_body RBRACE'''\n p[0] = MessageDefinition(Name(LU.i(p, 2)), LU.i(p,4))\n self.lh.set_parse_object(p[0], p)\n\n # method_definition ::= 'rpc' ident '(' [ ident ] ')' 'returns' '(' [ ident ] ')' ';'\n def p_method_definition(self, p):\n '''method_definition : RPC NAME LPAR NAME RPAR RETURNS LPAR NAME RPAR'''\n p[0] = MethodDefinition(Name(LU.i(p, 2)), Name(LU.i(p, 4)), Name(LU.i(p, 8)))\n self.lh.set_parse_object(p[0], p)\n\n def p_method_definition_opt(self, p):\n '''method_definition_opt : empty'''\n p[0] = []\n\n def p_method_definition_opt2(self, p):\n '''method_definition_opt : method_definition\n | method_definition_opt method_definition'''\n if len(p) == 2:\n p[0] = [p[1]]\n else:\n p[0] = p[1] + [p[2]]\n\n # service_definition ::= 'service' ident '{' method_definition* '}'\n # service_definition = SERVICE_ - ident(\"serviceName\") + LBRACE + ZeroOrMore(Group(method_definition)) + RBRACE\n def p_service_definition(self, p):\n '''service_definition : SERVICE NAME LBRACE method_definition_opt RBRACE'''\n p[0] = ServiceDefinition(Name(LU.i(p, 2)), LU.i(p,4))\n self.lh.set_parse_object(p[0], p)\n\n # package_directive ::= 'package' ident [ '.' ident]* ';'\n def p_package_directive(self,p):\n '''package_directive : PACKAGE dotname SEMI'''\n p[0] = PackageStatement(Name(LU.i(p, 2)))\n self.lh.set_parse_object(p[0], p)\n\n # import_directive = IMPORT_ - quotedString(\"importFileSpec\") + SEMI\n def p_import_directive(self, p):\n '''import_directive : IMPORT STRING_LITERAL SEMI'''\n p[0] = ImportStatement(Literal(LU.i(p,2)))\n self.lh.set_parse_object(p[0], p)\n\n def p_option_rvalue(self, p):\n '''option_rvalue : NUM\n | TRUE\n | FALSE'''\n p[0] = LU(p, 1)\n\n def p_option_rvalue2(self, p):\n '''option_rvalue : STRING_LITERAL'''\n p[0] = Literal(LU(p,1))\n\n def p_option_rvalue3(self, p):\n '''option_rvalue : NAME'''\n p[0] = Name(LU.i(p,1))\n\n # option_directive = OPTION_ - ident(\"optionName\") + EQ + quotedString(\"optionValue\") + SEMI\n def p_option_directive(self, p):\n '''option_directive : OPTION NAME EQ option_rvalue SEMI'''\n p[0] = OptionStatement(Name(LU.i(p, 2)), LU.i(p,4))\n self.lh.set_parse_object(p[0], p)\n\n # topLevelStatement = Group(message_definition | message_extension | enum_definition | service_definition | import_directive | option_directive)\n def p_topLevel(self,p):\n '''topLevel : message_definition\n | message_extension\n | enum_definition\n | service_definition\n | import_directive\n | option_directive'''\n p[0] = p[1]\n\n def p_package_definition(self, p):\n '''package_definition : package_directive'''\n p[0] = p[1]\n\n def p_packages2(self, p):\n '''package_definition : empty'''\n p[0] = []\n\n def p_statements2(self, p):\n '''statements : topLevel\n | statements topLevel'''\n if len(p) == 2:\n p[0] = [p[1]]\n else:\n p[0] = p[1] + [p[2]]\n\n def p_statements(self, p):\n '''statements : empty'''\n p[0] = []\n\n # parser = Optional(package_directive) + ZeroOrMore(topLevelStatement)\n def p_protofile(self, p):\n '''protofile : package_definition statements'''\n p[0] = ProtoFile(LU.i(p,1), LU.i(p,2))\n self.lh.set_parse_object(p[0], p)\n\n # Parsing starting point\n def p_goal(self, p):\n '''goal : STARTTOKEN protofile'''\n p[0] = p[2]\n\n def p_error(self, p):\n print('error: {}'.format(p))\n\nclass ProtobufAnalyzer(object):\n\n def __init__(self):\n self.lexer = lex.lex(module=ProtobufLexer(), optimize=1)\n self.parser = yacc.yacc(module=ProtobufParser(), start='goal', optimize=1)\n\n def tokenize_string(self, code):\n self.lexer.input(code)\n for token in self.lexer:\n print(token)\n\n def tokenize_file(self, _file):\n if type(_file) == str:\n _file = file(_file)\n content = ''\n for line in _file:\n content += line\n return self.tokenize_string(content)\n\n def parse_string(self, code, debug=0, lineno=1, prefix='+'):\n self.lexer.lineno = lineno\n self.parser.offset = len(prefix)\n return self.parser.parse(prefix + code, lexer=self.lexer, debug=debug)\n\n def parse_file(self, _file, debug=0):\n if type(_file) == str:\n _file = file(_file)\n content = ''\n for line in _file:\n content += line\n return self.parse_string(content, debug=debug)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203652,"cells":{"repo_name":{"kind":"string","value":"jaduimstra/nilmtk"},"path":{"kind":"string","value":"nilmtk/dataset_converters/redd/convert_redd.py"},"copies":{"kind":"string","value":"6"},"size":{"kind":"string","value":"5462"},"content":{"kind":"string","value":"from __future__ import print_function, division\nimport pandas as pd\nimport numpy as np\nfrom copy import deepcopy\nfrom os.path import join, isdir, isfile\nfrom os import listdir\nimport re\nfrom sys import stdout\nfrom nilmtk.utils import get_datastore\nfrom nilmtk.datastore import Key\nfrom nilmtk.timeframe import TimeFrame\nfrom nilmtk.measurement import LEVEL_NAMES\nfrom nilmtk.utils import get_module_directory, check_directory_exists\nfrom nilm_metadata import convert_yaml_to_hdf5, save_yaml_to_datastore\n\n\"\"\"\nTODO:\n* The bottleneck appears to be CPU. So could be sped up by using \n multiprocessing module to use multiple CPU cores to load REDD channels in \n parallel.\n\"\"\"\n\n\ndef convert_redd(redd_path, output_filename, format='HDF'):\n \"\"\"\n Parameters\n ----------\n redd_path : str\n The root path of the REDD low_freq dataset.\n output_filename : str\n The destination filename (including path and suffix).\n format : str\n format of output. Either 'HDF' or 'CSV'. Defaults to 'HDF'\n \"\"\"\n\n def _redd_measurement_mapping_func(house_id, chan_id):\n ac_type = 'apparent' if chan_id <= 2 else 'active'\n return [('power', ac_type)]\n \n # Open DataStore\n store = get_datastore(output_filename, format, mode='w')\n\n # Convert raw data to DataStore\n _convert(redd_path, store, _redd_measurement_mapping_func, 'US/Eastern')\n\n # Add metadata\n save_yaml_to_datastore(join(get_module_directory(), \n 'dataset_converters', \n 'redd', \n 'metadata'),\n store)\n store.close()\n\n print(\"Done converting REDD to HDF5!\")\n\ndef _convert(input_path, store, measurement_mapping_func, tz, sort_index=True):\n \"\"\"\n Parameters\n ----------\n input_path : str\n The root path of the REDD low_freq dataset.\n store : DataStore\n The NILMTK DataStore object.\n measurement_mapping_func : function\n Must take these parameters:\n - house_id\n - chan_id\n Function should return a list of tuples e.g. [('power', 'active')]\n tz : str \n Timezone e.g. 'US/Eastern'\n sort_index : bool\n \"\"\"\n\n check_directory_exists(input_path)\n\n # Iterate though all houses and channels\n houses = _find_all_houses(input_path)\n for house_id in houses:\n print(\"Loading house\", house_id, end=\"... \")\n stdout.flush()\n chans = _find_all_chans(input_path, house_id)\n for chan_id in chans:\n print(chan_id, end=\" \")\n stdout.flush()\n key = Key(building=house_id, meter=chan_id)\n measurements = measurement_mapping_func(house_id, chan_id)\n csv_filename = _get_csv_filename(input_path, key)\n df = _load_csv(csv_filename, measurements, tz)\n\n if sort_index:\n df = df.sort_index() # raw REDD data isn't always sorted\n store.put(str(key), df)\n print()\n\n\ndef _find_all_houses(input_path):\n \"\"\"\n Returns\n -------\n list of integers (house instances)\n \"\"\"\n dir_names = [p for p in listdir(input_path) if isdir(join(input_path, p))]\n return _matching_ints(dir_names, '^house_(\\d)$')\n\n\ndef _find_all_chans(input_path, house_id):\n \"\"\"\n Returns\n -------\n list of integers (channels)\n \"\"\"\n house_path = join(input_path, 'house_{:d}'.format(house_id))\n filenames = [p for p in listdir(house_path) if isfile(join(house_path, p))]\n return _matching_ints(filenames, '^channel_(\\d\\d?).dat$')\n\n\ndef _matching_ints(strings, regex):\n \"\"\"Uses regular expression to select and then extract an integer from\n strings.\n\n Parameters\n ----------\n strings : list of strings\n regex : string\n Regular Expression. Including one group. This group is used to\n extract the integer from each string.\n\n Returns\n -------\n list of ints\n \"\"\"\n ints = []\n p = re.compile(regex)\n for string in strings:\n m = p.match(string)\n if m:\n integer = int(m.group(1))\n ints.append(integer)\n ints.sort()\n return ints\n\n\ndef _get_csv_filename(input_path, key_obj):\n \"\"\"\n Parameters\n ----------\n input_path : (str) the root path of the REDD low_freq dataset\n key_obj : (nilmtk.Key) the house and channel to load\n\n Returns\n ------- \n filename : str\n \"\"\"\n assert isinstance(input_path, str)\n assert isinstance(key_obj, Key)\n\n # Get path\n house_path = 'house_{:d}'.format(key_obj.building)\n path = join(input_path, house_path)\n assert isdir(path)\n\n # Get filename\n filename = 'channel_{:d}.dat'.format(key_obj.meter)\n filename = join(path, filename)\n assert isfile(filename)\n\n return filename\n\n\ndef _load_csv(filename, columns, tz):\n \"\"\"\n Parameters\n ----------\n filename : str\n columns : list of tuples (for hierarchical column index)\n tz : str e.g. 'US/Eastern'\n\n Returns\n -------\n dataframe\n \"\"\"\n # Load data\n df = pd.read_csv(filename, sep=' ', names=columns,\n dtype={m:np.float32 for m in columns})\n \n # Modify the column labels to reflect the power measurements recorded.\n df.columns.set_names(LEVEL_NAMES, inplace=True)\n\n # Convert the integer index column to timezone-aware datetime \n df.index = pd.to_datetime(df.index.values, unit='s', utc=True)\n df = df.tz_convert(tz)\n\n return df\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203653,"cells":{"repo_name":{"kind":"string","value":"dcroc16/skunk_works"},"path":{"kind":"string","value":"google_appengine/lib/distutils/distutils/msvccompiler.py"},"copies":{"kind":"string","value":"250"},"size":{"kind":"string","value":"23637"},"content":{"kind":"string","value":"\"\"\"distutils.msvccompiler\n\nContains MSVCCompiler, an implementation of the abstract CCompiler class\nfor the Microsoft Visual Studio.\n\"\"\"\n\n# Written by Perry Stoll\n# hacked by Robin Becker and Thomas Heller to do a better job of\n# finding DevStudio (through the registry)\n\n__revision__ = \"$Id$\"\n\nimport sys\nimport os\nimport string\n\nfrom distutils.errors import (DistutilsExecError, DistutilsPlatformError,\n CompileError, LibError, LinkError)\nfrom distutils.ccompiler import CCompiler, gen_lib_options\nfrom distutils import log\n\n_can_read_reg = 0\ntry:\n import _winreg\n\n _can_read_reg = 1\n hkey_mod = _winreg\n\n RegOpenKeyEx = _winreg.OpenKeyEx\n RegEnumKey = _winreg.EnumKey\n RegEnumValue = _winreg.EnumValue\n RegError = _winreg.error\n\nexcept ImportError:\n try:\n import win32api\n import win32con\n _can_read_reg = 1\n hkey_mod = win32con\n\n RegOpenKeyEx = win32api.RegOpenKeyEx\n RegEnumKey = win32api.RegEnumKey\n RegEnumValue = win32api.RegEnumValue\n RegError = win32api.error\n\n except ImportError:\n log.info(\"Warning: Can't read registry to find the \"\n \"necessary compiler setting\\n\"\n \"Make sure that Python modules _winreg, \"\n \"win32api or win32con are installed.\")\n pass\n\nif _can_read_reg:\n HKEYS = (hkey_mod.HKEY_USERS,\n hkey_mod.HKEY_CURRENT_USER,\n hkey_mod.HKEY_LOCAL_MACHINE,\n hkey_mod.HKEY_CLASSES_ROOT)\n\ndef read_keys(base, key):\n \"\"\"Return list of registry keys.\"\"\"\n\n try:\n handle = RegOpenKeyEx(base, key)\n except RegError:\n return None\n L = []\n i = 0\n while 1:\n try:\n k = RegEnumKey(handle, i)\n except RegError:\n break\n L.append(k)\n i = i + 1\n return L\n\ndef read_values(base, key):\n \"\"\"Return dict of registry keys and values.\n\n All names are converted to lowercase.\n \"\"\"\n try:\n handle = RegOpenKeyEx(base, key)\n except RegError:\n return None\n d = {}\n i = 0\n while 1:\n try:\n name, value, type = RegEnumValue(handle, i)\n except RegError:\n break\n name = name.lower()\n d[convert_mbcs(name)] = convert_mbcs(value)\n i = i + 1\n return d\n\ndef convert_mbcs(s):\n enc = getattr(s, \"encode\", None)\n if enc is not None:\n try:\n s = enc(\"mbcs\")\n except UnicodeError:\n pass\n return s\n\nclass MacroExpander:\n\n def __init__(self, version):\n self.macros = {}\n self.load_macros(version)\n\n def set_macro(self, macro, path, key):\n for base in HKEYS:\n d = read_values(base, path)\n if d:\n self.macros[\"$(%s)\" % macro] = d[key]\n break\n\n def load_macros(self, version):\n vsbase = r\"Software\\Microsoft\\VisualStudio\\%0.1f\" % version\n self.set_macro(\"VCInstallDir\", vsbase + r\"\\Setup\\VC\", \"productdir\")\n self.set_macro(\"VSInstallDir\", vsbase + r\"\\Setup\\VS\", \"productdir\")\n net = r\"Software\\Microsoft\\.NETFramework\"\n self.set_macro(\"FrameworkDir\", net, \"installroot\")\n try:\n if version > 7.0:\n self.set_macro(\"FrameworkSDKDir\", net, \"sdkinstallrootv1.1\")\n else:\n self.set_macro(\"FrameworkSDKDir\", net, \"sdkinstallroot\")\n except KeyError:\n raise DistutilsPlatformError, \\\n (\"\"\"Python was built with Visual Studio 2003;\nextensions must be built with a compiler than can generate compatible binaries.\nVisual Studio 2003 was not found on this system. If you have Cygwin installed,\nyou can try compiling with MingW32, by passing \"-c mingw32\" to setup.py.\"\"\")\n\n p = r\"Software\\Microsoft\\NET Framework Setup\\Product\"\n for base in HKEYS:\n try:\n h = RegOpenKeyEx(base, p)\n except RegError:\n continue\n key = RegEnumKey(h, 0)\n d = read_values(base, r\"%s\\%s\" % (p, key))\n self.macros[\"$(FrameworkVersion)\"] = d[\"version\"]\n\n def sub(self, s):\n for k, v in self.macros.items():\n s = string.replace(s, k, v)\n return s\n\ndef get_build_version():\n \"\"\"Return the version of MSVC that was used to build Python.\n\n For Python 2.3 and up, the version number is included in\n sys.version. For earlier versions, assume the compiler is MSVC 6.\n \"\"\"\n\n prefix = \"MSC v.\"\n i = string.find(sys.version, prefix)\n if i == -1:\n return 6\n i = i + len(prefix)\n s, rest = sys.version[i:].split(\" \", 1)\n majorVersion = int(s[:-2]) - 6\n minorVersion = int(s[2:3]) / 10.0\n # I don't think paths are affected by minor version in version 6\n if majorVersion == 6:\n minorVersion = 0\n if majorVersion >= 6:\n return majorVersion + minorVersion\n # else we don't know what version of the compiler this is\n return None\n\ndef get_build_architecture():\n \"\"\"Return the processor architecture.\n\n Possible results are \"Intel\", \"Itanium\", or \"AMD64\".\n \"\"\"\n\n prefix = \" bit (\"\n i = string.find(sys.version, prefix)\n if i == -1:\n return \"Intel\"\n j = string.find(sys.version, \")\", i)\n return sys.version[i+len(prefix):j]\n\ndef normalize_and_reduce_paths(paths):\n \"\"\"Return a list of normalized paths with duplicates removed.\n\n The current order of paths is maintained.\n \"\"\"\n # Paths are normalized so things like: /a and /a/ aren't both preserved.\n reduced_paths = []\n for p in paths:\n np = os.path.normpath(p)\n # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.\n if np not in reduced_paths:\n reduced_paths.append(np)\n return reduced_paths\n\n\nclass MSVCCompiler (CCompiler) :\n \"\"\"Concrete class that implements an interface to Microsoft Visual C++,\n as defined by the CCompiler abstract class.\"\"\"\n\n compiler_type = 'msvc'\n\n # Just set this so CCompiler's constructor doesn't barf. We currently\n # don't use the 'set_executables()' bureaucracy provided by CCompiler,\n # as it really isn't necessary for this sort of single-compiler class.\n # Would be nice to have a consistent interface with UnixCCompiler,\n # though, so it's worth thinking about.\n executables = {}\n\n # Private class data (need to distinguish C from C++ source for compiler)\n _c_extensions = ['.c']\n _cpp_extensions = ['.cc', '.cpp', '.cxx']\n _rc_extensions = ['.rc']\n _mc_extensions = ['.mc']\n\n # Needed for the filename generation methods provided by the\n # base class, CCompiler.\n src_extensions = (_c_extensions + _cpp_extensions +\n _rc_extensions + _mc_extensions)\n res_extension = '.res'\n obj_extension = '.obj'\n static_lib_extension = '.lib'\n shared_lib_extension = '.dll'\n static_lib_format = shared_lib_format = '%s%s'\n exe_extension = '.exe'\n\n def __init__ (self, verbose=0, dry_run=0, force=0):\n CCompiler.__init__ (self, verbose, dry_run, force)\n self.__version = get_build_version()\n self.__arch = get_build_architecture()\n if self.__arch == \"Intel\":\n # x86\n if self.__version >= 7:\n self.__root = r\"Software\\Microsoft\\VisualStudio\"\n self.__macros = MacroExpander(self.__version)\n else:\n self.__root = r\"Software\\Microsoft\\Devstudio\"\n self.__product = \"Visual Studio version %s\" % self.__version\n else:\n # Win64. Assume this was built with the platform SDK\n self.__product = \"Microsoft SDK compiler %s\" % (self.__version + 6)\n\n self.initialized = False\n\n def initialize(self):\n self.__paths = []\n if \"DISTUTILS_USE_SDK\" in os.environ and \"MSSdk\" in os.environ and self.find_exe(\"cl.exe\"):\n # Assume that the SDK set up everything alright; don't try to be\n # smarter\n self.cc = \"cl.exe\"\n self.linker = \"link.exe\"\n self.lib = \"lib.exe\"\n self.rc = \"rc.exe\"\n self.mc = \"mc.exe\"\n else:\n self.__paths = self.get_msvc_paths(\"path\")\n\n if len (self.__paths) == 0:\n raise DistutilsPlatformError, \\\n (\"Python was built with %s, \"\n \"and extensions need to be built with the same \"\n \"version of the compiler, but it isn't installed.\" % self.__product)\n\n self.cc = self.find_exe(\"cl.exe\")\n self.linker = self.find_exe(\"link.exe\")\n self.lib = self.find_exe(\"lib.exe\")\n self.rc = self.find_exe(\"rc.exe\") # resource compiler\n self.mc = self.find_exe(\"mc.exe\") # message compiler\n self.set_path_env_var('lib')\n self.set_path_env_var('include')\n\n # extend the MSVC path with the current path\n try:\n for p in string.split(os.environ['path'], ';'):\n self.__paths.append(p)\n except KeyError:\n pass\n self.__paths = normalize_and_reduce_paths(self.__paths)\n os.environ['path'] = string.join(self.__paths, ';')\n\n self.preprocess_options = None\n if self.__arch == \"Intel\":\n self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ,\n '/DNDEBUG']\n self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',\n '/Z7', '/D_DEBUG']\n else:\n # Win64\n self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,\n '/DNDEBUG']\n self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',\n '/Z7', '/D_DEBUG']\n\n self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']\n if self.__version >= 7:\n self.ldflags_shared_debug = [\n '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'\n ]\n else:\n self.ldflags_shared_debug = [\n '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'\n ]\n self.ldflags_static = [ '/nologo']\n\n self.initialized = True\n\n # -- Worker methods ------------------------------------------------\n\n def object_filenames (self,\n source_filenames,\n strip_dir=0,\n output_dir=''):\n # Copied from ccompiler.py, extended to return .res as 'object'-file\n # for .rc input file\n if output_dir is None: output_dir = ''\n obj_names = []\n for src_name in source_filenames:\n (base, ext) = os.path.splitext (src_name)\n base = os.path.splitdrive(base)[1] # Chop off the drive\n base = base[os.path.isabs(base):] # If abs, chop off leading /\n if ext not in self.src_extensions:\n # Better to raise an exception instead of silently continuing\n # and later complain about sources and targets having\n # different lengths\n raise CompileError (\"Don't know how to compile %s\" % src_name)\n if strip_dir:\n base = os.path.basename (base)\n if ext in self._rc_extensions:\n obj_names.append (os.path.join (output_dir,\n base + self.res_extension))\n elif ext in self._mc_extensions:\n obj_names.append (os.path.join (output_dir,\n base + self.res_extension))\n else:\n obj_names.append (os.path.join (output_dir,\n base + self.obj_extension))\n return obj_names\n\n # object_filenames ()\n\n\n def compile(self, sources,\n output_dir=None, macros=None, include_dirs=None, debug=0,\n extra_preargs=None, extra_postargs=None, depends=None):\n\n if not self.initialized: self.initialize()\n macros, objects, extra_postargs, pp_opts, build = \\\n self._setup_compile(output_dir, macros, include_dirs, sources,\n depends, extra_postargs)\n\n compile_opts = extra_preargs or []\n compile_opts.append ('/c')\n if debug:\n compile_opts.extend(self.compile_options_debug)\n else:\n compile_opts.extend(self.compile_options)\n\n for obj in objects:\n try:\n src, ext = build[obj]\n except KeyError:\n continue\n if debug:\n # pass the full pathname to MSVC in debug mode,\n # this allows the debugger to find the source file\n # without asking the user to browse for it\n src = os.path.abspath(src)\n\n if ext in self._c_extensions:\n input_opt = \"/Tc\" + src\n elif ext in self._cpp_extensions:\n input_opt = \"/Tp\" + src\n elif ext in self._rc_extensions:\n # compile .RC to .RES file\n input_opt = src\n output_opt = \"/fo\" + obj\n try:\n self.spawn ([self.rc] + pp_opts +\n [output_opt] + [input_opt])\n except DistutilsExecError, msg:\n raise CompileError, msg\n continue\n elif ext in self._mc_extensions:\n\n # Compile .MC to .RC file to .RES file.\n # * '-h dir' specifies the directory for the\n # generated include file\n # * '-r dir' specifies the target directory of the\n # generated RC file and the binary message resource\n # it includes\n #\n # For now (since there are no options to change this),\n # we use the source-directory for the include file and\n # the build directory for the RC file and message\n # resources. This works at least for win32all.\n\n h_dir = os.path.dirname (src)\n rc_dir = os.path.dirname (obj)\n try:\n # first compile .MC to .RC and .H file\n self.spawn ([self.mc] +\n ['-h', h_dir, '-r', rc_dir] + [src])\n base, _ = os.path.splitext (os.path.basename (src))\n rc_file = os.path.join (rc_dir, base + '.rc')\n # then compile .RC to .RES file\n self.spawn ([self.rc] +\n [\"/fo\" + obj] + [rc_file])\n\n except DistutilsExecError, msg:\n raise CompileError, msg\n continue\n else:\n # how to handle this file?\n raise CompileError (\n \"Don't know how to compile %s to %s\" % \\\n (src, obj))\n\n output_opt = \"/Fo\" + obj\n try:\n self.spawn ([self.cc] + compile_opts + pp_opts +\n [input_opt, output_opt] +\n extra_postargs)\n except DistutilsExecError, msg:\n raise CompileError, msg\n\n return objects\n\n # compile ()\n\n\n def create_static_lib (self,\n objects,\n output_libname,\n output_dir=None,\n debug=0,\n target_lang=None):\n\n if not self.initialized: self.initialize()\n (objects, output_dir) = self._fix_object_args (objects, output_dir)\n output_filename = \\\n self.library_filename (output_libname, output_dir=output_dir)\n\n if self._need_link (objects, output_filename):\n lib_args = objects + ['/OUT:' + output_filename]\n if debug:\n pass # XXX what goes here?\n try:\n self.spawn ([self.lib] + lib_args)\n except DistutilsExecError, msg:\n raise LibError, msg\n\n else:\n log.debug(\"skipping %s (up-to-date)\", output_filename)\n\n # create_static_lib ()\n\n def link (self,\n target_desc,\n objects,\n output_filename,\n output_dir=None,\n libraries=None,\n library_dirs=None,\n runtime_library_dirs=None,\n export_symbols=None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n build_temp=None,\n target_lang=None):\n\n if not self.initialized: self.initialize()\n (objects, output_dir) = self._fix_object_args (objects, output_dir)\n (libraries, library_dirs, runtime_library_dirs) = \\\n self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)\n\n if runtime_library_dirs:\n self.warn (\"I don't know what to do with 'runtime_library_dirs': \"\n + str (runtime_library_dirs))\n\n lib_opts = gen_lib_options (self,\n library_dirs, runtime_library_dirs,\n libraries)\n if output_dir is not None:\n output_filename = os.path.join (output_dir, output_filename)\n\n if self._need_link (objects, output_filename):\n\n if target_desc == CCompiler.EXECUTABLE:\n if debug:\n ldflags = self.ldflags_shared_debug[1:]\n else:\n ldflags = self.ldflags_shared[1:]\n else:\n if debug:\n ldflags = self.ldflags_shared_debug\n else:\n ldflags = self.ldflags_shared\n\n export_opts = []\n for sym in (export_symbols or []):\n export_opts.append(\"/EXPORT:\" + sym)\n\n ld_args = (ldflags + lib_opts + export_opts +\n objects + ['/OUT:' + output_filename])\n\n # The MSVC linker generates .lib and .exp files, which cannot be\n # suppressed by any linker switches. The .lib files may even be\n # needed! Make sure they are generated in the temporary build\n # directory. Since they have different names for debug and release\n # builds, they can go into the same directory.\n if export_symbols is not None:\n (dll_name, dll_ext) = os.path.splitext(\n os.path.basename(output_filename))\n implib_file = os.path.join(\n os.path.dirname(objects[0]),\n self.library_filename(dll_name))\n ld_args.append ('/IMPLIB:' + implib_file)\n\n if extra_preargs:\n ld_args[:0] = extra_preargs\n if extra_postargs:\n ld_args.extend(extra_postargs)\n\n self.mkpath (os.path.dirname (output_filename))\n try:\n self.spawn ([self.linker] + ld_args)\n except DistutilsExecError, msg:\n raise LinkError, msg\n\n else:\n log.debug(\"skipping %s (up-to-date)\", output_filename)\n\n # link ()\n\n\n # -- Miscellaneous methods -----------------------------------------\n # These are all used by the 'gen_lib_options() function, in\n # ccompiler.py.\n\n def library_dir_option (self, dir):\n return \"/LIBPATH:\" + dir\n\n def runtime_library_dir_option (self, dir):\n raise DistutilsPlatformError, \\\n \"don't know how to set runtime library search path for MSVC++\"\n\n def library_option (self, lib):\n return self.library_filename (lib)\n\n\n def find_library_file (self, dirs, lib, debug=0):\n # Prefer a debugging library if found (and requested), but deal\n # with it if we don't have one.\n if debug:\n try_names = [lib + \"_d\", lib]\n else:\n try_names = [lib]\n for dir in dirs:\n for name in try_names:\n libfile = os.path.join(dir, self.library_filename (name))\n if os.path.exists(libfile):\n return libfile\n else:\n # Oops, didn't find it in *any* of 'dirs'\n return None\n\n # find_library_file ()\n\n # Helper methods for using the MSVC registry settings\n\n def find_exe(self, exe):\n \"\"\"Return path to an MSVC executable program.\n\n Tries to find the program in several places: first, one of the\n MSVC program search paths from the registry; next, the directories\n in the PATH environment variable. If any of those work, return an\n absolute path that is known to exist. If none of them work, just\n return the original program name, 'exe'.\n \"\"\"\n\n for p in self.__paths:\n fn = os.path.join(os.path.abspath(p), exe)\n if os.path.isfile(fn):\n return fn\n\n # didn't find it; try existing path\n for p in string.split(os.environ['Path'],';'):\n fn = os.path.join(os.path.abspath(p),exe)\n if os.path.isfile(fn):\n return fn\n\n return exe\n\n def get_msvc_paths(self, path, platform='x86'):\n \"\"\"Get a list of devstudio directories (include, lib or path).\n\n Return a list of strings. The list will be empty if unable to\n access the registry or appropriate registry keys not found.\n \"\"\"\n\n if not _can_read_reg:\n return []\n\n path = path + \" dirs\"\n if self.__version >= 7:\n key = (r\"%s\\%0.1f\\VC\\VC_OBJECTS_PLATFORM_INFO\\Win32\\Directories\"\n % (self.__root, self.__version))\n else:\n key = (r\"%s\\6.0\\Build System\\Components\\Platforms\"\n r\"\\Win32 (%s)\\Directories\" % (self.__root, platform))\n\n for base in HKEYS:\n d = read_values(base, key)\n if d:\n if self.__version >= 7:\n return string.split(self.__macros.sub(d[path]), \";\")\n else:\n return string.split(d[path], \";\")\n # MSVC 6 seems to create the registry entries we need only when\n # the GUI is run.\n if self.__version == 6:\n for base in HKEYS:\n if read_values(base, r\"%s\\6.0\" % self.__root) is not None:\n self.warn(\"It seems you have Visual Studio 6 installed, \"\n \"but the expected registry settings are not present.\\n\"\n \"You must at least run the Visual Studio GUI once \"\n \"so that these entries are created.\")\n break\n return []\n\n def set_path_env_var(self, name):\n \"\"\"Set environment variable 'name' to an MSVC path type value.\n\n This is equivalent to a SET command prior to execution of spawned\n commands.\n \"\"\"\n\n if name == \"lib\":\n p = self.get_msvc_paths(\"library\")\n else:\n p = self.get_msvc_paths(name)\n if p:\n os.environ[name] = string.join(p, ';')\n\n\nif get_build_version() >= 8.0:\n log.debug(\"Importing new compiler from distutils.msvc9compiler\")\n OldMSVCCompiler = MSVCCompiler\n from distutils.msvc9compiler import MSVCCompiler\n # get_build_architecture not really relevant now we support cross-compile\n from distutils.msvc9compiler import MacroExpander\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203654,"cells":{"repo_name":{"kind":"string","value":"vdmann/cse-360-image-hosting-website"},"path":{"kind":"string","value":"lib/python2.7/site-packages/requests/packages/chardet/big5prober.py"},"copies":{"kind":"string","value":"2931"},"size":{"kind":"string","value":"1684"},"content":{"kind":"string","value":"######################## BEGIN LICENSE BLOCK ########################\n# The Original Code is Mozilla Communicator client code.\n#\n# The Initial Developer of the Original Code is\n# Netscape Communications Corporation.\n# Portions created by the Initial Developer are Copyright (C) 1998\n# the Initial Developer. All Rights Reserved.\n#\n# Contributor(s):\n# Mark Pilgrim - port to Python\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA\n# 02110-1301 USA\n######################### END LICENSE BLOCK #########################\n\nfrom .mbcharsetprober import MultiByteCharSetProber\nfrom .codingstatemachine import CodingStateMachine\nfrom .chardistribution import Big5DistributionAnalysis\nfrom .mbcssm import Big5SMModel\n\n\nclass Big5Prober(MultiByteCharSetProber):\n def __init__(self):\n MultiByteCharSetProber.__init__(self)\n self._mCodingSM = CodingStateMachine(Big5SMModel)\n self._mDistributionAnalyzer = Big5DistributionAnalysis()\n self.reset()\n\n def get_charset_name(self):\n return \"Big5\"\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203655,"cells":{"repo_name":{"kind":"string","value":"chouseknecht/ansible"},"path":{"kind":"string","value":"lib/ansible/modules/network/aci/mso_schema_template_anp.py"},"copies":{"kind":"string","value":"26"},"size":{"kind":"string","value":"5536"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2018, Dag Wieers (@dagwieers) \n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = r'''\n---\nmodule: mso_schema_template_anp\nshort_description: Manage Application Network Profiles (ANPs) in schema templates\ndescription:\n- Manage ANPs in schema templates on Cisco ACI Multi-Site.\nauthor:\n- Dag Wieers (@dagwieers)\nversion_added: '2.8'\noptions:\n schema:\n description:\n - The name of the schema.\n type: str\n required: yes\n template:\n description:\n - The name of the template.\n type: str\n required: yes\n anp:\n description:\n - The name of the ANP to manage.\n type: str\n aliases: [ name ]\n display_name:\n description:\n - The name as displayed on the MSO web interface.\n type: str\n state:\n description:\n - Use C(present) or C(absent) for adding or removing.\n - Use C(query) for listing an object or multiple objects.\n type: str\n choices: [ absent, present, query ]\n default: present\nseealso:\n- module: mso_schema_template\n- module: mso_schema_template_anp_epg\nextends_documentation_fragment: mso\n'''\n\nEXAMPLES = r'''\n- name: Add a new ANP\n mso_schema_template_anp:\n host: mso_host\n username: admin\n password: SomeSecretPassword\n schema: Schema 1\n template: Template 1\n anp: ANP 1\n state: present\n delegate_to: localhost\n\n- name: Remove an ANP\n mso_schema_template_anp:\n host: mso_host\n username: admin\n password: SomeSecretPassword\n schema: Schema 1\n template: Template 1\n anp: ANP 1\n state: absent\n delegate_to: localhost\n\n- name: Query a specific ANPs\n mso_schema_template_anp:\n host: mso_host\n username: admin\n password: SomeSecretPassword\n schema: Schema 1\n template: Template 1\n state: query\n delegate_to: localhost\n register: query_result\n\n- name: Query all ANPs\n mso_schema_template_anp:\n host: mso_host\n username: admin\n password: SomeSecretPassword\n schema: Schema 1\n template: Template 1\n state: query\n delegate_to: localhost\n register: query_result\n'''\n\nRETURN = r'''\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, issubset\n\n\ndef main():\n argument_spec = mso_argument_spec()\n argument_spec.update(\n schema=dict(type='str', required=True),\n template=dict(type='str', required=True),\n anp=dict(type='str', aliases=['name']), # This parameter is not required for querying all objects\n display_name=dict(type='str'),\n state=dict(type='str', default='present', choices=['absent', 'present', 'query']),\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ['state', 'absent', ['anp']],\n ['state', 'present', ['anp']],\n ],\n )\n\n schema = module.params['schema']\n template = module.params['template']\n anp = module.params['anp']\n display_name = module.params['display_name']\n state = module.params['state']\n\n mso = MSOModule(module)\n\n # Get schema_id\n schema_obj = mso.get_obj('schemas', displayName=schema)\n if not schema_obj:\n mso.fail_json(msg=\"Provided schema '{0}' does not exist\".format(schema))\n\n schema_path = 'schemas/{id}'.format(**schema_obj)\n\n # Get template\n templates = [t['name'] for t in schema_obj['templates']]\n if template not in templates:\n mso.fail_json(msg=\"Provided template '{0}' does not exist. Existing templates: {1}\".format(template, ', '.join(templates)))\n template_idx = templates.index(template)\n\n # Get ANP\n anps = [a['name'] for a in schema_obj['templates'][template_idx]['anps']]\n\n if anp is not None and anp in anps:\n anp_idx = anps.index(anp)\n mso.existing = schema_obj['templates'][template_idx]['anps'][anp_idx]\n\n if state == 'query':\n if anp is None:\n mso.existing = schema_obj['templates'][template_idx]['anps']\n elif not mso.existing:\n mso.fail_json(msg=\"ANP '{anp}' not found\".format(anp=anp))\n mso.exit_json()\n\n anps_path = '/templates/{0}/anps'.format(template)\n anp_path = '/templates/{0}/anps/{1}'.format(template, anp)\n ops = []\n\n mso.previous = mso.existing\n if state == 'absent':\n if mso.existing:\n mso.sent = mso.existing = {}\n ops.append(dict(op='remove', path=anp_path))\n\n elif state == 'present':\n\n if display_name is None and not mso.existing:\n display_name = anp\n\n epgs = []\n if mso.existing:\n epgs = None\n\n payload = dict(\n name=anp,\n displayName=display_name,\n epgs=epgs,\n )\n\n mso.sanitize(payload, collate=True)\n\n if mso.existing:\n if display_name is not None:\n ops.append(dict(op='replace', path=anp_path + '/displayName', value=display_name))\n else:\n ops.append(dict(op='add', path=anps_path + '/-', value=mso.sent))\n\n mso.existing = mso.proposed\n\n if not module.check_mode:\n mso.request(schema_path, method='PATCH', data=ops)\n\n mso.exit_json()\n\n\nif __name__ == \"__main__\":\n main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203656,"cells":{"repo_name":{"kind":"string","value":"tequa/ammisoft"},"path":{"kind":"string","value":"ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/prompt_toolkit/key_binding/bindings/named_commands.py"},"copies":{"kind":"string","value":"6"},"size":{"kind":"string","value":"15922"},"content":{"kind":"string","value":"\"\"\"\nKey bindings which are also known by GNU readline by the given names.\n\nSee: http://www.delorie.com/gnu/docs/readline/rlman_13.html\n\"\"\"\nfrom __future__ import unicode_literals\nfrom prompt_toolkit.enums import IncrementalSearchDirection, SEARCH_BUFFER\nfrom prompt_toolkit.selection import PasteMode\nfrom six.moves import range\nimport six\n\nfrom .completion import generate_completions, display_completions_like_readline\nfrom prompt_toolkit.document import Document\nfrom prompt_toolkit.enums import EditingMode\nfrom prompt_toolkit.key_binding.input_processor import KeyPress\nfrom prompt_toolkit.keys import Keys\n\n__all__ = (\n 'get_by_name',\n)\n\n\n# Registry that maps the Readline command names to their handlers.\n_readline_commands = {}\n\ndef register(name):\n \"\"\"\n Store handler in the `_readline_commands` dictionary.\n \"\"\"\n assert isinstance(name, six.text_type)\n def decorator(handler):\n assert callable(handler)\n\n _readline_commands[name] = handler\n return handler\n return decorator\n\n\ndef get_by_name(name):\n \"\"\"\n Return the handler for the (Readline) command with the given name.\n \"\"\"\n try:\n return _readline_commands[name]\n except KeyError:\n raise KeyError('Unknown readline command: %r' % name)\n\n#\n# Commands for moving\n# See: http://www.delorie.com/gnu/docs/readline/rlman_14.html\n#\n\n@register('beginning-of-line')\ndef beginning_of_line(event):\n \" Move to the start of the current line. \"\n buff = event.current_buffer\n buff.cursor_position += buff.document.get_start_of_line_position(after_whitespace=False)\n\n\n@register('end-of-line')\ndef end_of_line(event):\n \" Move to the end of the line. \"\n buff = event.current_buffer\n buff.cursor_position += buff.document.get_end_of_line_position()\n\n\n@register('forward-char')\ndef forward_char(event):\n \" Move forward a character. \"\n buff = event.current_buffer\n buff.cursor_position += buff.document.get_cursor_right_position(count=event.arg)\n\n\n@register('backward-char')\ndef backward_char(event):\n \" Move back a character. \"\n buff = event.current_buffer\n buff.cursor_position += buff.document.get_cursor_left_position(count=event.arg)\n\n\n@register('forward-word')\ndef forward_word(event):\n \"\"\"\n Move forward to the end of the next word. Words are composed of letters and\n digits.\n \"\"\"\n buff = event.current_buffer\n pos = buff.document.find_next_word_ending(count=event.arg)\n\n if pos:\n buff.cursor_position += pos\n\n\n@register('backward-word')\ndef backward_word(event):\n \"\"\"\n Move back to the start of the current or previous word. Words are composed\n of letters and digits.\n \"\"\"\n buff = event.current_buffer\n pos = buff.document.find_previous_word_beginning(count=event.arg)\n\n if pos:\n buff.cursor_position += pos\n\n\n@register('clear-screen')\ndef clear_screen(event):\n \"\"\"\n Clear the screen and redraw everything at the top of the screen.\n \"\"\"\n event.cli.renderer.clear()\n\n\n@register('redraw-current-line')\ndef redraw_current_line(event):\n \"\"\"\n Refresh the current line.\n (Readline defines this command, but prompt-toolkit doesn't have it.)\n \"\"\"\n pass\n\n#\n# Commands for manipulating the history.\n# See: http://www.delorie.com/gnu/docs/readline/rlman_15.html\n#\n\n@register('accept-line')\ndef accept_line(event):\n \" Accept the line regardless of where the cursor is. \"\n b = event.current_buffer\n b.accept_action.validate_and_handle(event.cli, b)\n\n\n@register('previous-history')\ndef previous_history(event):\n \" Move `back' through the history list, fetching the previous command. \"\n event.current_buffer.history_backward(count=event.arg)\n\n\n@register('next-history')\ndef next_history(event):\n \" Move `forward' through the history list, fetching the next command. \"\n event.current_buffer.history_forward(count=event.arg)\n\n\n@register('beginning-of-history')\ndef beginning_of_history(event):\n \" Move to the first line in the history. \"\n event.current_buffer.go_to_history(0)\n\n\n@register('end-of-history')\ndef end_of_history(event):\n \"\"\"\n Move to the end of the input history, i.e., the line currently being entered.\n \"\"\"\n event.current_buffer.history_forward(count=10**100)\n buff = event.current_buffer\n buff.go_to_history(len(buff._working_lines) - 1)\n\n\n@register('reverse-search-history')\ndef reverse_search_history(event):\n \"\"\"\n Search backward starting at the current line and moving `up' through\n the history as necessary. This is an incremental search.\n \"\"\"\n event.cli.current_search_state.direction = IncrementalSearchDirection.BACKWARD\n event.cli.push_focus(SEARCH_BUFFER)\n\n\n#\n# Commands for changing text\n#\n\n@register('end-of-file')\ndef end_of_file(event):\n \"\"\"\n Exit.\n \"\"\"\n event.cli.exit()\n\n\n@register('delete-char')\ndef delete_char(event):\n \" Delete character before the cursor. \"\n deleted = event.current_buffer.delete(count=event.arg)\n if not deleted:\n event.cli.output.bell()\n\n\n@register('backward-delete-char')\ndef backward_delete_char(event):\n \" Delete the character behind the cursor. \"\n if event.arg < 0:\n # When a negative argument has been given, this should delete in front\n # of the cursor.\n deleted = event.current_buffer.delete(count=-event.arg)\n else:\n deleted = event.current_buffer.delete_before_cursor(count=event.arg)\n\n if not deleted:\n event.cli.output.bell()\n\n\n@register('self-insert')\ndef self_insert(event):\n \" Insert yourself. \"\n event.current_buffer.insert_text(event.data * event.arg)\n\n\n@register('transpose-chars')\ndef transpose_chars(event):\n \"\"\"\n Emulate Emacs transpose-char behavior: at the beginning of the buffer,\n do nothing. At the end of a line or buffer, swap the characters before\n the cursor. Otherwise, move the cursor right, and then swap the\n characters before the cursor.\n \"\"\"\n b = event.current_buffer\n p = b.cursor_position\n if p == 0:\n return\n elif p == len(b.text) or b.text[p] == '\\n':\n b.swap_characters_before_cursor()\n else:\n b.cursor_position += b.document.get_cursor_right_position()\n b.swap_characters_before_cursor()\n\n\n@register('uppercase-word')\ndef uppercase_word(event):\n \"\"\"\n Uppercase the current (or following) word.\n \"\"\"\n buff = event.current_buffer\n\n for i in range(event.arg):\n pos = buff.document.find_next_word_ending()\n words = buff.document.text_after_cursor[:pos]\n buff.insert_text(words.upper(), overwrite=True)\n\n\n@register('downcase-word')\ndef downcase_word(event):\n \"\"\"\n Lowercase the current (or following) word.\n \"\"\"\n buff = event.current_buffer\n\n for i in range(event.arg): # XXX: not DRY: see meta_c and meta_u!!\n pos = buff.document.find_next_word_ending()\n words = buff.document.text_after_cursor[:pos]\n buff.insert_text(words.lower(), overwrite=True)\n\n\n@register('capitalize-word')\ndef capitalize_word(event):\n \"\"\"\n Capitalize the current (or following) word.\n \"\"\"\n buff = event.current_buffer\n\n for i in range(event.arg):\n pos = buff.document.find_next_word_ending()\n words = buff.document.text_after_cursor[:pos]\n buff.insert_text(words.title(), overwrite=True)\n\n\n@register('quoted-insert')\ndef quoted_insert(event):\n \"\"\"\n Add the next character typed to the line verbatim. This is how to insert\n key sequences like C-q, for example.\n \"\"\"\n event.cli.quoted_insert = True\n\n\n#\n# Killing and yanking.\n#\n\n@register('kill-line')\ndef kill_line(event):\n \"\"\"\n Kill the text from the cursor to the end of the line.\n\n If we are at the end of the line, this should remove the newline.\n (That way, it is possible to delete multiple lines by executing this\n command multiple times.)\n \"\"\"\n buff = event.current_buffer\n if event.arg < 0:\n deleted = buff.delete_before_cursor(count=-buff.document.get_start_of_line_position())\n else:\n if buff.document.current_char == '\\n':\n deleted = buff.delete(1)\n else:\n deleted = buff.delete(count=buff.document.get_end_of_line_position())\n event.cli.clipboard.set_text(deleted)\n\n\n@register('kill-word')\ndef kill_word(event):\n \"\"\"\n Kill from point to the end of the current word, or if between words, to the\n end of the next word. Word boundaries are the same as forward-word.\n \"\"\"\n buff = event.current_buffer\n pos = buff.document.find_next_word_ending(count=event.arg)\n\n if pos:\n deleted = buff.delete(count=pos)\n event.cli.clipboard.set_text(deleted)\n\n\n@register('unix-word-rubout')\ndef unix_word_rubout(event, WORD=True):\n \"\"\"\n Kill the word behind point, using whitespace as a word boundary.\n Usually bound to ControlW.\n \"\"\"\n buff = event.current_buffer\n pos = buff.document.find_start_of_previous_word(count=event.arg, WORD=WORD)\n\n if pos is None:\n # Nothing found? delete until the start of the document. (The\n # input starts with whitespace and no words were found before the\n # cursor.)\n pos = - buff.cursor_position\n\n if pos:\n deleted = buff.delete_before_cursor(count=-pos)\n\n # If the previous key press was also Control-W, concatenate deleted\n # text.\n if event.is_repeat:\n deleted += event.cli.clipboard.get_data().text\n\n event.cli.clipboard.set_text(deleted)\n else:\n # Nothing to delete. Bell.\n event.cli.output.bell()\n\n\n@register('backward-kill-word')\ndef backward_kill_word(event):\n \"\"\"\n Kills the word before point, using \"not a letter nor a digit\" as a word boundary.\n Usually bound to M-Del or M-Backspace.\n \"\"\"\n unix_word_rubout(event, WORD=False)\n\n\n@register('delete-horizontal-space')\ndef delete_horizontal_space(event):\n \" Delete all spaces and tabs around point. \"\n buff = event.current_buffer\n text_before_cursor = buff.document.text_before_cursor\n text_after_cursor = buff.document.text_after_cursor\n\n delete_before = len(text_before_cursor) - len(text_before_cursor.rstrip('\\t '))\n delete_after = len(text_after_cursor) - len(text_after_cursor.lstrip('\\t '))\n\n buff.delete_before_cursor(count=delete_before)\n buff.delete(count=delete_after)\n\n\n@register('unix-line-discard')\ndef unix_line_discard(event):\n \"\"\"\n Kill backward from the cursor to the beginning of the current line.\n \"\"\"\n buff = event.current_buffer\n\n if buff.document.cursor_position_col == 0 and buff.document.cursor_position > 0:\n buff.delete_before_cursor(count=1)\n else:\n deleted = buff.delete_before_cursor(count=-buff.document.get_start_of_line_position())\n event.cli.clipboard.set_text(deleted)\n\n\n@register('yank')\ndef yank(event):\n \"\"\"\n Paste before cursor.\n \"\"\"\n event.current_buffer.paste_clipboard_data(\n event.cli.clipboard.get_data(), count=event.arg, paste_mode=PasteMode.EMACS)\n\n@register('yank-nth-arg')\ndef yank_nth_arg(event):\n \"\"\"\n Insert the first argument of the previous command. With an argument, insert\n the nth word from the previous command (start counting at 0).\n \"\"\"\n n = (event.arg if event.arg_present else None)\n event.current_buffer.yank_nth_arg(n)\n\n\n@register('yank-last-arg')\ndef yank_last_arg(event):\n \"\"\"\n Like `yank_nth_arg`, but if no argument has been given, yank the last word\n of each line.\n \"\"\"\n n = (event.arg if event.arg_present else None)\n event.current_buffer.yank_last_arg(n)\n\n@register('yank-pop')\ndef yank_pop(event):\n \"\"\"\n Rotate the kill ring, and yank the new top. Only works following yank or\n yank-pop.\n \"\"\"\n buff = event.current_buffer\n doc_before_paste = buff.document_before_paste\n clipboard = event.cli.clipboard\n\n if doc_before_paste is not None:\n buff.document = doc_before_paste\n clipboard.rotate()\n buff.paste_clipboard_data(\n clipboard.get_data(), paste_mode=PasteMode.EMACS)\n\n#\n# Completion.\n#\n\n@register('complete')\ndef complete(event):\n \" Attempt to perform completion. \"\n display_completions_like_readline(event)\n\n\n@register('menu-complete')\ndef menu_complete(event):\n \"\"\"\n Generate completions, or go to the next completion. (This is the default\n way of completing input in prompt_toolkit.)\n \"\"\"\n generate_completions(event)\n\n\n@register('menu-complete-backward')\ndef menu_complete_backward(event):\n \" Move backward through the list of possible completions. \"\n event.current_buffer.complete_previous()\n\n#\n# Keyboard macros.\n#\n\n@register('start-kbd-macro')\ndef start_kbd_macro(event):\n \"\"\"\n Begin saving the characters typed into the current keyboard macro.\n \"\"\"\n event.cli.input_processor.start_macro()\n\n\n@register('end-kbd-macro')\ndef start_kbd_macro(event):\n \"\"\"\n Stop saving the characters typed into the current keyboard macro and save\n the definition.\n \"\"\"\n event.cli.input_processor.end_macro()\n\n\n@register('call-last-kbd-macro')\ndef start_kbd_macro(event):\n \"\"\"\n Re-execute the last keyboard macro defined, by making the characters in the\n macro appear as if typed at the keyboard.\n \"\"\"\n event.cli.input_processor.call_macro()\n\n\n@register('print-last-kbd-macro')\ndef print_last_kbd_macro(event):\n \" Print the last keboard macro. \"\n # TODO: Make the format suitable for the inputrc file.\n def print_macro():\n for k in event.cli.input_processor.macro:\n print(k)\n event.cli.run_in_terminal(print_macro)\n\n#\n# Miscellaneous Commands.\n#\n\n@register('undo')\ndef undo(event):\n \" Incremental undo. \"\n event.current_buffer.undo()\n\n\n@register('insert-comment')\ndef insert_comment(event):\n \"\"\"\n Without numeric argument, comment all lines.\n With numeric argument, uncomment all lines.\n In any case accept the input.\n \"\"\"\n buff = event.current_buffer\n\n # Transform all lines.\n if event.arg != 1:\n def change(line):\n return line[1:] if line.startswith('#') else line\n else:\n def change(line):\n return '#' + line\n\n buff.document = Document(\n text='\\n'.join(map(change, buff.text.splitlines())),\n cursor_position=0)\n\n # Accept input.\n buff.accept_action.validate_and_handle(event.cli, buff)\n\n\n@register('vi-editing-mode')\ndef vi_editing_mode(event):\n \" Switch to Vi editing mode. \"\n event.cli.editing_mode = EditingMode.VI\n\n\n@register('emacs-editing-mode')\ndef emacs_editing_mode(event):\n \" Switch to Emacs editing mode. \"\n event.cli.editing_mode = EditingMode.EMACS\n\n\n@register('prefix-meta')\ndef prefix_meta(event):\n \"\"\"\n Metafy the next character typed. This is for keyboards without a meta key.\n\n Sometimes people also want to bind other keys to Meta, e.g. 'jj'::\n\n registry.add_key_binding('j', 'j', filter=ViInsertMode())(prefix_meta)\n \"\"\"\n event.cli.input_processor.feed(KeyPress(Keys.Escape))\n\n\n@register('operate-and-get-next')\ndef operate_and_get_next(event):\n \"\"\"\n Accept the current line for execution and fetch the next line relative to\n the current line from the history for editing.\n \"\"\"\n buff = event.current_buffer\n new_index = buff.working_index + 1\n\n # Accept the current input. (This will also redraw the interface in the\n # 'done' state.)\n buff.accept_action.validate_and_handle(event.cli, buff)\n\n # Set the new index at the start of the next run.\n def set_working_index():\n if new_index < len(buff._working_lines):\n buff.working_index = new_index\n\n event.cli.pre_run_callables.append(set_working_index)\n\n\n@register('edit-and-execute-command')\ndef edit_and_execute(event):\n \"\"\"\n Invoke an editor on the current command line, and accept the result.\n \"\"\"\n buff = event.current_buffer\n\n buff.open_in_editor(event.cli)\n buff.accept_action.validate_and_handle(event.cli, buff)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203657,"cells":{"repo_name":{"kind":"string","value":"liyitest/rr"},"path":{"kind":"string","value":"openstack_dashboard/test/integration_tests/regions/baseregion.py"},"copies":{"kind":"string","value":"40"},"size":{"kind":"string","value":"4462"},"content":{"kind":"string","value":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport types\n\nfrom openstack_dashboard.test.integration_tests import basewebobject\n\n\nclass BaseRegion(basewebobject.BaseWebObject):\n \"\"\"Base class for region module\n\n * there is necessity to override some basic methods for obtaining elements\n as in content of regions it is required to do relative searches\n\n * self.driver cannot be easily replaced with self.src_elem because that\n would result in functionality loss, self.driver is WebDriver and\n src_elem is WebElement its usage is different.\n\n * this does not mean that self.src_elem cannot be self.driver\n \"\"\"\n\n _default_src_locator = None\n\n # private methods\n def __init__(self, driver, conf, src_elem=None):\n super(BaseRegion, self).__init__(driver, conf)\n if src_elem is None and self._default_src_locator:\n # fake self.src_elem must be set up in\n # order self._get_element work\n self.src_elem = driver\n src_elem = self._get_element(*self._default_src_locator)\n\n self.src_elem = src_elem or driver\n\n # variable for storing names of dynamic properties and\n # associated 'getters' - meaning method that are supplying\n # regions or web elements\n self._dynamic_properties = {}\n\n def __getattr__(self, name):\n \"\"\"It is not possible to create property bounded just to object\n and not class at runtime, therefore it is necessary to\n override __getattr__ and make fake 'properties' by storing them in\n the protected attribute _dynamic_attributes and returning result\n of the method associated with the specified attribute.\n\n This way the feeling of having regions accessed as 'properties'\n is created, which is one of the requirement of page object pattern.\n \"\"\"\n try:\n return self._dynamic_properties[name]()\n except KeyError:\n msg = \"'{0}' object has no attribute '{1}'\"\n raise AttributeError(msg.format(type(self).__name__, name))\n\n # protected methods and classes\n class _DynamicProperty(object):\n \"\"\"Serves as new property holder.\"\"\"\n\n def __init__(self, method, index=None):\n \"\"\"In case object was created with index != None,\n it is assumed that the result of self.method should be tuple()\n and just certain index should be returned\n \"\"\"\n self.method = method\n self.index = index\n\n def __call__(self, *args, **kwargs):\n result = self.method()\n return result if self.index is None else result[self.index]\n\n def _init_dynamic_properties(self, new_attr_names, method):\n \"\"\"Create new object's 'properties' at runtime.\"\"\"\n for index, new_attr_name in enumerate(new_attr_names):\n self._init_dynamic_property(new_attr_name, method, index)\n\n def _init_dynamic_property(self, new_attr_name, method, index=None):\n \"\"\"Create new object's property at runtime. If index argument is\n supplied it is assumed that method returns tuple() and only element\n on ${index} position is returned.\n \"\"\"\n if (new_attr_name in dir(self) or\n new_attr_name in self._dynamic_properties):\n raise AttributeError(\"%s class has already attribute %s.\"\n \"The new property could not be \"\n \"created.\" % (self.__class__.__name__,\n new_attr_name))\n new_method = self.__class__._DynamicProperty(method, index)\n inst_method = types.MethodType(new_method, self)\n self._dynamic_properties[new_attr_name] = inst_method\n\n def _get_element(self, *locator):\n return self.src_elem.find_element(*locator)\n\n def _get_elements(self, *locator):\n return self.src_elem.find_elements(*locator)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203658,"cells":{"repo_name":{"kind":"string","value":"defance/edx-platform"},"path":{"kind":"string","value":"common/test/acceptance/pages/lms/matlab_problem.py"},"copies":{"kind":"string","value":"179"},"size":{"kind":"string","value":"1024"},"content":{"kind":"string","value":"\"\"\"\nMatlab Problem Page.\n\"\"\"\nfrom bok_choy.page_object import PageObject\n\n\nclass MatlabProblemPage(PageObject):\n \"\"\"\n View of matlab problem page.\n \"\"\"\n\n url = None\n\n def is_browser_on_page(self):\n return self.q(css='.ungraded-matlab-result').present\n\n @property\n def problem_name(self):\n \"\"\"\n Return the current problem name.\n \"\"\"\n return self.q(css='.problem-header').text[0]\n\n def set_response(self, response_str):\n \"\"\"\n Input a response to the prompt.\n \"\"\"\n input_css = \"$('.CodeMirror')[0].CodeMirror.setValue('{}');\".format(response_str)\n self.browser.execute_script(input_css)\n\n def click_run_code(self):\n \"\"\"\n Click the run code button.\n \"\"\"\n self.q(css='input.save').click()\n self.wait_for_ajax()\n\n def get_grader_msg(self, class_name):\n \"\"\"\n Returns the text value of given class.\n \"\"\"\n self.wait_for_ajax()\n return self.q(css=class_name).text\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":203659,"cells":{"repo_name":{"kind":"string","value":"zapstar/gae-facebook"},"path":{"kind":"string","value":"main.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3589"},"content":{"kind":"string","value":"#Import WebApp2 framework on Google App Engine\nimport webapp2\n\n#Import Sessions from WebApp2 Extras\nfrom webapp2_extras import sessions\n\n#Import Quote Function from URL Library\nfrom urllib import quote\n\n#Import Parse_QueryString from URL Parse\nfrom urlparse import parse_qs\n\n#Import URLFetch from Google App Engine API\nfrom google.appengine.api import urlfetch\n\n#Import JSON Loads from JSON\nfrom json import loads\n\n#Import the Session State Variable Generator\n#Random String of 23 characters, unguessable\nimport state_variable\n\n#Import the BaseSessionHandler Class\nimport session_module\n\nclass MainHandler(session_module.BaseSessionHandler):\n\t#The APP_ID and the APP_SECRET variables contain information\n\t#required for Facebook Authentication\n\tAPP_ID = '267910489968296'\n\tAPP_SECRET = '02583f888c364e2d54fc081830c8f870'\n\t\n\t#If Offline then use this\n\t#my_url = 'http://localhost:8080/'\n\t#else if one Google App Engine use this as my_url\n\tmy_url = 'http://facebook-gae-python.appspot.com/'\n\n\tdef get(self):\n\t\t#Check whether 'code' is in the GET variables of the URL\n\t\t#if not then execute the below code to set the state variable in\n\t\t#the session\n\t\tif self.request.get('code') == '':\n\t\t\t#If not generate a state variable\n\t\t\tsession_state = state_variable.SessionStateVariable()\n\t\t\t#Set the state variable in the session\n\t\t\tself.session['state'] = session_state.generateState()\n\t\t\t#The Dialog URL for Facebook Login\n\t\t\tdialog_url = 'http://www.facebook.com/dialog/oauth?client_id=' + \\\n\t\t\t\tself.APP_ID + '&redirect_uri=' + quote(self.my_url) + \\\n\t\t\t\t'&state=' + self.session.get('state')\n\t\t\t#Redirect to the Facebook Page (Please note the Redirection URL must\n\t\t\t#be updated on Facebook App's Site URL or Canvas URL\n\t\t\tself.redirect(dialog_url)\n\t\telse:\n\t\t\t#If state variable is already set then set the class variable\n\t\t\tself.state = self.session.get('state')\n\t\t\n\t\t#Check whether the State Variable is same as that returned by Facebook\n\t\t#Else report the CSRF Violation\n\t\tif self.request.get('state') == self.session.get('state'):\n\t\t\n\t\t\t#The token URL for generation of OAuth Access Token for Graph API \n\t\t\t#Requests from your application\n\t\t\ttoken_url = 'https://graph.facebook.com/oauth/access_token?client_id=' + \\\n\t\t\t\tself.APP_ID + '&redirect_uri=' + quote(self.my_url) + \\\n\t\t\t\t'&client_secret=' + self.APP_SECRET + '&code=' + self.request.get('code')\n\t\t\t\n\t\t\t#Get the token from the Token URL\n\t\t\ttoken_response = urlfetch.fetch(token_url)\n\t\t\t\n\t\t\t#Parse the string to get the Access token\n\t\t\tparams = parse_qs(token_response.content)\n\t\t\t\n\t\t\t#Now params['access_token'][0] has the access_token for use\n\t\t\t\n\t\t\t#Requesting Facebook Graph API\n\t\t\t#the Graph URL\n\t\t\tgraph_url = u'https://graph.facebook.com'\n\t\t\t#The API String for example /me or /me/movies etc.\n\t\t\tapi_string = u'/me'\n\t\t\t#The concatenated URL for Graph API Request\n\t\t\tapi_request_url = graph_url + api_string + u'?access_token=' + params['access_token'][0]\n\t\t\t#Fetch the Response from Graph API Request\n\t\t\tapi_response = urlfetch.fetch(api_request_url)\n\t\t\t#Get the contents of the Response\n\t\t\tjson_response = api_response.content\n\t\t\t#Convert the JSON String into a dictionary\n\t\t\tapi_answer = loads(json_response)\n\t\t\t#Print your name on the screen!\n\t\t\tself.response.out.write('Hello ' + api_answer['name'])\n\t\telse:\n\t\t\t#CSRF Violation Response (if the state variables don't match)\n\t\t\tself.response.out.write('The states dont match. You may a victim of CSRF')\n#End of MainHandler Class\n\n#The WebApp2 WSGI Application definition\napp = webapp2.WSGIApplication([('/', MainHandler)], debug=True, config = session_module.session_config)"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203660,"cells":{"repo_name":{"kind":"string","value":"jamison904/T999_minimum_kernel"},"path":{"kind":"string","value":"tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py"},"copies":{"kind":"string","value":"11088"},"size":{"kind":"string","value":"3246"},"content":{"kind":"string","value":"# Core.py - Python extension for perf script, core functions\n#\n# Copyright (C) 2010 by Tom Zanussi \n#\n# This software may be distributed under the terms of the GNU General\n# Public License (\"GPL\") version 2 as published by the Free Software\n# Foundation.\n\nfrom collections import defaultdict\n\ndef autodict():\n return defaultdict(autodict)\n\nflag_fields = autodict()\nsymbolic_fields = autodict()\n\ndef define_flag_field(event_name, field_name, delim):\n flag_fields[event_name][field_name]['delim'] = delim\n\ndef define_flag_value(event_name, field_name, value, field_str):\n flag_fields[event_name][field_name]['values'][value] = field_str\n\ndef define_symbolic_field(event_name, field_name):\n # nothing to do, really\n pass\n\ndef define_symbolic_value(event_name, field_name, value, field_str):\n symbolic_fields[event_name][field_name]['values'][value] = field_str\n\ndef flag_str(event_name, field_name, value):\n string = \"\"\n\n if flag_fields[event_name][field_name]:\n\tprint_delim = 0\n keys = flag_fields[event_name][field_name]['values'].keys()\n keys.sort()\n for idx in keys:\n if not value and not idx:\n string += flag_fields[event_name][field_name]['values'][idx]\n break\n if idx and (value & idx) == idx:\n if print_delim and flag_fields[event_name][field_name]['delim']:\n string += \" \" + flag_fields[event_name][field_name]['delim'] + \" \"\n string += flag_fields[event_name][field_name]['values'][idx]\n print_delim = 1\n value &= ~idx\n\n return string\n\ndef symbol_str(event_name, field_name, value):\n string = \"\"\n\n if symbolic_fields[event_name][field_name]:\n keys = symbolic_fields[event_name][field_name]['values'].keys()\n keys.sort()\n for idx in keys:\n if not value and not idx:\n\t\tstring = symbolic_fields[event_name][field_name]['values'][idx]\n break\n\t if (value == idx):\n\t\tstring = symbolic_fields[event_name][field_name]['values'][idx]\n break\n\n return string\n\ntrace_flags = { 0x00: \"NONE\", \\\n 0x01: \"IRQS_OFF\", \\\n 0x02: \"IRQS_NOSUPPORT\", \\\n 0x04: \"NEED_RESCHED\", \\\n 0x08: \"HARDIRQ\", \\\n 0x10: \"SOFTIRQ\" }\n\ndef trace_flag_str(value):\n string = \"\"\n print_delim = 0\n\n keys = trace_flags.keys()\n\n for idx in keys:\n\tif not value and not idx:\n\t string += \"NONE\"\n\t break\n\n\tif idx and (value & idx) == idx:\n\t if print_delim:\n\t\tstring += \" | \";\n\t string += trace_flags[idx]\n\t print_delim = 1\n\t value &= ~idx\n\n return string\n\n\ndef taskState(state):\n\tstates = {\n\t\t0 : \"R\",\n\t\t1 : \"S\",\n\t\t2 : \"D\",\n\t\t64: \"DEAD\"\n\t}\n\n\tif state not in states:\n\t\treturn \"Unknown\"\n\n\treturn states[state]\n\n\nclass EventHeaders:\n\tdef __init__(self, common_cpu, common_secs, common_nsecs,\n\t\t common_pid, common_comm):\n\t\tself.cpu = common_cpu\n\t\tself.secs = common_secs\n\t\tself.nsecs = common_nsecs\n\t\tself.pid = common_pid\n\t\tself.comm = common_comm\n\n\tdef ts(self):\n\t\treturn (self.secs * (10 ** 9)) + self.nsecs\n\n\tdef ts_format(self):\n\t\treturn \"%d.%d\" % (self.secs, int(self.nsecs / 1000))\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203661,"cells":{"repo_name":{"kind":"string","value":"erdanieee/imagePicker"},"path":{"kind":"string","value":"node_modules/dmg-builder/vendor/dmgbuild/colors.py"},"copies":{"kind":"string","value":"12"},"size":{"kind":"string","value":"13054"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport re\nimport math\n\nclass Color (object):\n def to_rgb(self):\n raise Exception('Must implement to_rgb() in subclasses')\n\nclass RGB (Color):\n def __init__(self, r, g, b):\n self.r = r\n self.g = g\n self.b = b\n\n def to_rgb(self):\n return self\n \nclass HSL (Color):\n def __init__(self, h, s, l):\n self.h = h\n self.s = s\n self.l = l\n\n @staticmethod\n def _hue_to_rgb(t1, t2, hue):\n if hue < 0:\n hue += 6\n elif hue >= 6:\n hue -= 6\n\n if hue < 1:\n return (t2 - t1) * hue + t1\n elif hue < 3:\n return t2\n elif hue < 4:\n return (t2 - t1) * (4 - hue) + t1\n else:\n return t1\n \n def to_rgb(self):\n hue = self.h / 60.0\n if self.l <= 0.5:\n t2 = self.l * (self.s + 1)\n else:\n t2 = self.l + self.s - (self.l * self.s)\n t1 = self.l * 2 - t2\n r = self._hue_to_rgb(t1, t2, hue + 2)\n g = self._hue_to_rgb(t1, t2, hue)\n b = self._hue_to_rgb(t1, t2, hue - 2)\n return RGB(r, g, b)\n \nclass HWB (Color):\n def __init__(self, h, w, b):\n self.h = h\n self.w = w\n self.b = b\n\n @staticmethod\n def _hue_to_rgb(hue):\n if hue < 0:\n hue += 6\n elif hue >= 6:\n hue -= 6\n\n if hue < 1:\n return hue\n elif hue < 3:\n return 1\n elif hue < 4:\n return (4 - hue)\n else:\n return 0\n \n def to_rgb(self):\n hue = self.h / 60.0\n t1 = 1 - self.w - self.b\n r = self._hue_to_rgb(hue + 2) * t1 + self.w\n g = self._hue_to_rgb(hue) * t1 + self.w\n b = self._hue_to_rgb(hue - 2) * t1 + self.w\n return RGB(r, g, b)\n \nclass CMYK (Color):\n def __init__(self, c, m, y, k):\n self.c = c\n self.m = m\n self.y = y\n self.k = k\n\n def to_rgb(self):\n r = 1.0 - min(1.0, self.c + self.k)\n g = 1.0 - min(1.0, self.m + self.k)\n b = 1.0 - min(1.0, self.y + self.k)\n return RGB(r, g, b)\n \nclass Gray (Color):\n def __init__(self, g):\n self.g = g\n\n def to_rgb(self):\n return RGB(g, g, g)\n \n_x11_colors = {\n\t'aliceblue': (240, 248, 255),\n 'antiquewhite': (250, 235, 215),\n 'aqua':\t( 0, 255, 255),\n 'aquamarine': (127, 255, 212),\n 'azure': (240, 255, 255),\n 'beige': (245, 245, 220),\n 'bisque': (255, 228, 196),\n 'black': ( 0, 0, 0),\n 'blanchedalmond': (255, 235, 205),\n 'blue': ( 0, 0, 255),\n 'blueviolet': (138, 43, 226),\n 'brown': (165, 42, 42),\n 'burlywood': (222, 184, 135),\n 'cadetblue': ( 95, 158, 160),\n 'chartreuse': (127, 255, 0),\n 'chocolate': (210, 105, 30),\n 'coral': (255, 127, 80),\n 'cornflowerblue': (100, 149, 237),\n 'cornsilk': (255, 248, 220),\n 'crimson': (220, 20, 60),\n 'cyan': ( 0, 255, 255),\n 'darkblue': ( 0, 0, 139),\n 'darkcyan': ( 0, 139, 139),\n 'darkgoldenrod': (184, 134, 11),\n 'darkgray': (169, 169, 169),\n 'darkgreen': ( 0, 100, 0),\n 'darkgrey': (169, 169, 169),\n 'darkkhaki': (189, 183, 107),\n 'darkmagenta': (139, 0, 139),\n 'darkolivegreen': ( 85, 107, 47),\n 'darkorange': (255, 140, 0),\n 'darkorchid': (153, 50, 204),\n 'darkred': (139, 0, 0),\n 'darksalmon': (233, 150, 122),\n 'darkseagreen': (143, 188, 143),\n 'darkslateblue': ( 72, 61, 139),\n 'darkslategray': ( 47, 79, 79),\n 'darkslategrey': ( 47, 79, 79),\n 'darkturquoise': ( 0, 206, 209),\n 'darkviolet': (148, 0, 211),\n 'deeppink': (255, 20, 147),\n 'deepskyblue': ( 0, 191, 255),\n 'dimgray': (105, 105, 105),\n 'dimgrey': (105, 105, 105),\n 'dodgerblue': ( 30, 144, 255),\n 'firebrick': (178, 34, 34),\n 'floralwhite': (255, 250, 240),\n 'forestgreen': ( 34, 139, 34),\n 'fuchsia': (255, 0, 255),\n 'gainsboro': (220, 220, 220),\n 'ghostwhite': (248, 248, 255),\n 'gold': (255, 215, 0),\n 'goldenrod': (218, 165, 32),\n 'gray': (128, 128, 128),\n 'grey': (128, 128, 128),\n 'green': ( 0, 128, 0),\n 'greenyellow': (173, 255, 47),\n 'honeydew': (240, 255, 240),\n 'hotpink': (255, 105, 180),\n 'indianred': (205, 92, 92),\n 'indigo': ( 75, 0, 130),\n 'ivory': (255, 255, 240),\n 'khaki': (240, 230, 140),\n 'lavender': (230, 230, 250),\n 'lavenderblush': (255, 240, 245),\n 'lawngreen': (124, 252, 0),\n 'lemonchiffon': (255, 250, 205),\n 'lightblue': (173, 216, 230),\n 'lightcoral': (240, 128, 128),\n 'lightcyan': (224, 255, 255),\n 'lightgoldenrodyellow': (250, 250, 210),\n 'lightgray': (211, 211, 211),\n 'lightgreen': (144, 238, 144),\n 'lightgrey': (211, 211, 211),\n 'lightpink': (255, 182, 193),\n 'lightsalmon': (255, 160, 122),\n 'lightseagreen': ( 32, 178, 170),\n 'lightskyblue': (135, 206, 250),\n 'lightslategray': (119, 136, 153),\n 'lightslategrey': (119, 136, 153),\n 'lightsteelblue': (176, 196, 222),\n 'lightyellow': (255, 255, 224),\n 'lime': ( 0, 255, 0),\n 'limegreen': ( 50, 205, 50),\n 'linen': (250, 240, 230),\n 'magenta': (255, 0, 255),\n 'maroon': (128, 0, 0),\n 'mediumaquamarine': (102, 205, 170),\n 'mediumblue': ( 0, 0, 205),\n 'mediumorchid': (186, 85, 211),\n 'mediumpurple': (147, 112, 219),\n 'mediumseagreen': ( 60, 179, 113),\n 'mediumslateblue': (123, 104, 238),\n 'mediumspringgreen': ( 0, 250, 154),\n 'mediumturquoise': ( 72, 209, 204),\n 'mediumvioletred': (199, 21, 133),\n 'midnightblue': ( 25, 25, 112),\n 'mintcream': (245, 255, 250),\n 'mistyrose': (255, 228, 225),\n 'moccasin': (255, 228, 181),\n 'navajowhite': (255, 222, 173),\n 'navy': ( 0, 0, 128),\n 'oldlace': (253, 245, 230),\n 'olive': (128, 128, 0),\n 'olivedrab': (107, 142, 35),\n 'orange': (255, 165, 0),\n 'orangered': (255, 69, 0),\n 'orchid': (218, 112, 214),\n 'palegoldenrod': (238, 232, 170),\n 'palegreen': (152, 251, 152),\n 'paleturquoise': (175, 238, 238),\n 'palevioletred': (219, 112, 147),\n 'papayawhip': (255, 239, 213),\n 'peachpuff': (255, 218, 185),\n 'peru': (205, 133, 63),\n 'pink': (255, 192, 203),\n 'plum': (221, 160, 221),\n 'powderblue': (176, 224, 230),\n 'purple': (128, 0, 128),\n 'red': (255, 0, 0),\n 'rosybrown': (188, 143, 143),\n 'royalblue': ( 65, 105, 225),\n 'saddlebrown': (139, 69, 19),\n 'salmon': (250, 128, 114),\n 'sandybrown': (244, 164, 96),\n 'seagreen': ( 46, 139, 87),\n 'seashell': (255, 245, 238),\n 'sienna': (160, 82, 45),\n 'silver': (192, 192, 192),\n 'skyblue': (135, 206, 235),\n 'slateblue': (106, 90, 205),\n 'slategray': (112, 128, 144),\n 'slategrey': (112, 128, 144),\n 'snow': (255, 250, 250),\n 'springgreen': ( 0, 255, 127),\n 'steelblue': ( 70, 130, 180),\n 'tan': (210, 180, 140),\n 'teal': ( 0, 128, 128),\n 'thistle': (216, 191, 216),\n 'tomato': (255, 99, 71),\n 'turquoise': ( 64, 224, 208),\n 'violet': (238, 130, 238),\n 'wheat': (245, 222, 179),\n 'white': (255, 255, 255),\n 'whitesmoke': (245, 245, 245),\n 'yellow': (255, 255, 0),\n 'yellowgreen': (154, 205, 50)\n }\n \n_ws_re = re.compile('\\s+')\n_token_re = re.compile('[A-Za-z_][A-Za-z0-9_]*')\n_hex_re = re.compile('#([0-9a-f]{3}(?:[0-9a-f]{3})?)$')\n_number_re = re.compile('[0-9]*(\\.[0-9]*)')\n\nclass ColorParser (object):\n def __init__(self, s):\n self._string = s\n self._pos = 0\n\n def skipws(self):\n m = _ws_re.match(self._string, self._pos)\n if m:\n self._pos = m.end(0)\n \n def expect(self, s, context=''):\n if len(self._string) - self._pos < len(s) \\\n or self._string[self._pos:self._pos + len(s)] != s:\n raise ValueError('bad color \"%s\" - expected \"%s\"%s'\n % (self._string, s, context))\n self._pos += len(s)\n\n def expectEnd(self):\n if self._pos != len(self._string):\n raise ValueError('junk at end of color \"%s\"' % self._string)\n\n def getToken(self):\n m = _token_re.match(self._string, self._pos)\n if m:\n token = m.group(0)\n\n self._pos = m.end(0)\n return token\n return None\n \n def parseNumber(self, context=''):\n m = _number_re.match(self._string, self._pos)\n if m:\n self._pos = m.end(0)\n return float(m.group(0))\n raise ValueError('bad color \"%s\" - expected a number%s'\n % (self._string, context))\n \n def parseColor(self):\n self.skipws()\n\n token = self.getToken()\n if token:\n if token == 'rgb':\n return self.parseRGB()\n elif token == 'hsl':\n return self.parseHSL()\n elif token == 'hwb':\n return self.parseHWB()\n elif token == 'cmyk':\n return self.parseCMYK()\n elif token == 'gray' or token == 'grey':\n return self.parseGray()\n\n try:\n r, g, b = _x11_colors[token]\n except KeyError:\n raise ValueError('unknown color name \"%s\"' % token)\n\n self.expectEnd()\n\n return RGB(r / 255.0, g / 255.0, b / 255.0)\n \n m = _hex_re.match(self._string, self._pos)\n if m:\n hrgb = m.group(1)\n\n if len(hrgb) == 3:\n r = int('0x' + 2 * hrgb[0], 16)\n g = int('0x' + 2 * hrgb[1], 16)\n b = int('0x' + 2 * hrgb[2], 16)\n else:\n r = int('0x' + hrgb[0:2], 16)\n g = int('0x' + hrgb[2:4], 16)\n b = int('0x' + hrgb[4:6], 16)\n\n self._pos = m.end(0)\n self.skipws()\n\n self.expectEnd()\n\n return RGB(r / 255.0, g / 255.0, b / 255.0)\n\n raise ValueError('bad color syntax \"%s\"' % self._string)\n\n def parseRGB(self):\n self.expect('(', 'after \"rgb\"')\n self.skipws()\n\n r = self.parseValue()\n\n self.skipws()\n self.expect(',', 'in \"rgb\"')\n self.skipws()\n\n g = self.parseValue()\n\n self.skipws()\n self.expect(',', 'in \"rgb\"')\n self.skipws()\n\n b = self.parseValue()\n\n self.skipws()\n self.expect(')', 'at end of \"rgb\"')\n\n self.skipws()\n self.expectEnd()\n\n return RGB(r, g, b)\n \n def parseHSL(self):\n self.expect('(', 'after \"hsl\"')\n self.skipws()\n\n h = self.parseAngle()\n\n self.skipws()\n self.expect(',', 'in \"hsl\"')\n self.skipws()\n\n s = self.parseValue()\n\n self.skipws()\n self.expect(',', 'in \"hsl\"')\n self.skipws()\n\n l = self.parseValue()\n\n self.skipws()\n self.expect(')', 'at end of \"hsl\"')\n\n self.skipws()\n self.expectEnd()\n\n return HSL(h, s, l)\n\n def parseHWB(self):\n self.expect('(', 'after \"hwb\"')\n self.skipws()\n\n h = self.parseAngle()\n\n self.skipws()\n self.expect(',', 'in \"hwb\"')\n self.skipws()\n\n w = self.parseValue()\n\n self.skipws()\n self.expect(',', 'in \"hwb\"')\n self.skipws()\n\n b = self.parseValue()\n\n self.skipws()\n self.expect(')', 'at end of \"hwb\"')\n\n self.skipws()\n self.expectEnd()\n\n return HWB(h, w, b)\n\n def parseCMYK(self):\n self.expect('(', 'after \"cmyk\"')\n self.skipws()\n\n c = self.parseValue()\n\n self.skipws()\n self.expect(',', 'in \"cmyk\"')\n self.skipws()\n\n m = self.parseValue()\n\n self.skipws()\n self.expect(',', 'in \"cmyk\"')\n self.skipws()\n\n y = self.parseValue()\n\n self.skipws()\n self.expect(',', 'in \"cmyk\"')\n self.skipws()\n\n k = self.parseValue()\n\n self.skipws()\n self.expect(')', 'at end of \"cmyk\"')\n\n self.skipws()\n self.expectEnd()\n\n return CMYK(c, m, y, k)\n\n def parseGray(self):\n self.expect('(', 'after \"gray\"')\n self.skipws()\n\n g = self.parseValue()\n\n self.skipws()\n self.expect(')', 'at end of \"gray')\n\n self.skipws()\n self.expectEnd()\n\n return Gray(g)\n\n def parseValue(self):\n n = self.parseNumber()\n self.skipws()\n if self._string[self._pos] == '%':\n n = n / 100.0\n self.pos += 1\n return n\n \n def parseAngle(self):\n n = self.parseNumber()\n self.skipws()\n tok = self.getToken()\n if tok == 'rad':\n n = n * 180.0 / math.pi\n elif tok == 'grad' or tok == 'gon':\n n = n * 0.9\n elif tok != 'deg':\n raise ValueError('bad angle unit \"%s\"' % tok)\n return n\n\n_color_re = re.compile('\\s*(#|rgb|hsl|hwb|cmyk|gray|grey|%s)'\n % '|'.join(_x11_colors.keys()))\ndef isAColor(s):\n return _color_re.match(s)\n\ndef parseColor(s):\n return ColorParser(s).parseColor()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203662,"cells":{"repo_name":{"kind":"string","value":"Gregory-Howard/spaCy"},"path":{"kind":"string","value":"spacy/tests/en/test_punct.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"4576"},"content":{"kind":"string","value":"# coding: utf-8\n\"\"\"Test that open, closed and paired punctuation is split off correctly.\"\"\"\n\n\nfrom __future__ import unicode_literals\n\nimport pytest\n\nfrom ...util import compile_prefix_regex\nfrom ...language_data import TOKENIZER_PREFIXES\n\n\nPUNCT_OPEN = ['(', '[', '{', '*']\nPUNCT_CLOSE = [')', ']', '}', '*']\nPUNCT_PAIRED = [('(', ')'), ('[', ']'), ('{', '}'), ('*', '*')]\n\n\n@pytest.mark.parametrize('text', [\"(\", \"((\", \"<\"])\ndef test_tokenizer_handles_only_punct(en_tokenizer, text):\n tokens = en_tokenizer(text)\n assert len(tokens) == len(text)\n\n\n@pytest.mark.parametrize('punct', PUNCT_OPEN)\n@pytest.mark.parametrize('text', [\"Hello\"])\ndef test_tokenizer_splits_open_punct(en_tokenizer, punct, text):\n tokens = en_tokenizer(punct + text)\n assert len(tokens) == 2\n assert tokens[0].text == punct\n assert tokens[1].text == text\n\n\n@pytest.mark.parametrize('punct', PUNCT_CLOSE)\n@pytest.mark.parametrize('text', [\"Hello\"])\ndef test_tokenizer_splits_close_punct(en_tokenizer, punct, text):\n tokens = en_tokenizer(text + punct)\n assert len(tokens) == 2\n assert tokens[0].text == text\n assert tokens[1].text == punct\n\n\n@pytest.mark.parametrize('punct', PUNCT_OPEN)\n@pytest.mark.parametrize('punct_add', [\"`\"])\n@pytest.mark.parametrize('text', [\"Hello\"])\ndef test_tokenizer_splits_two_diff_open_punct(en_tokenizer, punct, punct_add, text):\n tokens = en_tokenizer(punct + punct_add + text)\n assert len(tokens) == 3\n assert tokens[0].text == punct\n assert tokens[1].text == punct_add\n assert tokens[2].text == text\n\n\n@pytest.mark.parametrize('punct', PUNCT_CLOSE)\n@pytest.mark.parametrize('punct_add', [\"'\"])\n@pytest.mark.parametrize('text', [\"Hello\"])\ndef test_tokenizer_splits_two_diff_close_punct(en_tokenizer, punct, punct_add, text):\n tokens = en_tokenizer(text + punct + punct_add)\n assert len(tokens) == 3\n assert tokens[0].text == text\n assert tokens[1].text == punct\n assert tokens[2].text == punct_add\n\n\n@pytest.mark.parametrize('punct', PUNCT_OPEN)\n@pytest.mark.parametrize('text', [\"Hello\"])\ndef test_tokenizer_splits_same_open_punct(en_tokenizer, punct, text):\n tokens = en_tokenizer(punct + punct + punct + text)\n assert len(tokens) == 4\n assert tokens[0].text == punct\n assert tokens[3].text == text\n\n\n@pytest.mark.parametrize('punct', PUNCT_CLOSE)\n@pytest.mark.parametrize('text', [\"Hello\"])\ndef test_tokenizer_splits_same_close_punct(en_tokenizer, punct, text):\n tokens = en_tokenizer(text + punct + punct + punct)\n assert len(tokens) == 4\n assert tokens[0].text == text\n assert tokens[1].text == punct\n\n\n@pytest.mark.parametrize('text', [\"'The\"])\ndef test_tokenizer_splits_open_appostrophe(en_tokenizer, text):\n tokens = en_tokenizer(text)\n assert len(tokens) == 2\n assert tokens[0].text == \"'\"\n\n\n@pytest.mark.parametrize('text', [\"Hello''\"])\ndef test_tokenizer_splits_double_end_quote(en_tokenizer, text):\n tokens = en_tokenizer(text)\n assert len(tokens) == 2\n tokens_punct = en_tokenizer(\"''\")\n assert len(tokens_punct) == 1\n\n\n@pytest.mark.parametrize('punct_open,punct_close', PUNCT_PAIRED)\n@pytest.mark.parametrize('text', [\"Hello\"])\ndef test_tokenizer_splits_open_close_punct(en_tokenizer, punct_open,\n punct_close, text):\n tokens = en_tokenizer(punct_open + text + punct_close)\n assert len(tokens) == 3\n assert tokens[0].text == punct_open\n assert tokens[1].text == text\n assert tokens[2].text == punct_close\n\n\n@pytest.mark.parametrize('punct_open,punct_close', PUNCT_PAIRED)\n@pytest.mark.parametrize('punct_open2,punct_close2', [(\"`\", \"'\")])\n@pytest.mark.parametrize('text', [\"Hello\"])\ndef test_tokenizer_two_diff_punct(en_tokenizer, punct_open, punct_close,\n punct_open2, punct_close2, text):\n tokens = en_tokenizer(punct_open2 + punct_open + text + punct_close + punct_close2)\n assert len(tokens) == 5\n assert tokens[0].text == punct_open2\n assert tokens[1].text == punct_open\n assert tokens[2].text == text\n assert tokens[3].text == punct_close\n assert tokens[4].text == punct_close2\n\n\n@pytest.mark.parametrize('text,punct', [(\"(can't\", \"(\")])\ndef test_tokenizer_splits_pre_punct_regex(text, punct):\n en_search_prefixes = compile_prefix_regex(TOKENIZER_PREFIXES).search\n match = en_search_prefixes(text)\n assert match.group() == punct\n\n\ndef test_tokenizer_splits_bracket_period(en_tokenizer):\n text = \"(And a 6a.m. run through Washington Park).\"\n tokens = en_tokenizer(text)\n assert tokens[len(tokens) - 1].text == \".\"\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203663,"cells":{"repo_name":{"kind":"string","value":"apache/airflow"},"path":{"kind":"string","value":"airflow/hooks/subprocess.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"3589"},"content":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport signal\nfrom collections import namedtuple\nfrom subprocess import PIPE, STDOUT, Popen\nfrom tempfile import TemporaryDirectory, gettempdir\nfrom typing import Dict, List, Optional\n\nfrom airflow.hooks.base import BaseHook\n\nSubprocessResult = namedtuple('SubprocessResult', ['exit_code', 'output'])\n\n\nclass SubprocessHook(BaseHook):\n \"\"\"Hook for running processes with the ``subprocess`` module\"\"\"\n\n def __init__(self) -> None:\n self.sub_process = None\n super().__init__()\n\n def run_command(\n self, command: List[str], env: Optional[Dict[str, str]] = None, output_encoding: str = 'utf-8'\n ) -> SubprocessResult:\n \"\"\"\n Execute the command in a temporary directory which will be cleaned afterwards\n\n If ``env`` is not supplied, ``os.environ`` is passed\n\n :param command: the command to run\n :param env: Optional dict containing environment variables to be made available to the shell\n environment in which ``command`` will be executed. If omitted, ``os.environ`` will be used.\n :param output_encoding: encoding to use for decoding stdout\n :return: :class:`namedtuple` containing ``exit_code`` and ``output``, the last line from stderr\n or stdout\n \"\"\"\n self.log.info('Tmp dir root location: \\n %s', gettempdir())\n\n with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:\n\n def pre_exec():\n # Restore default signal disposition and invoke setsid\n for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):\n if hasattr(signal, sig):\n signal.signal(getattr(signal, sig), signal.SIG_DFL)\n os.setsid()\n\n self.log.info('Running command: %s', command)\n\n self.sub_process = Popen(\n command,\n stdout=PIPE,\n stderr=STDOUT,\n cwd=tmp_dir,\n env=env if env or env == {} else os.environ,\n preexec_fn=pre_exec,\n )\n\n self.log.info('Output:')\n line = ''\n for raw_line in iter(self.sub_process.stdout.readline, b''):\n line = raw_line.decode(output_encoding).rstrip()\n self.log.info(\"%s\", line)\n\n self.sub_process.wait()\n\n self.log.info('Command exited with return code %s', self.sub_process.returncode)\n\n return SubprocessResult(exit_code=self.sub_process.returncode, output=line)\n\n def send_sigterm(self):\n \"\"\"Sends SIGTERM signal to ``self.sub_process`` if one exists.\"\"\"\n self.log.info('Sending SIGTERM signal to process group')\n if self.sub_process and hasattr(self.sub_process, 'pid'):\n os.killpg(os.getpgid(self.sub_process.pid), signal.SIGTERM)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203664,"cells":{"repo_name":{"kind":"string","value":"mitchrule/Miscellaneous"},"path":{"kind":"string","value":"Django_Project/django/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py"},"copies":{"kind":"string","value":"469"},"size":{"kind":"string","value":"4196"},"content":{"kind":"string","value":"import functools\n\nfrom pip._vendor.requests.adapters import HTTPAdapter\n\nfrom .controller import CacheController\nfrom .cache import DictCache\nfrom .filewrapper import CallbackFileWrapper\n\n\nclass CacheControlAdapter(HTTPAdapter):\n invalidating_methods = set(['PUT', 'DELETE'])\n\n def __init__(self, cache=None,\n cache_etags=True,\n controller_class=None,\n serializer=None,\n heuristic=None,\n *args, **kw):\n super(CacheControlAdapter, self).__init__(*args, **kw)\n self.cache = cache or DictCache()\n self.heuristic = heuristic\n\n controller_factory = controller_class or CacheController\n self.controller = controller_factory(\n self.cache,\n cache_etags=cache_etags,\n serializer=serializer,\n )\n\n def send(self, request, **kw):\n \"\"\"\n Send a request. Use the request information to see if it\n exists in the cache and cache the response if we need to and can.\n \"\"\"\n if request.method == 'GET':\n cached_response = self.controller.cached_request(request)\n if cached_response:\n return self.build_response(request, cached_response,\n from_cache=True)\n\n # check for etags and add headers if appropriate\n request.headers.update(\n self.controller.conditional_headers(request)\n )\n\n resp = super(CacheControlAdapter, self).send(request, **kw)\n\n return resp\n\n def build_response(self, request, response, from_cache=False):\n \"\"\"\n Build a response by making a request or using the cache.\n\n This will end up calling send and returning a potentially\n cached response\n \"\"\"\n if not from_cache and request.method == 'GET':\n\n # apply any expiration heuristics\n if response.status == 304:\n # We must have sent an ETag request. This could mean\n # that we've been expired already or that we simply\n # have an etag. In either case, we want to try and\n # update the cache if that is the case.\n cached_response = self.controller.update_cached_response(\n request, response\n )\n\n if cached_response is not response:\n from_cache = True\n\n # We are done with the server response, read a\n # possible response body (compliant servers will\n # not return one, but we cannot be 100% sure) and\n # release the connection back to the pool.\n response.read(decode_content=False)\n response.release_conn()\n\n response = cached_response\n\n # We always cache the 301 responses\n elif response.status == 301:\n self.controller.cache_response(request, response)\n else:\n # Check for any heuristics that might update headers\n # before trying to cache.\n if self.heuristic:\n response = self.heuristic.apply(response)\n\n # Wrap the response file with a wrapper that will cache the\n # response when the stream has been consumed.\n response._fp = CallbackFileWrapper(\n response._fp,\n functools.partial(\n self.controller.cache_response,\n request,\n response,\n )\n )\n\n resp = super(CacheControlAdapter, self).build_response(\n request, response\n )\n\n # See if we should invalidate the cache.\n if request.method in self.invalidating_methods and resp.ok:\n cache_url = self.controller.cache_url(request.url)\n self.cache.delete(cache_url)\n\n # Give the request a from_cache attr to let people use it\n resp.from_cache = from_cache\n\n return resp\n\n def close(self):\n self.cache.close()\n super(CacheControlAdapter, self).close()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203665,"cells":{"repo_name":{"kind":"string","value":"jaharkes/home-assistant"},"path":{"kind":"string","value":"homeassistant/components/switch/mfi.py"},"copies":{"kind":"string","value":"25"},"size":{"kind":"string","value":"3613"},"content":{"kind":"string","value":"\"\"\"\nSupport for Ubiquiti mFi switches.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/switch.mfi/\n\"\"\"\nimport logging\n\nimport requests\nimport voluptuous as vol\n\nfrom homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)\nfrom homeassistant.const import (\n CONF_HOST, CONF_PORT, CONF_PASSWORD, CONF_USERNAME, CONF_SSL,\n CONF_VERIFY_SSL)\nimport homeassistant.helpers.config_validation as cv\n\nREQUIREMENTS = ['mficlient==0.3.0']\n\n_LOGGER = logging.getLogger(__name__)\n\nDEFAULT_SSL = True\nDEFAULT_VERIFY_SSL = True\n\nSWITCH_MODELS = [\n 'Outlet',\n 'Output 5v',\n 'Output 12v',\n 'Output 24v',\n]\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_HOST): cv.string,\n vol.Required(CONF_USERNAME): cv.string,\n vol.Required(CONF_PASSWORD): cv.string,\n vol.Optional(CONF_PORT): cv.port,\n vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,\n vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,\n})\n\n\n# pylint: disable=unused-variable\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Setup mFi sensors.\"\"\"\n host = config.get(CONF_HOST)\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n use_tls = config.get(CONF_SSL)\n verify_tls = config.get(CONF_VERIFY_SSL)\n default_port = use_tls and 6443 or 6080\n port = int(config.get(CONF_PORT, default_port))\n\n from mficlient.client import FailedToLogin, MFiClient\n\n try:\n client = MFiClient(host, username, password, port=port,\n use_tls=use_tls, verify=verify_tls)\n except (FailedToLogin, requests.exceptions.ConnectionError) as ex:\n _LOGGER.error('Unable to connect to mFi: %s', str(ex))\n return False\n\n add_devices(MfiSwitch(port)\n for device in client.get_devices()\n for port in device.ports.values()\n if port.model in SWITCH_MODELS)\n\n\nclass MfiSwitch(SwitchDevice):\n \"\"\"Representation of an mFi switch-able device.\"\"\"\n\n def __init__(self, port):\n \"\"\"Initialize the mFi device.\"\"\"\n self._port = port\n self._target_state = None\n\n @property\n def should_poll(self):\n \"\"\"Polling is needed.\"\"\"\n return True\n\n @property\n def unique_id(self):\n \"\"\"Return the unique ID of the device.\"\"\"\n return self._port.ident\n\n @property\n def name(self):\n \"\"\"Return the name of the device.\"\"\"\n return self._port.label\n\n @property\n def is_on(self):\n \"\"\"Return true if the device is on.\"\"\"\n return self._port.output\n\n def update(self):\n \"\"\"Get the latest state and update the state.\"\"\"\n self._port.refresh()\n if self._target_state is not None:\n self._port.data['output'] = float(self._target_state)\n self._target_state = None\n\n def turn_on(self):\n \"\"\"Turn the switch on.\"\"\"\n self._port.control(True)\n self._target_state = True\n\n def turn_off(self):\n \"\"\"Turn the switch off.\"\"\"\n self._port.control(False)\n self._target_state = False\n\n @property\n def current_power_mwh(self):\n \"\"\"Return the current power usage in mWh.\"\"\"\n return int(self._port.data.get('active_pwr', 0) * 1000)\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the state attributes fof the device.\"\"\"\n attr = {}\n attr['volts'] = round(self._port.data.get('v_rms', 0), 1)\n attr['amps'] = round(self._port.data.get('i_rms', 0), 1)\n return attr\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203666,"cells":{"repo_name":{"kind":"string","value":"fernandopinhati/oppia"},"path":{"kind":"string","value":"core/platform/users/gae_current_user_services.py"},"copies":{"kind":"string","value":"30"},"size":{"kind":"string","value":"2532"},"content":{"kind":"string","value":"# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides a seam for user-related services.\"\"\"\n\n__author__ = 'Sean Lip'\n\nimport feconf\nimport logging\nimport utils\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\n\n\ndef create_login_url(slug):\n \"\"\"Creates a login url.\"\"\"\n return users.create_login_url(utils.set_url_query_parameter(\n feconf.SIGNUP_URL, 'return_url', slug))\n\n\ndef create_logout_url(slug):\n \"\"\"Creates a logout url.\"\"\"\n logout_url = utils.set_url_query_parameter('/logout', 'return_url', slug)\n return logout_url\n\n\ndef get_current_user(request):\n \"\"\"Returns the current user.\"\"\"\n return users.get_current_user()\n\n\ndef is_super_admin(user_id, request):\n \"\"\"Checks whether the user with the given user_id owns this app.\n\n For GAE, the user in question is also required to be the current user.\n \"\"\"\n user = users.get_current_user()\n if user is None:\n return False\n\n return user.user_id() == user_id and users.is_current_user_admin()\n\n\ndef get_user_id_from_email(email):\n \"\"\"Given an email address, returns a user id.\n\n Returns None if the email address does not correspond to a valid user id.\n \"\"\"\n class _FakeUser(ndb.Model):\n _use_memcache = False\n _use_cache = False\n user = ndb.UserProperty(required=True)\n\n try:\n u = users.User(email)\n except users.UserNotFoundError:\n logging.error(\n 'The email address %s does not correspond to a valid user_id'\n % email)\n return None\n\n key = _FakeUser(id=email, user=u).put()\n obj = _FakeUser.get_by_id(key.id())\n user_id = obj.user.user_id()\n if user_id:\n return unicode(user_id)\n else:\n return None\n\n\ndef get_user_id(user):\n \"\"\" Given an user object, get the user id. \"\"\"\n return user.user_id()\n\n\ndef get_user_email(user):\n \"\"\" Given an user object, get the user's email. \"\"\"\n return user.email()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203667,"cells":{"repo_name":{"kind":"string","value":"tammoippen/nest-simulator"},"path":{"kind":"string","value":"pynest/nest/tests/test_connect_pairwise_bernoulli.py"},"copies":{"kind":"string","value":"11"},"size":{"kind":"string","value":"3393"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# test_connect_pairwise_bernoulli.py\n#\n# This file is part of NEST.\n#\n# Copyright (C) 2004 The NEST Initiative\n#\n# NEST is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n#\n# NEST is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with NEST. If not, see .\n\n\nimport numpy as np\nimport unittest\nimport scipy.stats\nfrom . import test_connect_helpers as hf\nfrom .test_connect_parameters import TestParams\n\n\nclass TestPairwiseBernoulli(TestParams):\n\n # specify connection pattern and specific params\n rule = 'pairwise_bernoulli'\n p = 0.5\n conn_dict = {'rule': rule, 'p': p}\n # sizes of source-, target-population and connection probability for\n # statistical test\n N_s = 50\n N_t = 50\n # Critical values and number of iterations of two level test\n stat_dict = {'alpha2': 0.05, 'n_runs': 20}\n\n def testStatistics(self):\n for fan in ['in', 'out']:\n expected = hf.get_expected_degrees_bernoulli(\n self.p, fan, self.N_s, self.N_t)\n\n pvalues = []\n for i in range(self.stat_dict['n_runs']):\n hf.reset_seed(i, self.nr_threads)\n self.setUpNetwork(conn_dict=self.conn_dict,\n N1=self.N_s, N2=self.N_t)\n degrees = hf.get_degrees(fan, self.pop1, self.pop2)\n degrees = hf.gather_data(degrees)\n # degrees = self.comm.gather(degrees, root=0)\n # if self.rank == 0:\n if degrees is not None:\n chi, p = hf.chi_squared_check(degrees, expected, self.rule)\n pvalues.append(p)\n hf.mpi_barrier()\n if degrees is not None:\n ks, p = scipy.stats.kstest(pvalues, 'uniform')\n self.assertTrue(p > self.stat_dict['alpha2'])\n\n def testAutapses(self):\n conn_params = self.conn_dict.copy()\n N = 10\n conn_params['multapses'] = False\n\n # test that autapses exist\n conn_params['p'] = 1.\n conn_params['autapses'] = True\n pop = hf.nest.Create('iaf_neuron', N)\n hf.nest.Connect(pop, pop, conn_params)\n # make sure all connections do exist\n M = hf.get_connectivity_matrix(pop, pop)\n hf.mpi_assert(np.diag(M), np.ones(N), self)\n hf.nest.ResetKernel()\n\n # test that autapses were excluded\n conn_params['p'] = 1.\n conn_params['autapses'] = False\n pop = hf.nest.Create('iaf_neuron', N)\n hf.nest.Connect(pop, pop, conn_params)\n # make sure all connections do exist\n M = hf.get_connectivity_matrix(pop, pop)\n hf.mpi_assert(np.diag(M), np.zeros(N), self)\n\n\ndef suite():\n suite = unittest.TestLoader().loadTestsFromTestCase(TestPairwiseBernoulli)\n return suite\n\n\ndef run():\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite())\n\n\nif __name__ == '__main__':\n run()\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203668,"cells":{"repo_name":{"kind":"string","value":"trankmichael/scikit-learn"},"path":{"kind":"string","value":"sklearn/datasets/svmlight_format.py"},"copies":{"kind":"string","value":"114"},"size":{"kind":"string","value":"15826"},"content":{"kind":"string","value":"\"\"\"This module implements a loader and dumper for the svmlight format\n\nThis format is a text-based format, with one sample per line. It does\nnot store zero valued features hence is suitable for sparse dataset.\n\nThe first element of each line can be used to store a target variable to\npredict.\n\nThis format is used as the default format for both svmlight and the\nlibsvm command line programs.\n\"\"\"\n\n# Authors: Mathieu Blondel \n# Lars Buitinck \n# Olivier Grisel \n# License: BSD 3 clause\n\nfrom contextlib import closing\nimport io\nimport os.path\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom ._svmlight_format import _load_svmlight_file\nfrom .. import __version__\nfrom ..externals import six\nfrom ..externals.six import u, b\nfrom ..externals.six.moves import range, zip\nfrom ..utils import check_array\nfrom ..utils.fixes import frombuffer_empty\n\n\ndef load_svmlight_file(f, n_features=None, dtype=np.float64,\n multilabel=False, zero_based=\"auto\", query_id=False):\n \"\"\"Load datasets in the svmlight / libsvm format into sparse CSR matrix\n\n This format is a text-based format, with one sample per line. It does\n not store zero valued features hence is suitable for sparse dataset.\n\n The first element of each line can be used to store a target variable\n to predict.\n\n This format is used as the default format for both svmlight and the\n libsvm command line programs.\n\n Parsing a text based source can be expensive. When working on\n repeatedly on the same dataset, it is recommended to wrap this\n loader with joblib.Memory.cache to store a memmapped backup of the\n CSR results of the first call and benefit from the near instantaneous\n loading of memmapped structures for the subsequent calls.\n\n In case the file contains a pairwise preference constraint (known\n as \"qid\" in the svmlight format) these are ignored unless the\n query_id parameter is set to True. These pairwise preference\n constraints can be used to constraint the combination of samples\n when using pairwise loss functions (as is the case in some\n learning to rank problems) so that only pairs with the same\n query_id value are considered.\n\n This implementation is written in Cython and is reasonably fast.\n However, a faster API-compatible loader is also available at:\n\n https://github.com/mblondel/svmlight-loader\n\n Parameters\n ----------\n f : {str, file-like, int}\n (Path to) a file to load. If a path ends in \".gz\" or \".bz2\", it will\n be uncompressed on the fly. If an integer is passed, it is assumed to\n be a file descriptor. A file-like or file descriptor will not be closed\n by this function. A file-like object must be opened in binary mode.\n\n n_features : int or None\n The number of features to use. If None, it will be inferred. This\n argument is useful to load several files that are subsets of a\n bigger sliced dataset: each subset might not have examples of\n every feature, hence the inferred shape might vary from one\n slice to another.\n\n multilabel : boolean, optional, default False\n Samples may have several labels each (see\n http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)\n\n zero_based : boolean or \"auto\", optional, default \"auto\"\n Whether column indices in f are zero-based (True) or one-based\n (False). If column indices are one-based, they are transformed to\n zero-based to match Python/NumPy conventions.\n If set to \"auto\", a heuristic check is applied to determine this from\n the file contents. Both kinds of files occur \"in the wild\", but they\n are unfortunately not self-identifying. Using \"auto\" or True should\n always be safe.\n\n query_id : boolean, default False\n If True, will return the query_id array for each file.\n\n dtype : numpy data type, default np.float64\n Data type of dataset to be loaded. This will be the data type of the\n output numpy arrays ``X`` and ``y``.\n\n Returns\n -------\n X: scipy.sparse matrix of shape (n_samples, n_features)\n\n y: ndarray of shape (n_samples,), or, in the multilabel a list of\n tuples of length n_samples.\n\n query_id: array of shape (n_samples,)\n query_id for each sample. Only returned when query_id is set to\n True.\n\n See also\n --------\n load_svmlight_files: similar function for loading multiple files in this\n format, enforcing the same number of features/columns on all of them.\n\n Examples\n --------\n To use joblib.Memory to cache the svmlight file::\n\n from sklearn.externals.joblib import Memory\n from sklearn.datasets import load_svmlight_file\n mem = Memory(\"./mycache\")\n\n @mem.cache\n def get_data():\n data = load_svmlight_file(\"mysvmlightfile\")\n return data[0], data[1]\n\n X, y = get_data()\n \"\"\"\n return tuple(load_svmlight_files([f], n_features, dtype, multilabel,\n zero_based, query_id))\n\n\ndef _gen_open(f):\n if isinstance(f, int): # file descriptor\n return io.open(f, \"rb\", closefd=False)\n elif not isinstance(f, six.string_types):\n raise TypeError(\"expected {str, int, file-like}, got %s\" % type(f))\n\n _, ext = os.path.splitext(f)\n if ext == \".gz\":\n import gzip\n return gzip.open(f, \"rb\")\n elif ext == \".bz2\":\n from bz2 import BZ2File\n return BZ2File(f, \"rb\")\n else:\n return open(f, \"rb\")\n\n\ndef _open_and_load(f, dtype, multilabel, zero_based, query_id):\n if hasattr(f, \"read\"):\n actual_dtype, data, ind, indptr, labels, query = \\\n _load_svmlight_file(f, dtype, multilabel, zero_based, query_id)\n # XXX remove closing when Python 2.7+/3.1+ required\n else:\n with closing(_gen_open(f)) as f:\n actual_dtype, data, ind, indptr, labels, query = \\\n _load_svmlight_file(f, dtype, multilabel, zero_based, query_id)\n\n # convert from array.array, give data the right dtype\n if not multilabel:\n labels = frombuffer_empty(labels, np.float64)\n data = frombuffer_empty(data, actual_dtype)\n indices = frombuffer_empty(ind, np.intc)\n indptr = np.frombuffer(indptr, dtype=np.intc) # never empty\n query = frombuffer_empty(query, np.intc)\n\n data = np.asarray(data, dtype=dtype) # no-op for float{32,64}\n return data, indices, indptr, labels, query\n\n\ndef load_svmlight_files(files, n_features=None, dtype=np.float64,\n multilabel=False, zero_based=\"auto\", query_id=False):\n \"\"\"Load dataset from multiple files in SVMlight format\n\n This function is equivalent to mapping load_svmlight_file over a list of\n files, except that the results are concatenated into a single, flat list\n and the samples vectors are constrained to all have the same number of\n features.\n\n In case the file contains a pairwise preference constraint (known\n as \"qid\" in the svmlight format) these are ignored unless the\n query_id parameter is set to True. These pairwise preference\n constraints can be used to constraint the combination of samples\n when using pairwise loss functions (as is the case in some\n learning to rank problems) so that only pairs with the same\n query_id value are considered.\n\n Parameters\n ----------\n files : iterable over {str, file-like, int}\n (Paths of) files to load. If a path ends in \".gz\" or \".bz2\", it will\n be uncompressed on the fly. If an integer is passed, it is assumed to\n be a file descriptor. File-likes and file descriptors will not be\n closed by this function. File-like objects must be opened in binary\n mode.\n\n n_features: int or None\n The number of features to use. If None, it will be inferred from the\n maximum column index occurring in any of the files.\n\n This can be set to a higher value than the actual number of features\n in any of the input files, but setting it to a lower value will cause\n an exception to be raised.\n\n multilabel: boolean, optional\n Samples may have several labels each (see\n http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)\n\n zero_based: boolean or \"auto\", optional\n Whether column indices in f are zero-based (True) or one-based\n (False). If column indices are one-based, they are transformed to\n zero-based to match Python/NumPy conventions.\n If set to \"auto\", a heuristic check is applied to determine this from\n the file contents. Both kinds of files occur \"in the wild\", but they\n are unfortunately not self-identifying. Using \"auto\" or True should\n always be safe.\n\n query_id: boolean, defaults to False\n If True, will return the query_id array for each file.\n\n dtype : numpy data type, default np.float64\n Data type of dataset to be loaded. This will be the data type of the\n output numpy arrays ``X`` and ``y``.\n\n Returns\n -------\n [X1, y1, ..., Xn, yn]\n where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).\n\n If query_id is set to True, this will return instead [X1, y1, q1,\n ..., Xn, yn, qn] where (Xi, yi, qi) is the result from\n load_svmlight_file(files[i])\n\n Notes\n -----\n When fitting a model to a matrix X_train and evaluating it against a\n matrix X_test, it is essential that X_train and X_test have the same\n number of features (X_train.shape[1] == X_test.shape[1]). This may not\n be the case if you load the files individually with load_svmlight_file.\n\n See also\n --------\n load_svmlight_file\n \"\"\"\n r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))\n for f in files]\n\n if (zero_based is False\n or zero_based == \"auto\" and all(np.min(tmp[1]) > 0 for tmp in r)):\n for ind in r:\n indices = ind[1]\n indices -= 1\n\n n_f = max(ind[1].max() for ind in r) + 1\n if n_features is None:\n n_features = n_f\n elif n_features < n_f:\n raise ValueError(\"n_features was set to {},\"\n \" but input file contains {} features\"\n .format(n_features, n_f))\n\n result = []\n for data, indices, indptr, y, query_values in r:\n shape = (indptr.shape[0] - 1, n_features)\n X = sp.csr_matrix((data, indices, indptr), shape)\n X.sort_indices()\n result += X, y\n if query_id:\n result.append(query_values)\n\n return result\n\n\ndef _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):\n is_sp = int(hasattr(X, \"tocsr\"))\n if X.dtype.kind == 'i':\n value_pattern = u(\"%d:%d\")\n else:\n value_pattern = u(\"%d:%.16g\")\n\n if y.dtype.kind == 'i':\n label_pattern = u(\"%d\")\n else:\n label_pattern = u(\"%.16g\")\n\n line_pattern = u(\"%s\")\n if query_id is not None:\n line_pattern += u(\" qid:%d\")\n line_pattern += u(\" %s\\n\")\n\n if comment:\n f.write(b(\"# Generated by dump_svmlight_file from scikit-learn %s\\n\"\n % __version__))\n f.write(b(\"# Column indices are %s-based\\n\"\n % [\"zero\", \"one\"][one_based]))\n\n f.write(b(\"#\\n\"))\n f.writelines(b(\"# %s\\n\" % line) for line in comment.splitlines())\n\n for i in range(X.shape[0]):\n if is_sp:\n span = slice(X.indptr[i], X.indptr[i + 1])\n row = zip(X.indices[span], X.data[span])\n else:\n nz = X[i] != 0\n row = zip(np.where(nz)[0], X[i, nz])\n\n s = \" \".join(value_pattern % (j + one_based, x) for j, x in row)\n\n if multilabel:\n nz_labels = np.where(y[i] != 0)[0]\n labels_str = \",\".join(label_pattern % j for j in nz_labels)\n else:\n labels_str = label_pattern % y[i]\n\n if query_id is not None:\n feat = (labels_str, query_id[i], s)\n else:\n feat = (labels_str, s)\n\n f.write((line_pattern % feat).encode('ascii'))\n\n\ndef dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,\n multilabel=False):\n \"\"\"Dump the dataset in svmlight / libsvm file format.\n\n This format is a text-based format, with one sample per line. It does\n not store zero valued features hence is suitable for sparse dataset.\n\n The first element of each line can be used to store a target variable\n to predict.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape = [n_samples]\n Target values.\n\n f : string or file-like in binary mode\n If string, specifies the path that will contain the data.\n If file-like, data will be written to f. f should be opened in binary\n mode.\n\n zero_based : boolean, optional\n Whether column indices should be written zero-based (True) or one-based\n (False).\n\n comment : string, optional\n Comment to insert at the top of the file. This should be either a\n Unicode string, which will be encoded as UTF-8, or an ASCII byte\n string.\n If a comment is given, then it will be preceded by one that identifies\n the file as having been dumped by scikit-learn. Note that not all\n tools grok comments in SVMlight files.\n\n query_id : array-like, shape = [n_samples]\n Array containing pairwise preference constraints (qid in svmlight\n format).\n\n multilabel: boolean, optional\n Samples may have several labels each (see\n http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)\n \"\"\"\n if comment is not None:\n # Convert comment string to list of lines in UTF-8.\n # If a byte string is passed, then check whether it's ASCII;\n # if a user wants to get fancy, they'll have to decode themselves.\n # Avoid mention of str and unicode types for Python 3.x compat.\n if isinstance(comment, bytes):\n comment.decode(\"ascii\") # just for the exception\n else:\n comment = comment.encode(\"utf-8\")\n if six.b(\"\\0\") in comment:\n raise ValueError(\"comment string contains NUL byte\")\n\n y = np.asarray(y)\n if y.ndim != 1 and not multilabel:\n raise ValueError(\"expected y of shape (n_samples,), got %r\"\n % (y.shape,))\n\n Xval = check_array(X, accept_sparse='csr')\n if Xval.shape[0] != y.shape[0]:\n raise ValueError(\"X.shape[0] and y.shape[0] should be the same, got\"\n \" %r and %r instead.\" % (Xval.shape[0], y.shape[0]))\n\n # We had some issues with CSR matrices with unsorted indices (e.g. #1501),\n # so sort them here, but first make sure we don't modify the user's X.\n # TODO We can do this cheaper; sorted_indices copies the whole matrix.\n if Xval is X and hasattr(Xval, \"sorted_indices\"):\n X = Xval.sorted_indices()\n else:\n X = Xval\n if hasattr(X, \"sort_indices\"):\n X.sort_indices()\n\n if query_id is not None:\n query_id = np.asarray(query_id)\n if query_id.shape[0] != y.shape[0]:\n raise ValueError(\"expected query_id of shape (n_samples,), got %r\"\n % (query_id.shape,))\n\n one_based = not zero_based\n\n if hasattr(f, \"write\"):\n _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)\n else:\n with open(f, \"wb\") as f:\n _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203669,"cells":{"repo_name":{"kind":"string","value":"ubiar/odoo"},"path":{"kind":"string","value":"addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/logreport.py"},"copies":{"kind":"string","value":"386"},"size":{"kind":"string","value":"1736"},"content":{"kind":"string","value":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL (). All Rights Reserved\n# $Id$\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nimport logging\nimport tempfile\nLOG_DEBUG='debug'\nLOG_INFO='info'\nLOG_WARNING='warn'\nLOG_ERROR='error'\nLOG_CRITICAL='critical'\n_logger = logging.getLogger(__name__)\n\ndef log_detail(self):\n import os\n logfile_name = os.path.join(tempfile.gettempdir(), \"openerp_report_designer.log\")\n hdlr = logging.FileHandler(logfile_name)\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n _logger.addHandler(hdlr)\n _logger.setLevel(logging.INFO)\n\nclass Logger(object):\n def log_write(self, name, level, msg):\n getattr(_logger,level)(msg)\n\n def shutdown(self):\n logging.shutdown()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":203670,"cells":{"repo_name":{"kind":"string","value":"noinil/pinang"},"path":{"kind":"string","value":"share/figures/energy_function/native.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1420"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom pylab import *\n\nfigure(figsize=(8,5), dpi=80)\nsubplot(111)\n\nX = np.linspace(0.5, 2, 256,endpoint=False)\nC = 5*pow((1/X),12) - 6*pow((1/X),10)\n# C1 = 5*pow((0.5/X),12) - 6*pow((0.5/X),10)\nA = 5*pow((1/X),12)\nR = - 6*pow((1/X),10)\n\nplot(X, C, color=\"green\", linewidth=2.5, linestyle=\"-\",\n label=r\"$V_{native} = \\varepsilon [5(\\frac{\\sigma}{r})^{12} - 6(\\frac{\\sigma}{r})^{10} ]$\")\nplot(X, A, color=\"blue\", linewidth=1.5, linestyle=\"--\",\n label=r\"$V_{repulsion} = 5 \\varepsilon (\\frac{\\sigma}{r})^{12}$\")\nplot(X, R, color=\"red\", linewidth=1.5, linestyle=\"--\",\n label=r\"$V_{attraction} = - 6 \\varepsilon (\\frac{\\sigma}{r})^{10}$\")\n# plot(X, C1, color=\"black\", linewidth=1, linestyle=\"-.\",\n# label=r\"$V_{native} = \\varepsilon [5(\\frac{\\sigma}{r})^{12} - 6(\\frac{\\sigma}{r})^{10} ]$\")\n\nax = gca()\nax.spines['right'].set_color('none')\nax.spines['bottom'].set_color('none')\nax.xaxis.set_ticks_position('top')\nax.spines['top'].set_position(('data',0))\nax.yaxis.set_ticks_position('left')\nax.spines['left'].set_position(('data',0.8))\n\nxlim(0.6, X.max()*1.01)\nxticks([0.8,1],\n [r'$0.8\\sigma$', r'$\\sigma$'])\n\nylim(-3,5)\nyticks([-1],\n [r'$-\\varepsilon$'])\n\nt = 1\nplot([t,t],[0,-1],\n color ='green', linewidth=1., linestyle=\"--\")\n\nplot([0.8,t],[-1,-1],\n color ='green', linewidth=1., linestyle=\"--\")\n\nlegend(loc='upper right')\n\nsavefig(\"native.svg\", dpi=72)\nshow()\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203671,"cells":{"repo_name":{"kind":"string","value":"shivam1111/odoo"},"path":{"kind":"string","value":"addons/hr_holidays/tests/common.py"},"copies":{"kind":"string","value":"389"},"size":{"kind":"string","value":"4347"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Business Applications\n# Copyright (c) 2013-TODAY OpenERP S.A. \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp.tests import common\n\n\nclass TestHrHolidaysBase(common.TransactionCase):\n\n def setUp(self):\n super(TestHrHolidaysBase, self).setUp()\n cr, uid = self.cr, self.uid\n\n # Usefull models\n self.hr_employee = self.registry('hr.employee')\n self.hr_holidays = self.registry('hr.holidays')\n self.hr_holidays_status = self.registry('hr.holidays.status')\n self.res_users = self.registry('res.users')\n self.res_partner = self.registry('res.partner')\n\n # Find Employee group\n group_employee_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')\n self.group_employee_id = group_employee_ref and group_employee_ref[1] or False\n\n # Find Hr User group\n group_hr_user_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_user')\n self.group_hr_user_ref_id = group_hr_user_ref and group_hr_user_ref[1] or False\n\n # Find Hr Manager group\n group_hr_manager_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_manager')\n self.group_hr_manager_ref_id = group_hr_manager_ref and group_hr_manager_ref[1] or False\n\n # Test partners to use through the various tests\n self.hr_partner_id = self.res_partner.create(cr, uid, {\n 'name': 'Gertrude AgrolaitPartner',\n 'email': 'gertrude.partner@agrolait.com',\n })\n self.email_partner_id = self.res_partner.create(cr, uid, {\n 'name': 'Patrick Ratatouille',\n 'email': 'patrick.ratatouille@agrolait.com',\n })\n\n # Test users to use through the various tests\n self.user_hruser_id = self.res_users.create(cr, uid, {\n 'name': 'Armande HrUser',\n 'login': 'Armande',\n 'alias_name': 'armande',\n 'email': 'armande.hruser@example.com',\n 'groups_id': [(6, 0, [self.group_employee_id, self.group_hr_user_ref_id])]\n }, {'no_reset_password': True})\n self.user_hrmanager_id = self.res_users.create(cr, uid, {\n 'name': 'Bastien HrManager',\n 'login': 'bastien',\n 'alias_name': 'bastien',\n 'email': 'bastien.hrmanager@example.com',\n 'groups_id': [(6, 0, [self.group_employee_id, self.group_hr_manager_ref_id])]\n }, {'no_reset_password': True})\n self.user_none_id = self.res_users.create(cr, uid, {\n 'name': 'Charlie Avotbonkeur',\n 'login': 'charlie',\n 'alias_name': 'charlie',\n 'email': 'charlie.noone@example.com',\n 'groups_id': [(6, 0, [])]\n }, {'no_reset_password': True})\n self.user_employee_id = self.res_users.create(cr, uid, {\n 'name': 'David Employee',\n 'login': 'david',\n 'alias_name': 'david',\n 'email': 'david.employee@example.com',\n 'groups_id': [(6, 0, [self.group_employee_id])]\n }, {'no_reset_password': True})\n\n # Hr Data\n self.employee_emp_id = self.hr_employee.create(cr, uid, {\n 'name': 'David Employee',\n 'user_id': self.user_employee_id,\n })\n self.employee_hruser_id = self.hr_employee.create(cr, uid, {\n 'name': 'Armande HrUser',\n 'user_id': self.user_hruser_id,\n })\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":203672,"cells":{"repo_name":{"kind":"string","value":"2014c2g4/2015cda0623"},"path":{"kind":"string","value":"static/Brython3.1.0-20150301-090019/Lib/browser/markdown.py"},"copies":{"kind":"string","value":"623"},"size":{"kind":"string","value":"13060"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\ntry:\n import _jsre as re\nexcept:\n import re\n\nimport random\nimport time\n\nletters = 'abcdefghijklmnopqrstuvwxyz'\nletters += letters.upper()+'0123456789'\n\nclass URL:\n def __init__(self,src):\n elts = src.split(maxsplit=1)\n self.href = elts[0]\n self.alt = ''\n if len(elts)==2:\n alt = elts[1]\n if alt[0]=='\"' and alt[-1]=='\"':self.alt=alt[1:-1]\n elif alt[0]==\"'\" and alt[-1]==\"'\":self.alt=alt[1:-1]\n elif alt[0]==\"(\" and alt[-1]==\")\":self.alt=alt[1:-1]\n \nclass CodeBlock:\n def __init__(self,line):\n self.lines = [line]\n if line.startswith(\"```\") and len(line)>3:\n self.info = line[3:]\n else:\n self.info = None\n \n def to_html(self):\n if self.lines[0].startswith(\"`\"):\n self.lines.pop(0)\n res = escape('\\n'.join(self.lines))\n res = unmark(res)\n _class = self.info or \"marked\"\n res = '
%s
\\n' %(_class, res)\n return res,[]\n\nclass HtmlBlock:\n\n def __init__(self, src):\n self.src = src\n \n def to_html(self):\n return self.src\n \nclass Marked:\n def __init__(self, line=''):\n self.line = line\n self.children = []\n\n def to_html(self):\n return apply_markdown(self.line)\n \n# get references\nrefs = {}\nref_pattern = r\"^\\[(.*)\\]:\\s+(.*)\"\n\ndef mark(src):\n\n global refs\n t0 = time.time()\n refs = {}\n # split source in sections\n # sections can be :\n # - a block-level HTML element (markdown syntax will not be processed)\n # - a script\n # - a span-level HTML tag (markdown syntax will be processed)\n # - a code block\n \n # normalise line feeds\n src = src.replace('\\r\\n','\\n')\n \n # lines followed by dashes\n src = re.sub(r'(.*?)\\n=+\\n', '\\n# \\\\1\\n', src)\n src = re.sub(r'(.*?)\\n-+\\n', '\\n## \\\\1\\n', src) \n\n lines = src.split('\\n')+['']\n \n i = bq = 0\n ul = ol = 0\n \n while i in a blockquote\n if lines[i].startswith('>'):\n nb = 1\n while nb':\n nb += 1\n lines[i] = lines[i][nb:]\n if nb>bq:\n lines.insert(i,'
'*(nb-bq))\n i += 1\n bq = nb\n elif nb'*(bq-nb))\n i += 1\n bq = nb\n elif bq>0:\n lines.insert(i,'
'*bq)\n i += 1\n bq = 0\n\n # unordered lists\n if lines[i].strip() and lines[i].lstrip()[0] in '-+*' \\\n and len(lines[i].lstrip())>1 \\\n and lines[i].lstrip()[1]==' ' \\\n and (i==0 or ul or not lines[i-1].strip()):\n # line indentation indicates nesting level\n nb = 1+len(lines[i])-len(lines[i].lstrip())\n lines[i] = '
  • '+lines[i][nb:]\n if nb>ul:\n lines.insert(i,'
      '*(nb-ul))\n i += 1\n elif nb'*(ul-nb))\n i += 1\n ul = nb\n elif ul and not lines[i].strip():\n if i1 and nline[1]==' ':\n pass\n else:\n lines.insert(i,'
    '*ul)\n i += 1\n ul = 0\n\n # ordered lists\n mo = re.search(r'^(\\d+\\.)',lines[i])\n if mo:\n if not ol:\n lines.insert(i,'
      ')\n i += 1\n lines[i] = '
    1. '+lines[i][len(mo.groups()[0]):]\n ol = 1\n elif ol and not lines[i].strip() and i')\n i += 1\n ol = 0\n \n i += 1\n \n if ul:\n lines.append(''*ul)\n if ol:\n lines.append('
    '*ol)\n if bq:\n lines.append(''*bq)\n\n t1 = time.time()\n #print('part 1', t1-t0) \n sections = []\n scripts = []\n section = Marked()\n\n i = 0\n while i'):\n scripts.append('\\n'.join(lines[i+1:j]))\n for k in range(i,j+1):\n lines[k] = ''\n break\n j += 1\n i = j\n continue\n\n # atext header\n elif line.startswith('#'):\n level = 1\n line = lines[i]\n while level','&gt;')\n czone = czone.replace('_','&#95;')\n czone = czone.replace('*','&#42;')\n return czone\n\ndef s_escape(mo):\n # used in re.sub\n czone = mo.string[mo.start():mo.end()]\n return escape(czone)\n\ndef unmark(code_zone):\n # convert _ to &#95; inside inline code\n code_zone = code_zone.replace('_','&#95;')\n return code_zone\n\ndef s_unmark(mo):\n # convert _ to &#95; inside inline code\n code_zone = mo.string[mo.start():mo.end()]\n code_zone = code_zone.replace('_','&#95;')\n return code_zone\n\ndef apply_markdown(src):\n\n scripts = []\n key = None\n\n t0 = time.time()\n i = 0\n while i-1 and src[start_a:end_a].find('\\n')==-1:\n link = src[start_a:end_a]\n rest = src[end_a+1:].lstrip()\n if rest and rest[0]=='(':\n j = 0\n while True:\n end_href = rest.find(')',j)\n if end_href == -1:\n break\n if rest[end_href-1]=='\\\\':\n j = end_href+1\n else:\n break\n if end_href>-1 and rest[:end_href].find('\\n')==-1:\n tag = ''+link+''\n src = src[:start_a-1]+tag+rest[end_href+1:]\n i = start_a+len(tag)\n elif rest and rest[0]=='[':\n j = 0\n while True:\n end_key = rest.find(']',j)\n if end_key == -1:\n break\n if rest[end_key-1]=='\\\\':\n j = end_key+1\n else:\n break\n if end_key>-1 and rest[:end_key].find('\\n')==-1:\n if not key:\n key = link\n if key.lower() not in refs:\n raise KeyError('unknown reference %s' %key)\n url = refs[key.lower()]\n tag = ''+link+''\n src = src[:start_a-1]+tag+rest[end_key+1:]\n i = start_a+len(tag)\n \n i += 1\n\n t1 = time.time()\n #print('apply markdown 1', t1-t0)\n # before applying the markup with _ and *, isolate HTML tags because \n # they can contain these characters\n\n # We replace them temporarily by a random string\n rstr = ''.join(random.choice(letters) for i in range(16))\n \n i = 0\n state = None\n start = -1\n data = ''\n tags = []\n while i' and state is None:\n tags.append(src[i:j+1])\n src = src[:i]+rstr+src[j+1:]\n i += len(rstr)\n break\n elif state=='\"' or state==\"'\":\n data += src[j]\n elif src[j]=='\\n':\n # if a sign < is not followed by > in the same ligne, it\n # is the sign \"lesser than\"\n src = src[:i]+'&lt;'+src[i+1:]\n j=i+4\n break\n j += 1\n elif src[i]=='`' and i>0 and src[i-1]!='\\\\':\n # ignore the content of inline code\n j = i+1\n while j\", \"&\" and \"_\" in inline code\n code_pattern = r'\\`(.*?)\\`'\n src = re.sub(code_pattern,s_escape,src)\n\n # replace escaped ` _ * by HTML characters\n src = src.replace(r'\\\\`','&#96;')\n src = src.replace(r'\\_','&#95;')\n src = src.replace(r'\\*','&#42;')\n\n # emphasis\n strong_patterns = [('STRONG',r'\\*\\*(.*?)\\*\\*'),('B',r'__(.*?)__')]\n for tag,strong_pattern in strong_patterns:\n src = re.sub(strong_pattern,r'<%s>\\1' %(tag,tag),src)\n\n em_patterns = [('EM',r'\\*(.*?)\\*'),('I',r'\\_(.*?)\\_')]\n for tag,em_pattern in em_patterns:\n src = re.sub(em_pattern,r'<%s>\\1' %(tag,tag),src)\n\n # inline code\n code_pattern = r'\\`(.*?)\\`'\n src = re.sub(code_pattern,r'\\1',src)\n \n # restore tags\n while True:\n pos = src.rfind(rstr)\n if pos==-1:\n break\n repl = tags.pop()\n src = src[:pos]+repl+src[pos+len(rstr):]\n\n src = '

    '+src+'

    '\n\n t3 = time.time()\n #print('apply markdown 3', t3-t2)\n\n return src,scripts\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203673,"cells":{"repo_name":{"kind":"string","value":"Fujin-Suzukaze/GT-I9505-Kernel-JB-4.3"},"path":{"kind":"string","value":"tools/perf/scripts/python/futex-contention.py"},"copies":{"kind":"string","value":"11261"},"size":{"kind":"string","value":"1486"},"content":{"kind":"string","value":"# futex contention\n# (c) 2010, Arnaldo Carvalho de Melo \n# Licensed under the terms of the GNU GPL License version 2\n#\n# Translation of:\n#\n# http://sourceware.org/systemtap/wiki/WSFutexContention\n#\n# to perf python scripting.\n#\n# Measures futex contention\n\nimport os, sys\nsys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\nfrom Util import *\n\nprocess_names = {}\nthread_thislock = {}\nthread_blocktime = {}\n\nlock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time\nprocess_names = {} # long-lived pid-to-execname mapping\n\ndef syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,\n\t\t\t nr, uaddr, op, val, utime, uaddr2, val3):\n\tcmd = op & FUTEX_CMD_MASK\n\tif cmd != FUTEX_WAIT:\n\t\treturn # we don't care about originators of WAKE events\n\n\tprocess_names[tid] = comm\n\tthread_thislock[tid] = uaddr\n\tthread_blocktime[tid] = nsecs(s, ns)\n\ndef syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,\n\t\t\t nr, ret):\n\tif thread_blocktime.has_key(tid):\n\t\telapsed = nsecs(s, ns) - thread_blocktime[tid]\n\t\tadd_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)\n\t\tdel thread_blocktime[tid]\n\t\tdel thread_thislock[tid]\n\ndef trace_begin():\n\tprint \"Press control+C to stop and show the summary\"\n\ndef trace_end():\n\tfor (tid, lock) in lock_waits:\n\t\tmin, max, avg, count = lock_waits[tid, lock]\n\t\tprint \"%s[%d] lock %x contended %d times, %d avg ns\" % \\\n\t\t (process_names[tid], tid, lock, count, avg)\n\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203674,"cells":{"repo_name":{"kind":"string","value":"gpkulkarni/linux-arm64"},"path":{"kind":"string","value":"scripts/checkkconfigsymbols.py"},"copies":{"kind":"string","value":"88"},"size":{"kind":"string","value":"15783"},"content":{"kind":"string","value":"#!/usr/bin/env python2\n\n\"\"\"Find Kconfig symbols that are referenced but not defined.\"\"\"\n\n# (c) 2014-2015 Valentin Rothberg \n# (c) 2014 Stefan Hengelein \n#\n# Licensed under the terms of the GNU GPL License version 2\n\n\nimport difflib\nimport os\nimport re\nimport signal\nimport sys\nfrom multiprocessing import Pool, cpu_count\nfrom optparse import OptionParser\nfrom subprocess import Popen, PIPE, STDOUT\n\n\n# regex expressions\nOPERATORS = r\"&|\\(|\\)|\\||\\!\"\nFEATURE = r\"(?:\\w*[A-Z0-9]\\w*){2,}\"\nDEF = r\"^\\s*(?:menu){,1}config\\s+(\" + FEATURE + r\")\\s*\"\nEXPR = r\"(?:\" + OPERATORS + r\"|\\s|\" + FEATURE + r\")+\"\nDEFAULT = r\"default\\s+.*?(?:if\\s.+){,1}\"\nSTMT = r\"^\\s*(?:if|select|depends\\s+on|(?:\" + DEFAULT + r\"))\\s+\" + EXPR\nSOURCE_FEATURE = r\"(?:\\W|\\b)+[D]{,1}CONFIG_(\" + FEATURE + r\")\"\n\n# regex objects\nREGEX_FILE_KCONFIG = re.compile(r\".*Kconfig[\\.\\w+\\-]*$\")\nREGEX_FEATURE = re.compile(r'(?!\\B)' + FEATURE + r'(?!\\B)')\nREGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE)\nREGEX_KCONFIG_DEF = re.compile(DEF)\nREGEX_KCONFIG_EXPR = re.compile(EXPR)\nREGEX_KCONFIG_STMT = re.compile(STMT)\nREGEX_KCONFIG_HELP = re.compile(r\"^\\s+(help|---help---)\\s*$\")\nREGEX_FILTER_FEATURES = re.compile(r\"[A-Za-z0-9]$\")\nREGEX_NUMERIC = re.compile(r\"0[xX][0-9a-fA-F]+|[0-9]+\")\nREGEX_QUOTES = re.compile(\"(\\\"(.*?)\\\")\")\n\n\ndef parse_options():\n \"\"\"The user interface of this module.\"\"\"\n usage = \"%prog [options]\\n\\n\" \\\n \"Run this tool to detect Kconfig symbols that are referenced but \" \\\n \"not defined in\\nKconfig. The output of this tool has the \" \\\n \"format \\'Undefined symbol\\\\tFile list\\'\\n\\n\" \\\n \"If no option is specified, %prog will default to check your\\n\" \\\n \"current tree. Please note that specifying commits will \" \\\n \"\\'git reset --hard\\'\\nyour current tree! You may save \" \\\n \"uncommitted changes to avoid losing data.\"\n\n parser = OptionParser(usage=usage)\n\n parser.add_option('-c', '--commit', dest='commit', action='store',\n default=\"\",\n help=\"Check if the specified commit (hash) introduces \"\n \"undefined Kconfig symbols.\")\n\n parser.add_option('-d', '--diff', dest='diff', action='store',\n default=\"\",\n help=\"Diff undefined symbols between two commits. The \"\n \"input format bases on Git log's \"\n \"\\'commmit1..commit2\\'.\")\n\n parser.add_option('-f', '--find', dest='find', action='store_true',\n default=False,\n help=\"Find and show commits that may cause symbols to be \"\n \"missing. Required to run with --diff.\")\n\n parser.add_option('-i', '--ignore', dest='ignore', action='store',\n default=\"\",\n help=\"Ignore files matching this pattern. Note that \"\n \"the pattern needs to be a Python regex. To \"\n \"ignore defconfigs, specify -i '.*defconfig'.\")\n\n parser.add_option('-s', '--sim', dest='sim', action='store', default=\"\",\n help=\"Print a list of maximum 10 string-similar symbols.\")\n\n parser.add_option('', '--force', dest='force', action='store_true',\n default=False,\n help=\"Reset current Git tree even when it's dirty.\")\n\n (opts, _) = parser.parse_args()\n\n if opts.commit and opts.diff:\n sys.exit(\"Please specify only one option at once.\")\n\n if opts.diff and not re.match(r\"^[\\w\\-\\.]+\\.\\.[\\w\\-\\.]+$\", opts.diff):\n sys.exit(\"Please specify valid input in the following format: \"\n \"\\'commmit1..commit2\\'\")\n\n if opts.commit or opts.diff:\n if not opts.force and tree_is_dirty():\n sys.exit(\"The current Git tree is dirty (see 'git status'). \"\n \"Running this script may\\ndelete important data since it \"\n \"calls 'git reset --hard' for some performance\\nreasons. \"\n \" Please run this script in a clean Git tree or pass \"\n \"'--force' if you\\nwant to ignore this warning and \"\n \"continue.\")\n\n if opts.commit:\n opts.find = False\n\n if opts.ignore:\n try:\n re.match(opts.ignore, \"this/is/just/a/test.c\")\n except:\n sys.exit(\"Please specify a valid Python regex.\")\n\n return opts\n\n\ndef main():\n \"\"\"Main function of this module.\"\"\"\n opts = parse_options()\n\n if opts.sim and not opts.commit and not opts.diff:\n sims = find_sims(opts.sim, opts.ignore)\n if sims:\n print \"%s: %s\" % (yel(\"Similar symbols\"), ', '.join(sims))\n else:\n print \"%s: no similar symbols found\" % yel(\"Similar symbols\")\n sys.exit(0)\n\n # dictionary of (un)defined symbols\n defined = {}\n undefined = {}\n\n if opts.commit or opts.diff:\n head = get_head()\n\n # get commit range\n commit_a = None\n commit_b = None\n if opts.commit:\n commit_a = opts.commit + \"~\"\n commit_b = opts.commit\n elif opts.diff:\n split = opts.diff.split(\"..\")\n commit_a = split[0]\n commit_b = split[1]\n undefined_a = {}\n undefined_b = {}\n\n # get undefined items before the commit\n execute(\"git reset --hard %s\" % commit_a)\n undefined_a, _ = check_symbols(opts.ignore)\n\n # get undefined items for the commit\n execute(\"git reset --hard %s\" % commit_b)\n undefined_b, defined = check_symbols(opts.ignore)\n\n # report cases that are present for the commit but not before\n for feature in sorted(undefined_b):\n # feature has not been undefined before\n if not feature in undefined_a:\n files = sorted(undefined_b.get(feature))\n undefined[feature] = files\n # check if there are new files that reference the undefined feature\n else:\n files = sorted(undefined_b.get(feature) -\n undefined_a.get(feature))\n if files:\n undefined[feature] = files\n\n # reset to head\n execute(\"git reset --hard %s\" % head)\n\n # default to check the entire tree\n else:\n undefined, defined = check_symbols(opts.ignore)\n\n # now print the output\n for feature in sorted(undefined):\n print red(feature)\n\n files = sorted(undefined.get(feature))\n print \"%s: %s\" % (yel(\"Referencing files\"), \", \".join(files))\n\n sims = find_sims(feature, opts.ignore, defined)\n sims_out = yel(\"Similar symbols\")\n if sims:\n print \"%s: %s\" % (sims_out, ', '.join(sims))\n else:\n print \"%s: %s\" % (sims_out, \"no similar symbols found\")\n\n if opts.find:\n print \"%s:\" % yel(\"Commits changing symbol\")\n commits = find_commits(feature, opts.diff)\n if commits:\n for commit in commits:\n commit = commit.split(\" \", 1)\n print \"\\t- %s (\\\"%s\\\")\" % (yel(commit[0]), commit[1])\n else:\n print \"\\t- no commit found\"\n print # new line\n\n\ndef yel(string):\n \"\"\"\n Color %string yellow.\n \"\"\"\n return \"\\033[33m%s\\033[0m\" % string\n\n\ndef red(string):\n \"\"\"\n Color %string red.\n \"\"\"\n return \"\\033[31m%s\\033[0m\" % string\n\n\ndef execute(cmd):\n \"\"\"Execute %cmd and return stdout. Exit in case of error.\"\"\"\n pop = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)\n (stdout, _) = pop.communicate() # wait until finished\n if pop.returncode != 0:\n sys.exit(stdout)\n return stdout\n\n\ndef find_commits(symbol, diff):\n \"\"\"Find commits changing %symbol in the given range of %diff.\"\"\"\n commits = execute(\"git log --pretty=oneline --abbrev-commit -G %s %s\"\n % (symbol, diff))\n return [x for x in commits.split(\"\\n\") if x]\n\n\ndef tree_is_dirty():\n \"\"\"Return true if the current working tree is dirty (i.e., if any file has\n been added, deleted, modified, renamed or copied but not committed).\"\"\"\n stdout = execute(\"git status --porcelain\")\n for line in stdout:\n if re.findall(r\"[URMADC]{1}\", line[:2]):\n return True\n return False\n\n\ndef get_head():\n \"\"\"Return commit hash of current HEAD.\"\"\"\n stdout = execute(\"git rev-parse HEAD\")\n return stdout.strip('\\n')\n\n\ndef partition(lst, size):\n \"\"\"Partition list @lst into eveni-sized lists of size @size.\"\"\"\n return [lst[i::size] for i in xrange(size)]\n\n\ndef init_worker():\n \"\"\"Set signal handler to ignore SIGINT.\"\"\"\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n\n\ndef find_sims(symbol, ignore, defined = []):\n \"\"\"Return a list of max. ten Kconfig symbols that are string-similar to\n @symbol.\"\"\"\n if defined:\n return sorted(difflib.get_close_matches(symbol, set(defined), 10))\n\n pool = Pool(cpu_count(), init_worker)\n kfiles = []\n for gitfile in get_files():\n if REGEX_FILE_KCONFIG.match(gitfile):\n kfiles.append(gitfile)\n\n arglist = []\n for part in partition(kfiles, cpu_count()):\n arglist.append((part, ignore))\n\n for res in pool.map(parse_kconfig_files, arglist):\n defined.extend(res[0])\n\n return sorted(difflib.get_close_matches(symbol, set(defined), 10))\n\n\ndef get_files():\n \"\"\"Return a list of all files in the current git directory.\"\"\"\n # use 'git ls-files' to get the worklist\n stdout = execute(\"git ls-files\")\n if len(stdout) > 0 and stdout[-1] == \"\\n\":\n stdout = stdout[:-1]\n\n files = []\n for gitfile in stdout.rsplit(\"\\n\"):\n if \".git\" in gitfile or \"ChangeLog\" in gitfile or \\\n \".log\" in gitfile or os.path.isdir(gitfile) or \\\n gitfile.startswith(\"tools/\"):\n continue\n files.append(gitfile)\n return files\n\n\ndef check_symbols(ignore):\n \"\"\"Find undefined Kconfig symbols and return a dict with the symbol as key\n and a list of referencing files as value. Files matching %ignore are not\n checked for undefined symbols.\"\"\"\n pool = Pool(cpu_count(), init_worker)\n try:\n return check_symbols_helper(pool, ignore)\n except KeyboardInterrupt:\n pool.terminate()\n pool.join()\n sys.exit(1)\n\n\ndef check_symbols_helper(pool, ignore):\n \"\"\"Helper method for check_symbols(). Used to catch keyboard interrupts in\n check_symbols() in order to properly terminate running worker processes.\"\"\"\n source_files = []\n kconfig_files = []\n defined_features = []\n referenced_features = dict() # {file: [features]}\n\n for gitfile in get_files():\n if REGEX_FILE_KCONFIG.match(gitfile):\n kconfig_files.append(gitfile)\n else:\n if ignore and not re.match(ignore, gitfile):\n continue\n # add source files that do not match the ignore pattern\n source_files.append(gitfile)\n\n # parse source files\n arglist = partition(source_files, cpu_count())\n for res in pool.map(parse_source_files, arglist):\n referenced_features.update(res)\n\n\n # parse kconfig files\n arglist = []\n for part in partition(kconfig_files, cpu_count()):\n arglist.append((part, ignore))\n for res in pool.map(parse_kconfig_files, arglist):\n defined_features.extend(res[0])\n referenced_features.update(res[1])\n defined_features = set(defined_features)\n\n # inverse mapping of referenced_features to dict(feature: [files])\n inv_map = dict()\n for _file, features in referenced_features.iteritems():\n for feature in features:\n inv_map[feature] = inv_map.get(feature, set())\n inv_map[feature].add(_file)\n referenced_features = inv_map\n\n undefined = {} # {feature: [files]}\n for feature in sorted(referenced_features):\n # filter some false positives\n if feature == \"FOO\" or feature == \"BAR\" or \\\n feature == \"FOO_BAR\" or feature == \"XXX\":\n continue\n if feature not in defined_features:\n if feature.endswith(\"_MODULE\"):\n # avoid false positives for kernel modules\n if feature[:-len(\"_MODULE\")] in defined_features:\n continue\n undefined[feature] = referenced_features.get(feature)\n return undefined, defined_features\n\n\ndef parse_source_files(source_files):\n \"\"\"Parse each source file in @source_files and return dictionary with source\n files as keys and lists of references Kconfig symbols as values.\"\"\"\n referenced_features = dict()\n for sfile in source_files:\n referenced_features[sfile] = parse_source_file(sfile)\n return referenced_features\n\n\ndef parse_source_file(sfile):\n \"\"\"Parse @sfile and return a list of referenced Kconfig features.\"\"\"\n lines = []\n references = []\n\n if not os.path.exists(sfile):\n return references\n\n with open(sfile, \"r\") as stream:\n lines = stream.readlines()\n\n for line in lines:\n if not \"CONFIG_\" in line:\n continue\n features = REGEX_SOURCE_FEATURE.findall(line)\n for feature in features:\n if not REGEX_FILTER_FEATURES.search(feature):\n continue\n references.append(feature)\n\n return references\n\n\ndef get_features_in_line(line):\n \"\"\"Return mentioned Kconfig features in @line.\"\"\"\n return REGEX_FEATURE.findall(line)\n\n\ndef parse_kconfig_files(args):\n \"\"\"Parse kconfig files and return tuple of defined and references Kconfig\n symbols. Note, @args is a tuple of a list of files and the @ignore\n pattern.\"\"\"\n kconfig_files = args[0]\n ignore = args[1]\n defined_features = []\n referenced_features = dict()\n\n for kfile in kconfig_files:\n defined, references = parse_kconfig_file(kfile)\n defined_features.extend(defined)\n if ignore and re.match(ignore, kfile):\n # do not collect references for files that match the ignore pattern\n continue\n referenced_features[kfile] = references\n return (defined_features, referenced_features)\n\n\ndef parse_kconfig_file(kfile):\n \"\"\"Parse @kfile and update feature definitions and references.\"\"\"\n lines = []\n defined = []\n references = []\n skip = False\n\n if not os.path.exists(kfile):\n return defined, references\n\n with open(kfile, \"r\") as stream:\n lines = stream.readlines()\n\n for i in range(len(lines)):\n line = lines[i]\n line = line.strip('\\n')\n line = line.split(\"#\")[0] # ignore comments\n\n if REGEX_KCONFIG_DEF.match(line):\n feature_def = REGEX_KCONFIG_DEF.findall(line)\n defined.append(feature_def[0])\n skip = False\n elif REGEX_KCONFIG_HELP.match(line):\n skip = True\n elif skip:\n # ignore content of help messages\n pass\n elif REGEX_KCONFIG_STMT.match(line):\n line = REGEX_QUOTES.sub(\"\", line)\n features = get_features_in_line(line)\n # multi-line statements\n while line.endswith(\"\\\\\"):\n i += 1\n line = lines[i]\n line = line.strip('\\n')\n features.extend(get_features_in_line(line))\n for feature in set(features):\n if REGEX_NUMERIC.match(feature):\n # ignore numeric values\n continue\n references.append(feature)\n\n return defined, references\n\n\nif __name__ == \"__main__\":\n main()\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203675,"cells":{"repo_name":{"kind":"string","value":"denisff/python-for-android"},"path":{"kind":"string","value":"python-build/python-libs/gdata/src/gdata/tlslite/integration/TLSSocketServerMixIn.py"},"copies":{"kind":"string","value":"320"},"size":{"kind":"string","value":"2203"},"content":{"kind":"string","value":"\"\"\"TLS Lite + SocketServer.\"\"\"\n\nfrom gdata.tlslite.TLSConnection import TLSConnection\n\nclass TLSSocketServerMixIn:\n \"\"\"\n This class can be mixed in with any L{SocketServer.TCPServer} to\n add TLS support.\n\n To use this class, define a new class that inherits from it and\n some L{SocketServer.TCPServer} (with the mix-in first). Then\n implement the handshake() method, doing some sort of server\n handshake on the connection argument. If the handshake method\n returns True, the RequestHandler will be triggered. Below is a\n complete example of a threaded HTTPS server::\n\n from SocketServer import *\n from BaseHTTPServer import *\n from SimpleHTTPServer import *\n from tlslite.api import *\n\n s = open(\"./serverX509Cert.pem\").read()\n x509 = X509()\n x509.parse(s)\n certChain = X509CertChain([x509])\n\n s = open(\"./serverX509Key.pem\").read()\n privateKey = parsePEMKey(s, private=True)\n\n sessionCache = SessionCache()\n\n class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn,\n HTTPServer):\n def handshake(self, tlsConnection):\n try:\n tlsConnection.handshakeServer(certChain=certChain,\n privateKey=privateKey,\n sessionCache=sessionCache)\n tlsConnection.ignoreAbruptClose = True\n return True\n except TLSError, error:\n print \"Handshake failure:\", str(error)\n return False\n\n httpd = MyHTTPServer(('localhost', 443), SimpleHTTPRequestHandler)\n httpd.serve_forever()\n \"\"\"\n\n\n def finish_request(self, sock, client_address):\n tlsConnection = TLSConnection(sock)\n if self.handshake(tlsConnection) == True:\n self.RequestHandlerClass(tlsConnection, client_address, self)\n tlsConnection.close()\n\n #Implement this method to do some form of handshaking. Return True\n #if the handshake finishes properly and the request is authorized.\n def handshake(self, tlsConnection):\n raise NotImplementedError()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203676,"cells":{"repo_name":{"kind":"string","value":"JohnGeorgiadis/invenio"},"path":{"kind":"string","value":"invenio/modules/jsonalchemy/jsonext/readers/json_reader.py"},"copies":{"kind":"string","value":"17"},"size":{"kind":"string","value":"3532"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# This file is part of Invenio.\n# Copyright (C) 2013 CERN.\n#\n# Invenio is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 2 of the\n# License, or (at your option) any later version.\n#\n# Invenio is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Invenio; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\n\"\"\"Json Reader.\"\"\"\n\nimport re\n\nfrom invenio.modules.jsonalchemy.reader import ModelParser\nfrom invenio.modules.jsonalchemy.reader import Reader\n\n\nclass JsonReader(Reader):\n \"\"\"JSON reader.\"\"\"\n\n __master_format__ = 'json'\n\n @staticmethod\n def split_blob(blob, schema=None, **kwargs):\n \"\"\"\n In case of several objs inside the blob this method specify how to\n split then and work one by one afterwards.\n \"\"\"\n return blob.splitlines()\n\n def _prepare_blob(self):\n \"\"\"\n\n \"\"\"\n model_fields = ModelParser.resolve_models(\n self._json.model_info.names,\n self._json.additional_info.namespace).get('fields', {})\n model_json_ids = list(model_fields.keys())\n model_field_names = list(model_fields.values())\n for key in list(self._blob.keys()):\n if key in model_field_names and key not in model_json_ids:\n _key = model_json_ids[model_field_names.index(key)]\n self._blob[_key] = self._blob[key]\n del self._blob[key]\n\n def _get_elements_from_blob(self, regex_key):\n if regex_key in ('entire_record', '*'):\n return self._blob\n elements = []\n for k in regex_key:\n regex = re.compile(k)\n keys = filter(regex.match, self._blob.keys())\n values = []\n for key in keys:\n values.append(self._blob.get(key))\n elements.extend(values)\n return elements\n\n def _unpack_rule(self, json_id, field_name=None):\n super(JsonReader, self)._unpack_rule(json_id, field_name)\n\n def _apply_virtual_rules(self, json_id, field_name, rule):\n \"\"\"JSON if a bit special as you can set the value of this fields\"\"\"\n if json_id in self._blob:\n field_defs = []\n field_defs.append(('calculated',\n rule['rules'].get('calculated', [])))\n field_defs.append(('derived', rule['rules'].get('derived', [])))\n for (field_type, ffield_def) in field_defs:\n for field_def in ffield_def:\n info = self._find_field_metadata(json_id, field_name,\n field_type, field_def)\n self._json['__meta_metadata__'][field_name] = info\n self._json.__setitem__(field_name, self._blob[json_id],\n extend=False,\n exclude=['decorators', 'extensions'])\n return\n else:\n super(JsonReader, self)._apply_virtual_rules(json_id, field_name,\n rule)\n\n\nreader = JsonReader\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203677,"cells":{"repo_name":{"kind":"string","value":"bollwyvl/nbviewer"},"path":{"kind":"string","value":"tasks.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"2576"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport shutil\nimport tempfile\n\nimport invoke\n\nfrom notebook import DEFAULT_STATIC_FILES_PATH\n\n\nAPP_ROOT = os.path.dirname(__file__)\nNPM_BIN = os.path.join(APP_ROOT, \"node_modules\", \".bin\")\n\n\n@invoke.task\ndef test():\n invoke.run(\"nosetests -v\")\n\n\n@invoke.task\ndef bower():\n invoke.run(\n \"cd {}/nbviewer/static &&\".format(APP_ROOT) +\n \" {}/bower install\".format(NPM_BIN) +\n \" --config.interactive=false --allow-root\"\n )\n\n\n@invoke.task\ndef less(debug=False):\n if debug:\n extra = \"--source-map\"\n else:\n extra = \" --clean-css='--s1 --advanced --compatibility=ie8'\"\n\n\n tmpl = (\n \"cd {}/nbviewer/static/less \".format(APP_ROOT) +\n \" && {}/lessc\".format(NPM_BIN) +\n \" {1} \"\n \" --include-path={2}\"\n \" --autoprefix='> 1%'\"\n \" {0}.less ../build/{0}.css\"\n )\n\n args = (extra, DEFAULT_STATIC_FILES_PATH)\n\n [\n invoke.run(tmpl.format(less_file, *args))\n for less_file in [\"styles\", \"notebook\", \"slides\"]\n ]\n\n\n@invoke.task\ndef screenshots(root=\"http://localhost:5000/\", dest=\"./screenshots\"):\n dest = os.path.abspath(dest)\n\n script = \"\"\"\n root = \"{root}\"\n\n urls = ({{name, url}} for name, url of {{\n home: \"\"\n dir: \"github/ipython/ipython/tree/3.x/examples/\"\n user: \"github/ipython/\"\n gists: \"gist/fperez/\"\n notebook: \"github/ipython/ipython/blob/3.x/examples/Notebook/Notebook%20Basics.ipynb\"}})\n\n screens = ({{name, w, h}} for name, [w, h] of {{\n smartphone_portrait: [320, 480]\n smartphone_landscape: [480, 320]\n tablet_portrait: [768, 1024]\n tablet_landscape: [1024, 768]\n desktop_standard: [1280, 1024]\n desktop_1080p: [1920, 1080]\n }})\n \n casper.start root\n\n casper.each screens, (_, screen) ->\n @then ->\n @viewport screen.w, screen.h, ->\n _.each urls, (_, page) ->\n @thenOpen root + page.url, ->\n @wait 1000\n @then ->\n @echo \"#{{page.name}} #{{screen.name}}\"\n @capture \"{dest}/#{{page.name}}-#{{screen.name}}.png\"\n\n casper.run()\n \"\"\".format(root=root, dest=dest)\n \n tmpdir = tempfile.mkdtemp()\n tmpfile = os.path.join(tmpdir, \"screenshots.coffee\")\n with open(tmpfile, \"w+\") as f:\n f.write(script)\n invoke.run(\"casperjs test {script}\".format(script=tmpfile))\n \n shutil.rmtree(tmpdir)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203678,"cells":{"repo_name":{"kind":"string","value":"scattering/ipeek"},"path":{"kind":"string","value":"server/plot_dcs.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3478"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport h5py\nimport simplejson\nimport os\nimport numpy as np\n#import matplotlib.pyplot as plt\nfrom time import time\n\ndef Elam(lam):\n \"\"\"\n convert wavelength in angstroms to energy in meV\n \"\"\"\n return 81.81/lam**2\n\ndef Ek(k):\n \"\"\"\n convert wave-vector in inver angstroms to energy in meV\n \"\"\"\n return 2.072*k**2\n\ndef kE(E):\n return np.sqrt(E/2.072)\n\ndef Qfunc(ki, kf, theta):\n \"\"\"\n evaluate the magnitude of Q from ki, kf, and theta\n theta is the angle between kf and ki, sometimes called 2 theta, units of degrees\n \"\"\"\n return np.sqrt( ki**2 + kf**2 - 2*ki*kf*np.cos(theta*np.pi/180) )\n\ndef Ef_from_timechannel(timeChannel, t_SD_min, speedRatDenom, masterSpeed):\n \"\"\"\n using the parameters\n t_SD_min = minimum sample to detector time\n speedRatDenom = to set FOL chopper speed\n masterSpeed = chopper speed (except for FOL chopper)\n using the variabl\n timeChannel, where I am numbering from 1 \n \"\"\"\n return 8.41e7 / (t_SD_min + (timeChannel+1)* (6e4 *(speedRatDenom/masterSpeed)) )**2\n\ndef process_raw_dcs(data_path):\n # t0 = time()\n os.chdir(data_path) # change working directory\n detInfo = np.genfromtxt('dcs_detector_info.txt', skip_header=1, skip_footer=17)\n detToTwoTheta = detInfo[:,9] # 10th column\n\n\n os.system('gzip -dc livedata.dcs.gz > livedata.dcs')\n #os.system('C:\\\\Software\\\\Octave-3.6.4\\\\bin\\\\octave --eval \"load livedata.dcs; save -hdf5 livedata.hdf;\"')\n os.system('octave --eval \"load livedata.dcs; save -hdf5 livedata.hdf;\"') \n f = h5py.File('livedata.hdf')\n data = f['histodata']['value'].value\n\n ch_wl = f['ch_wl']['value'].value\n Ei = Elam(ch_wl)\n ki = kE(Ei)\n dE =0.5*(-0.10395+0.05616 *Ei+0.00108 *Ei**2) #take the putative resolution and halve it\n masterSpeed = f['ch_ms']['value'].value\n speedRatDenom = f['ch_srdenom']['value'].value\n t_SD_min = f['tsdmin']['value'].value\n\n Q_max = Qfunc(ki,ki,150)\n Q_min = 0\n E_bins = np.linspace(-Ei, Ei, int(2*Ei/dE) )\n Q_bins = np.linspace(Q_min,Q_max,301)\n\n #for every point in {timechannel, detectorchannel} space, map into a bin of {E,Q} space\n #remember, data is organized as data[detectorchannel][timechannel]\n i,j = np.indices(data.shape)\n ef = Ef_from_timechannel(j, t_SD_min, speedRatDenom, masterSpeed)\n Q_ = Qfunc(ki, kE(ef), detToTwoTheta[:, None])\n\n E_transfer = Ei-ef\n E_mask = (E_transfer > -Ei)\n\n EQ_data, xedges, yedges = np.histogram2d(Q_[E_mask], E_transfer[E_mask], bins=(Q_bins, E_bins), range=([Q_min,Q_max], [-Ei, Ei]), weights=data[E_mask])\n\n\n stop_date = ''.join(chr(a) for a in f['stop_date']['value'].value.flatten())\n start_date = ''.join(chr(a) for a in f['start_date']['value'].value.flatten())\n \n output = {\n \"title\": \"DCS snapshot\",\n \"dims\": {\n \"ymin\": -Ei,\n \"ymax\": Ei,\n \"ydim\": EQ_data.shape[1],\n \"xmin\": 0,\n \"xmax\": Q_max,\n \"xdim\": EQ_data.shape[0],\n \"zmin\": EQ_data.min(),\n \"zmax\": EQ_data.max()\n },\n \"type\": \"2d\",\n \"ylabel\": \"Ei-Ef [meV]\",\n \"xlabel\": \"|Q| [Å⁻¹]\",\n \"z\": [EQ_data.T.tolist()],\n \"options\": {},\n \"metadata\": {\n \"stop_date\": stop_date,\n \"start_date\": start_date\n }\n }\n\n #print time()-t0\n return simplejson.dumps([output])\n\n\n"},"license":{"kind":"string","value":"unlicense"}}},{"rowIdx":203679,"cells":{"repo_name":{"kind":"string","value":"autodrive/utils3"},"path":{"kind":"string","value":"utils3/tests_remote/tests_remote.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4137"},"content":{"kind":"string","value":"import os\nimport unittest\n\nfrom .. import git_util\n\n\nclass TestGitUtilRemotes(unittest.TestCase):\n def test_get_remote_branch_list(self):\n # function under test\n result_set = set(git_util.get_remote_branch_list())\n # sample file in the test script folder\n filename = os.path.join(os.path.split(__file__)[0], 'remote_branch_list.txt')\n\n pattern_str = 'heads'\n\n # get sample file\n if os.path.exists(filename):\n with open(filename, 'r') as f:\n tags_list = [tag_str.strip() for tag_str in f.readlines()]\n else:\n # if file missing\n # make a list from git ls-remote\n Warning('''file %s might be missing\nmake a list from git ls-remote''' % (filename))\n result_txt = git_util.git('ls-remote --%s' % pattern_str)\n result_line_list = result_txt.splitlines()\n\n if result_line_list[0].startswith('From '):\n result_line_list.pop(0)\n\n tags_list = []\n with open(filename, 'w') as f_out:\n\n # build list of expected tags\n for line_txt in result_line_list:\n line_split_list = line_txt.split()\n # filter remote tags\n filtered_line_split_list = [txt for txt in line_split_list if\n txt.startswith('refs/%s/' % pattern_str)\n and (not txt.endswith('^{}'))]\n if filtered_line_split_list:\n for tag_item in filtered_line_split_list:\n tag_items = tag_item.split('/')[2:]\n tag_txt = '/'.join(tag_items)\n f_out.write(tag_txt + '\\n')\n tags_list.append(tag_txt)\n # finished making a list from git ls-remote\n\n expected_set = set(tags_list)\n\n self.assertFalse(expected_set - result_set, msg='''\nexpected set = %r\nresult_set = %r\n'''%(expected_set, result_set))\n\n def test_get_remote_tag_list(self):\n result_list = git_util.get_remote_tag_list()\n result_set = set(result_list)\n\n input_file_name = os.path.join(os.path.split(__file__)[0], 'tags_list.txt')\n\n if os.path.exists(input_file_name):\n with open(input_file_name, 'r') as f:\n tags_list = [tag_str.strip() for tag_str in f.readlines()]\n else:\n print('''test_get_remote_tag_list() : file %s might be missing\nmake a list from git ls-remote''' % (input_file_name))\n result_txt = git_util.git('ls-remote --tags')\n result_line_list = result_txt.splitlines()\n\n if result_line_list[0].startswith('From '):\n result_line_list.pop(0)\n\n tags_list = []\n with open(input_file_name, 'w') as f_out:\n\n # build list of expected tags\n for line_txt in result_line_list:\n line_split_list = line_txt.split()\n # filter remote tags\n filtered_line_split_list = [txt for txt in line_split_list if txt.startswith('refs/tags/')\n and (not txt.endswith('^{}'))]\n if filtered_line_split_list:\n for tag_item in filtered_line_split_list:\n tag_items = tag_item.split('/')[2:]\n tag_txt = '/'.join(tag_items)\n f_out.write(tag_txt + '\\n')\n tags_list.append(tag_txt)\n # finished making a list from git ls-remote\n\n expected_set = set(tags_list)\n\n self.assertFalse(expected_set - result_set, msg='''\nexpected set = %r\nresult_set = %r\n'''%(expected_set, result_set))\n\n def test_is_branch_in_remote_branch_list(self):\n self.assertTrue(git_util.is_branch_in_remote_branch_list('master', 'origin', False))\n self.assertFalse(git_util.is_branch_in_remote_branch_list('__m_a_s_t_e_r__', 'origin', False))\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203680,"cells":{"repo_name":{"kind":"string","value":"ravindrasingh22/ansible"},"path":{"kind":"string","value":"v1/ansible/runner/connection_plugins/local.py"},"copies":{"kind":"string","value":"110"},"size":{"kind":"string","value":"5581"},"content":{"kind":"string","value":"# (c) 2012, Michael DeHaan \n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see .\n\nimport traceback\nimport os\nimport pipes\nimport shutil\nimport subprocess\nimport select\nimport fcntl\nfrom ansible import errors\nfrom ansible import utils\nfrom ansible.callbacks import vvv\n\n\nclass Connection(object):\n ''' Local based connections '''\n\n def __init__(self, runner, host, port, *args, **kwargs):\n self.runner = runner\n self.host = host\n # port is unused, since this is local\n self.port = port\n self.has_pipelining = False\n\n # TODO: add su(needs tty), pbrun, pfexec\n self.become_methods_supported=['sudo']\n\n def connect(self, port=None):\n ''' connect to the local host; nothing to do here '''\n\n return self\n\n def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):\n ''' run a command on the local host '''\n\n # su requires to be run from a terminal, and therefore isn't supported here (yet?)\n if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:\n raise errors.AnsibleError(\"Internal Error: this module does not support running commands via %s\" % self.runner.become_method)\n\n if in_data:\n raise errors.AnsibleError(\"Internal Error: this module does not support optimized module pipelining\")\n\n if self.runner.become and sudoable:\n local_cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '-H', self.runner.become_exe)\n else:\n if executable:\n local_cmd = executable.split() + ['-c', cmd]\n else:\n local_cmd = cmd\n executable = executable.split()[0] if executable else None\n\n vvv(\"EXEC %s\" % (local_cmd), host=self.host)\n p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),\n cwd=self.runner.basedir, executable=executable,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n if self.runner.become and sudoable and self.runner.become_pass:\n fcntl.fcntl(p.stdout, fcntl.F_SETFL,\n fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)\n fcntl.fcntl(p.stderr, fcntl.F_SETFL,\n fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)\n become_output = ''\n while success_key not in become_output:\n\n if prompt and become_output.endswith(prompt):\n break\n if utils.su_prompts.check_su_prompt(become_output):\n break\n\n rfd, wfd, efd = select.select([p.stdout, p.stderr], [],\n [p.stdout, p.stderr], self.runner.timeout)\n if p.stdout in rfd:\n chunk = p.stdout.read()\n elif p.stderr in rfd:\n chunk = p.stderr.read()\n else:\n stdout, stderr = p.communicate()\n raise errors.AnsibleError('timeout waiting for %s password prompt:\\n' % self.runner.become_method + become_output)\n if not chunk:\n stdout, stderr = p.communicate()\n raise errors.AnsibleError('%s output closed while waiting for password prompt:\\n' % self.runner.become_method + become_output)\n become_output += chunk\n if success_key not in become_output:\n p.stdin.write(self.runner.become_pass + '\\n')\n fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)\n fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)\n\n stdout, stderr = p.communicate()\n return (p.returncode, '', stdout, stderr)\n\n def put_file(self, in_path, out_path):\n ''' transfer a file from local to local '''\n\n vvv(\"PUT %s TO %s\" % (in_path, out_path), host=self.host)\n if not os.path.exists(in_path):\n raise errors.AnsibleFileNotFound(\"file or module does not exist: %s\" % in_path)\n try:\n shutil.copyfile(in_path, out_path)\n except shutil.Error:\n traceback.print_exc()\n raise errors.AnsibleError(\"failed to copy: %s and %s are the same\" % (in_path, out_path))\n except IOError:\n traceback.print_exc()\n raise errors.AnsibleError(\"failed to transfer file to %s\" % out_path)\n\n def fetch_file(self, in_path, out_path):\n vvv(\"FETCH %s TO %s\" % (in_path, out_path), host=self.host)\n ''' fetch a file from local to local -- for copatibility '''\n self.put_file(in_path, out_path)\n\n def close(self):\n ''' terminate the connection; nothing to do here '''\n pass\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203681,"cells":{"repo_name":{"kind":"string","value":"rg3915/django-basic-apps"},"path":{"kind":"string","value":"basic/music/urls.py"},"copies":{"kind":"string","value":"10"},"size":{"kind":"string","value":"1276"},"content":{"kind":"string","value":"from django.conf.urls.defaults import *\n\n\nurlpatterns = patterns('basic.music.views',\n url(r'^genres/(?P[-\\w]+)/$',\n view='genre_detail',\n name='music_genre_detail',\n ),\n url (r'^genres/$',\n view='genre_list',\n name='music_genre_list',\n ),\n url(r'^labels/(?P[-\\w]+)/$',\n view='label_detail',\n name='music_label_detail',\n ),\n url (r'^labels/$',\n view='label_list',\n name='music_label_list',\n ),\n url(r'^bands/(?P[-\\w]+)/$',\n view='band_detail',\n name='music_band_detail',\n ),\n url (r'^bands/$',\n view='band_list',\n name='music_band_list',\n ),\n url(r'^albums/(?P[-\\w]+)/$',\n view='album_detail',\n name='music_album_detail',\n ),\n url (r'^albums/$',\n view='album_list',\n name='music_album_list',\n ),\n url(r'^tracks/(?P[-\\w]+)/$',\n view='track_detail',\n name='music_track_detail',\n ),\n url (r'^tracks/$',\n view='track_list',\n name='music_track_list',\n ),\n)\n\n\nurlpatterns += patterns('',\n url (r'^$',\n view='django.views.generic.simple.direct_to_template',\n kwargs={'template': 'music/index.html'},\n name='music_index',\n ),\n)"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203682,"cells":{"repo_name":{"kind":"string","value":"pycepa/pycepa"},"path":{"kind":"string","value":"modules/Tor/cell/cell.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"11202"},"content":{"kind":"string","value":"from cryptography import x509\nfrom cryptography.hazmat.backends import default_backend\nfrom time import time\nfrom datetime import datetime\nimport struct\nimport os\nimport socket\nimport logging\nlog = logging.getLogger(__name__)\n\n# default protocol version before negotiation\nproto_version = 3\n\nclass CellError(Exception):\n \"\"\"\n Generic cell error.\n \"\"\"\n pass\n\nclass TorError(Exception):\n \"\"\"\n Generic tor protocol error.\n \"\"\"\n pass\n\nclass FixedCell(object):\n \"\"\"\n Fixed length cell.\n \"\"\"\n cell_type = -1\n\n def __init__(self, circuit_id=None):\n self.fixed = True\n self.circuit_id = circuit_id or 0\n\n def unpack(self, data):\n self.data = data\n\n def pack(self, data):\n \"\"\"\n Pack the circuit id, cell type, and data.\n \"\"\"\n if proto_version < 4:\n data = struct.pack('>HB509s', self.circuit_id, self.cell_type, data)\n else:\n data = struct.pack('>IB509s', self.circuit_id, self.cell_type, data)\n return data\n\nclass VariableCell(object):\n \"\"\"\n Variable lengthed cell.\n \"\"\"\n cell_type = -1\n\n def __init__(self, circuit_id=None):\n self.fixed = False\n self.circuit_id = circuit_id or 0\n\n def has_len(self):\n \"\"\"\n Returns true if the length header has been parsed.\n \"\"\"\n return hasattr(self, 'length')\n\n def len(self, data=None):\n \"\"\"\n Get or set the length of the cell.\n \"\"\"\n if data:\n self.length = struct.unpack('>H', data[:2])[0]\n elif self.has_len():\n return self.length\n\n def unpack(self, data):\n self.data = data[:self.length]\n\n def pack(self, data):\n \"\"\"\n Pack the circuit id, cell type, length, and data.\n \"\"\"\n if proto_version < 4:\n header = struct.pack('>HBH', self.circuit_id, self.cell_type, len(data))\n else:\n header = struct.pack('>IBH', self.circuit_id, self.cell_type, len(data))\n return header + data\n\nclass Relay(FixedCell):\n \"\"\"\n Relay cell.\n \"\"\"\n cell_type = 3\n\n def get_str(self, include_digest=True):\n \"\"\"\n Returns the packed data without sending so that it can be encrypted.\n \"\"\"\n if isinstance(self.data, str):\n return self.data\n\n if not self.data['data']:\n self.data['data'] = ''\n\n if include_digest:\n digest = self.data['digest']\n else:\n digest = '\\x00' * 4\n\n return struct.pack('>BHH4sH498s', self.data['command'], 0,\n self.data['stream_id'], digest, len(self.data['data']), self.data['data'])\n\n def parse(self):\n \"\"\"\n Parse a received relay cell after decryption. This currently can't be implemented\n as a part of the unpack() function because the data must first be decrypted.\n \"\"\"\n headers = struct.unpack('>BHH4sH', self.data[:11])\n self.data = self.data[11:]\n\n if len(self.data) < headers[4] or headers[1]:\n raise CellError('Invalid relay packet (possibly not from this OR).')\n\n try:\n text = relay_commands[headers[0]]\n except IndexError:\n raise CellError('Invalid relay packet command.')\n\n self.data = {\n 'command': headers[0],\n 'command_text': text,\n 'recognized': headers[1],\n 'stream_id': headers[2],\n 'digest': headers[3],\n 'length': headers[4],\n 'data': self.data[:headers[4]]\n }\n\n def pack(self, data):\n return super(Relay, self).pack(self.data)\n\n def init_relay(self, data):\n \"\"\"\n Set the relay cell data.\n \"\"\"\n self.data = data\n\nclass Padding(FixedCell):\n \"\"\"\n Padding cell.\n \"\"\"\n cell_type = 0\n\nclass Destroy(FixedCell):\n \"\"\"\n Destroy cell.\n \"\"\"\n cell_type = 4\n\n def unpack(self, data):\n super(Destroy, self).unpack(data)\n\n reason = struct.unpack('>B', self.data[0])[0]\n reasons = [\n 'No reason given.', 'Tor protocol violation.', 'Internal error.',\n 'A client sent a TRUNCATE command.',\n 'Not currently operating; trying to save bandwidth.',\n 'Out of memory, sockets, or circuit IDs.',\n 'Unable to reach relay.',\n 'Connected to relay, but its OR identity was not as expected.',\n 'The OR connection that was carrying this circuit died.',\n 'The circuit has expired for being dirty or old.'\n 'Circuit construction took too long.',\n 'The circuit was destroyed w/o client TRUNCATE.',\n 'Request for unknown hidden service.'\n ]\n\n raise TorError('Circuit closed: %s' % reasons[reason])\n\nclass CreateFast(FixedCell):\n \"\"\"\n CreateFast cell.\n \"\"\"\n cell_type = 5\n\n def __init__(self, circuit_id=None):\n super(CreateFast, self).__init__(circuit_id=circuit_id)\n self.key_material = os.urandom(20)\n\n def pack(self, data):\n data = struct.pack('>20s', self.key_material)\n return super(CreateFast, self).pack(data)\n\nclass CreatedFast(FixedCell):\n \"\"\"\n CreatedFast cell.\n \"\"\"\n cell_type = 6\n\n def unpack(self, data):\n \"\"\"\n Unpack the key material.\n \"\"\"\n super(CreatedFast, self).unpack(data)\n self.key_material, self.derivative_key = struct.unpack('>20s20s', self.data[:40])\n\nclass Versions(VariableCell):\n \"\"\"\n Versions cell.\n \"\"\"\n cell_type = 7\n\n def unpack(self, data):\n \"\"\"\n Parse the received versions.\n \"\"\"\n super(Versions, self).unpack(data)\n self.versions = struct.unpack('>' + 'H' * int(len(self.data) / 2), self.data)\n\n def pack(self, data):\n \"\"\"\n Pack our known versions.\n \"\"\"\n data = struct.pack('>HH', 3,4)\n return super(Versions, self).pack(data)\n\nclass Netinfo(FixedCell):\n \"\"\"\n Netinfo cell.\n \"\"\"\n cell_type = 8\n\n def unpack(self, data):\n \"\"\"\n Parse out netinfo.\n \"\"\"\n super(Netinfo, self).unpack(data)\n\n data = self.data\n time = struct.unpack('>I', data[:4])[0]\n data = data[4:]\n\n # decode our IP address\n host_type, address, data = self.decode_ip(data)\n self.our_address = address\n\n self.router_addresses = []\n\n # iterate over OR addresses.\n num_addresses = data[0]\n if not isinstance(num_addresses, int):\n num_addresses = struct.unpack('B', num_addresses)[0]\n\n data = data[1:]\n for _ in range(num_addresses):\n host_type, address, data = self.decode_ip(data)\n self.router_addresses.append(address)\n\n def decode_ip(self, data):\n \"\"\"\n Decode IPv4 and IPv6 addresses.\n \"\"\"\n host_type, size = struct.unpack('>BB', data[:2])\n data = data[2:]\n\n address = struct.unpack('>%ds' % size, data[:size])[0]\n data = data[size:]\n\n if host_type == 4:\n address = socket.inet_ntop(socket.AF_INET, address)\n elif host_type == 6:\n address = socket.inet_ntop(socket.AF_INET6, address)\n else:\n raise CellError('Do we allow hostnames in NETINFO?')\n\n return host_type, address, data\n\n def pack(self, data):\n \"\"\"\n Pack our own netinfo.\n \"\"\"\n ips = data\n\n data = struct.pack('>I', int(time()))\n data += self.encode_ip(ips['other'])\n data += struct.pack('>B', 1)\n data += self.encode_ip(ips['me'])\n\n return super(Netinfo, self).pack(data)\n\n def encode_ip(self, ip):\n \"\"\"\n Encode an IPv4 address.\n \"\"\"\n return struct.pack('>BB', 4, 4) + socket.inet_aton(ip)\n\nclass RelayEarly(Relay):\n \"\"\"\n RelayEarly cell.\n \"\"\"\n cell_type = 9\n\nclass Create2(FixedCell):\n \"\"\"\n Create2 cell.\n \"\"\"\n cell_type = 10\n\n def pack(self, data):\n data = struct.pack('>HH', 0x2, len(data)) + data\n return super(Create2, self).pack(data)\n\nclass Created2(FixedCell):\n \"\"\"\n Created2 cell.\n \"\"\"\n cell_type = 11\n\n def unpack(self, data):\n super(Created2, self).unpack(data)\n length, self.Y, self.auth = struct.unpack('>H32s32s', data[:66])\n\nclass Certs(VariableCell):\n \"\"\"\n Certs cell.\n \"\"\"\n cell_type = 129\n\n def unpack(self, data):\n \"\"\"\n Unpack a certs cell. Parses out all of the send certs and does *very* basic\n validation.\n \"\"\"\n super(Certs, self).unpack(data)\n\n data = self.data\n\n num_certs = data[0]\n if not isinstance(num_certs, int):\n num_certs = struct.unpack('>B', num_certs)[0]\n\n data = data[1:]\n\n now = datetime.now().strftime('%Y%m%d%H%M%S%z')\n\n self.certs = {}\n for _ in range(num_certs):\n # get cert type and length\n cert_info = struct.unpack('>BH', data[:3])\n data = data[3:]\n\n # unpack the cert\n cert_type = cert_info[0]\n cert = struct.unpack('>%ds' % cert_info[1], data[:cert_info[1]])[0]\n data = data[cert_info[1]:]\n\n # we only want one of each certificate type\n if cert_type in self.certs or int(cert_type) > 3:\n raise CellError('Duplicate or invalid certificate received.')\n\n # load the certificate and check expiration.\n # cert = crypto.load_certificate(crypto.FILETYPE_ASN1, cert)\n cert = x509.load_der_x509_certificate(cert, default_backend())\n now = datetime.now()\n if cert.not_valid_before > now or cert.not_valid_after < now:\n log.error('got invalid certificate date.')\n raise CellError('Certificate expired.')\n\n self.certs[cert_type] = cert\n log.info('got cert type %d, hash: %s' % (cert_type, cert))\n\nclass AuthChallenge(VariableCell):\n \"\"\"\n AuthChallenge cell.\n \"\"\"\n cell_type = 130\n\n def unpack(self, data):\n \"\"\"\n Unpack the auth challenge. Currently not doing anything with it.\n \"\"\"\n super(AuthChallenge, self).unpack(data)\n\n struct.unpack('>32sH', self.data[:34])\n\ndef cell_type_to_name(cell_type):\n \"\"\"\n Convert a cell type to its name.\n \"\"\"\n if cell_type in cell_types:\n return cell_types[cell_type].__name__\n else:\n return ''\n\ndef relay_name_to_command(name):\n \"\"\"\n Converts relay name to a command.\n \"\"\"\n if name in relay_commands:\n return relay_commands.index(name)\n else:\n return -1\n\n# List of cell types.\ncell_types = {\n 0: Padding,\n 3: Relay,\n 4: Destroy,\n 5: CreateFast,\n 6: CreatedFast,\n 7: Versions,\n 8: Netinfo,\n 9: RelayEarly,\n 10: Create2,\n 11: Created2,\n 129: Certs,\n 130: AuthChallenge\n}\n\n# List of relay commnads.\nrelay_commands = [\n '', 'RELAY_BEGIN', 'RELAY_DATA', 'RELAY_END', 'RELAY_CONNECTED', 'RELAY_SENDME',\n 'RELAY_EXTEND', 'RELAY_EXTENDED', 'RELAY_TRUNCATE', 'RELAY_TRUNCATED', \n 'RELAY_DROP', 'RELAY_RESOLVE', 'RELAY_RESOLVED', 'RELAY_BEGIN_DIR',\n 'RELAY_EXTEND2', 'RELAY_EXTENDED2'\n]\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203683,"cells":{"repo_name":{"kind":"string","value":"adsorensen/girder"},"path":{"kind":"string","value":"plugins/oauth/server/providers/globus.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"3567"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# Copyright Kitware Inc.\n#\n# Licensed under the Apache License, Version 2.0 ( the \"License\" );\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\nfrom six.moves import urllib\n\nfrom girder.api.rest import getApiUrl, RestException\nfrom .base import ProviderBase\nfrom .. import constants\n\n\nclass Globus(ProviderBase):\n _AUTH_URL = 'https://auth.globus.org/v2/oauth2/authorize'\n _AUTH_SCOPES = ['urn:globus:auth:scope:auth.globus.org:view_identities',\n 'openid', 'profile', 'email']\n _TOKEN_URL = 'https://auth.globus.org/v2/oauth2/token'\n _API_USER_URL = 'https://auth.globus.org/v2/oauth2/userinfo'\n\n def getClientIdSetting(self):\n return self.model('setting').get(\n constants.PluginSettings.GLOBUS_CLIENT_ID)\n\n def getClientSecretSetting(self):\n return self.model('setting').get(\n constants.PluginSettings.GLOBUS_CLIENT_SECRET)\n\n @classmethod\n def getUrl(cls, state):\n clientId = cls.model('setting').get(\n constants.PluginSettings.GLOBUS_CLIENT_ID)\n\n if clientId is None:\n raise Exception('No Globus client ID setting is present.')\n\n callbackUrl = '/'.join((getApiUrl(), 'oauth', 'globus', 'callback'))\n\n query = urllib.parse.urlencode({\n 'response_type': 'code',\n 'access_type': 'online',\n 'client_id': clientId,\n 'redirect_uri': callbackUrl,\n 'state': state,\n 'scope': ' '.join(cls._AUTH_SCOPES)\n })\n return '%s?%s' % (cls._AUTH_URL, query)\n\n def getToken(self, code):\n params = {\n 'grant_type': 'authorization_code',\n 'code': code,\n 'client_id': self.clientId,\n 'client_secret': self.clientSecret,\n 'redirect_uri': self.redirectUri\n }\n resp = self._getJson(method='POST', url=self._TOKEN_URL,\n data=params,\n headers={'Accept': 'application/json'})\n if 'error' in resp:\n raise RestException(\n 'Got an error exchanging token from provider: \"%s\".' % resp,\n code=502)\n return resp\n\n def getUser(self, token):\n headers = {\n 'Authorization': 'Bearer {}'.format(token['access_token'])\n }\n\n resp = self._getJson(method='GET', url=self._API_USER_URL,\n headers=headers)\n\n oauthId = resp.get('sub')\n if not oauthId:\n raise RestException(\n 'Globus identity did not return a valid ID.', code=502)\n\n email = resp.get('email')\n if not email:\n raise RestException(\n 'Globus identity did not return a valid email.', code=502)\n\n name = resp['name'].split()\n firstName = name[0]\n lastName = name[-1]\n\n return self._createOrReuseUser(oauthId, email, firstName, lastName)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203684,"cells":{"repo_name":{"kind":"string","value":"ebernhardson/l2r"},"path":{"kind":"string","value":"code/bench_formats.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1186"},"content":{"kind":"string","value":"import pandas as pd\nimport feather\nimport os\nimport timeit\n\nimport config\nfrom utils import table_utils\n\ndf = table_utils._read(config.ALL_DATA)\n\nFILE_HDF = os.path.join(config.TMP_DIR, 'test.h5')\nFILE_PICKLE = os.path.join(config.TMP_DIR, 'test.pkl')\nFILE_FEATHER = os.path.join(config.TMP_DIR, 'test.feather')\n\ndef test_hdf_write():\n df.to_hdf(FILE_HDF, 'test', mode='w')\n\ndef test_hdf_read():\n pd.read_hdf(FILE_HDF, 'test')\n\ndef test_pickle_write():\n df.to_pickle(FILE_PICKLE)\n\ndef test_pickle_read():\n pd.read_pickle(FILE_PICKLE)\n\ndef test_feather_write():\n feather.write_dataframe(df.copy(), FILE_FEATHER)\n\ndef test_feather_read():\n feather.read_dataframe(FILE_FEATHER)\n\n\ndef test(func):\n took = timeit.timeit(\"%s()\" % (func.__name__), setup=\"from __main__ import %s\" % (func.__name__), number=3)\n print \"%s: %.3f\" % (func.__name__, took)\n\nif __name__ == \"__main__\":\n res = []\n res.append(test(test_hdf_write))\n res.append(test(test_hdf_read))\n res.append(test(test_pickle_write))\n res.append(test(test_pickle_read))\n res.append(test(test_feather_write))\n res.append(test(test_feather_read))\n print \"\\n\\n\\n\"\n print \"\\n\".join(res)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203685,"cells":{"repo_name":{"kind":"string","value":"manishpatell/erpcustomizationssaiimpex123qwe"},"path":{"kind":"string","value":"addons/gamification/models/__init__.py"},"copies":{"kind":"string","value":"389"},"size":{"kind":"string","value":"1038"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2013 OpenERP SA ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nimport goal\nimport challenge\nimport res_users\nimport badge\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":203686,"cells":{"repo_name":{"kind":"string","value":"mattesno1/Sick-Beard"},"path":{"kind":"string","value":"lib/requests/packages/charade/jpcntx.py"},"copies":{"kind":"string","value":"151"},"size":{"kind":"string","value":"19323"},"content":{"kind":"string","value":"######################## BEGIN LICENSE BLOCK ########################\r\n# The Original Code is Mozilla Communicator client code.\r\n#\r\n# The Initial Developer of the Original Code is\r\n# Netscape Communications Corporation.\r\n# Portions created by the Initial Developer are Copyright (C) 1998\r\n# the Initial Developer. All Rights Reserved.\r\n#\r\n# Contributor(s):\r\n# Mark Pilgrim - port to Python\r\n#\r\n# This library is free software; you can redistribute it and/or\r\n# modify it under the terms of the GNU Lesser General Public\r\n# License as published by the Free Software Foundation; either\r\n# version 2.1 of the License, or (at your option) any later version.\r\n#\r\n# This library is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\r\n# Lesser General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Lesser General Public\r\n# License along with this library; if not, write to the Free Software\r\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA\r\n# 02110-1301 USA\r\n######################### END LICENSE BLOCK #########################\r\n\r\nfrom .compat import wrap_ord\r\n\r\nNUM_OF_CATEGORY = 6\r\nDONT_KNOW = -1\r\nENOUGH_REL_THRESHOLD = 100\r\nMAX_REL_THRESHOLD = 1000\r\nMINIMUM_DATA_THRESHOLD = 4\r\n\r\n# This is hiragana 2-char sequence table, the number in each cell represents its frequency category\r\njp2CharContext = (\r\n(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),\r\n(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),\r\n(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),\r\n(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),\r\n(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),\r\n(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),\r\n(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),\r\n(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),\r\n(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),\r\n(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),\r\n(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),\r\n(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),\r\n(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),\r\n(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),\r\n(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),\r\n(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),\r\n(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),\r\n(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),\r\n(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),\r\n(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),\r\n(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),\r\n(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),\r\n(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),\r\n(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),\r\n(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),\r\n(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),\r\n(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),\r\n(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),\r\n(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),\r\n(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),\r\n(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),\r\n(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),\r\n(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),\r\n(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),\r\n(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),\r\n(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),\r\n(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),\r\n(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),\r\n(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),\r\n(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),\r\n(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),\r\n(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),\r\n(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),\r\n(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),\r\n(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),\r\n(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),\r\n(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),\r\n(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),\r\n(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),\r\n(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),\r\n(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),\r\n(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),\r\n(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),\r\n(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),\r\n(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),\r\n(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),\r\n(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),\r\n(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),\r\n(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),\r\n(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),\r\n(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),\r\n(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),\r\n(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),\r\n(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),\r\n(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),\r\n(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),\r\n(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),\r\n(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),\r\n(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),\r\n(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),\r\n(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),\r\n(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),\r\n(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),\r\n(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),\r\n(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),\r\n(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),\r\n(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),\r\n(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),\r\n(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),\r\n(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),\r\n(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),\r\n(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),\r\n(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),\r\n)\r\n\r\nclass JapaneseContextAnalysis:\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self._mTotalRel = 0 # total sequence received\r\n # category counters, each interger counts sequence in its category\r\n self._mRelSample = [0] * NUM_OF_CATEGORY\r\n # if last byte in current buffer is not the last byte of a character,\r\n # we need to know how many bytes to skip in next buffer\r\n self._mNeedToSkipCharNum = 0\r\n self._mLastCharOrder = -1 # The order of previous char\r\n # If this flag is set to True, detection is done and conclusion has\r\n # been made\r\n self._mDone = False\r\n\r\n def feed(self, aBuf, aLen):\r\n if self._mDone:\r\n return\r\n\r\n # The buffer we got is byte oriented, and a character may span in more than one\r\n # buffers. In case the last one or two byte in last buffer is not\r\n # complete, we record how many byte needed to complete that character\r\n # and skip these bytes here. We can choose to record those bytes as\r\n # well and analyse the character once it is complete, but since a\r\n # character will not make much difference, by simply skipping\r\n # this character will simply our logic and improve performance.\r\n i = self._mNeedToSkipCharNum\r\n while i < aLen:\r\n order, charLen = self.get_order(aBuf[i:i + 2])\r\n i += charLen\r\n if i > aLen:\r\n self._mNeedToSkipCharNum = i - aLen\r\n self._mLastCharOrder = -1\r\n else:\r\n if (order != -1) and (self._mLastCharOrder != -1):\r\n self._mTotalRel += 1\r\n if self._mTotalRel > MAX_REL_THRESHOLD:\r\n self._mDone = True\r\n break\r\n self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1\r\n self._mLastCharOrder = order\r\n\r\n def got_enough_data(self):\r\n return self._mTotalRel > ENOUGH_REL_THRESHOLD\r\n\r\n def get_confidence(self):\r\n # This is just one way to calculate confidence. It works well for me.\r\n if self._mTotalRel > MINIMUM_DATA_THRESHOLD:\r\n return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel\r\n else:\r\n return DONT_KNOW\r\n\r\n def get_order(self, aBuf):\r\n return -1, 1\r\n\r\nclass SJISContextAnalysis(JapaneseContextAnalysis):\r\n def get_order(self, aBuf):\r\n if not aBuf:\r\n return -1, 1\r\n # find out current char's byte length\r\n first_char = wrap_ord(aBuf[0])\r\n if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):\r\n charLen = 2\r\n else:\r\n charLen = 1\r\n\r\n # return its order if it is hiragana\r\n if len(aBuf) > 1:\r\n second_char = wrap_ord(aBuf[1])\r\n if (first_char == 202) and (0x9F <= second_char <= 0xF1):\r\n return second_char - 0x9F, charLen\r\n\r\n return -1, charLen\r\n\r\nclass EUCJPContextAnalysis(JapaneseContextAnalysis):\r\n def get_order(self, aBuf):\r\n if not aBuf:\r\n return -1, 1\r\n # find out current char's byte length\r\n first_char = wrap_ord(aBuf[0])\r\n if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):\r\n charLen = 2\r\n elif first_char == 0x8F:\r\n charLen = 3\r\n else:\r\n charLen = 1\r\n\r\n # return its order if it is hiragana\r\n if len(aBuf) > 1:\r\n second_char = wrap_ord(aBuf[1])\r\n if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):\r\n return second_char - 0xA1, charLen\r\n\r\n return -1, charLen\r\n\r\n# flake8: noqa\r\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203687,"cells":{"repo_name":{"kind":"string","value":"mdhaber/scipy"},"path":{"kind":"string","value":"scipy/sparse/linalg/eigen/arpack/setup.py"},"copies":{"kind":"string","value":"18"},"size":{"kind":"string","value":"1845"},"content":{"kind":"string","value":"from os.path import join\n\n\ndef configuration(parent_package='',top_path=None):\n from scipy._build_utils.system_info import get_info\n from numpy.distutils.misc_util import Configuration\n from scipy._build_utils import (get_g77_abi_wrappers,\n gfortran_legacy_flag_hook,\n blas_ilp64_pre_build_hook,\n uses_blas64, get_f2py_int64_options)\n\n if uses_blas64():\n lapack_opt = get_info('lapack_ilp64_opt', 2)\n pre_build_hook = (gfortran_legacy_flag_hook,\n blas_ilp64_pre_build_hook(lapack_opt))\n f2py_options = get_f2py_int64_options()\n else:\n lapack_opt = get_info('lapack_opt')\n pre_build_hook = gfortran_legacy_flag_hook\n f2py_options = None\n\n config = Configuration('arpack', parent_package, top_path)\n\n arpack_sources = [join('ARPACK','SRC', '*.f')]\n arpack_sources.extend([join('ARPACK','UTIL', '*.f')])\n\n arpack_sources += get_g77_abi_wrappers(lapack_opt)\n\n config.add_library('arpack_scipy', sources=arpack_sources,\n include_dirs=[join('ARPACK', 'SRC')],\n _pre_build_hook=pre_build_hook)\n\n ext = config.add_extension('_arpack',\n sources=['arpack.pyf.src'],\n libraries=['arpack_scipy'],\n f2py_options=f2py_options,\n extra_info=lapack_opt,\n depends=arpack_sources)\n ext._pre_build_hook = pre_build_hook\n\n config.add_data_dir('tests')\n\n # Add license files\n config.add_data_files('ARPACK/COPYING')\n\n return config\n\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203688,"cells":{"repo_name":{"kind":"string","value":"dropbox/changes"},"path":{"kind":"string","value":"tests/changes/jobs/test_sync_build.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3772"},"content":{"kind":"string","value":"from __future__ import absolute_import\n\nfrom datetime import datetime\nfrom mock import patch\n\nfrom changes.constants import Status, Result\nfrom changes.config import db\nfrom changes.models.build import Build\nfrom changes.models.itemstat import ItemStat\nfrom changes.jobs.sync_build import sync_build\nfrom changes.testutils import TestCase\n\n\nclass SyncBuildTest(TestCase):\n @patch('changes.config.queue.delay')\n def test_simple(self, queue_delay):\n project = self.create_project()\n build = self.create_build(\n project=project,\n status=Status.unknown,\n result=Result.unknown,\n )\n\n job_a = self.create_job(\n build=build,\n status=Status.finished,\n result=Result.failed,\n duration=5000,\n date_started=datetime(2013, 9, 19, 22, 15, 22),\n date_finished=datetime(2013, 9, 19, 22, 15, 25),\n )\n job_b = self.create_job(\n build=build,\n status=Status.in_progress,\n result=Result.passed,\n duration=5000,\n date_started=datetime(2013, 9, 19, 22, 15, 23),\n date_finished=datetime(2013, 9, 19, 22, 15, 26),\n )\n self.create_task(\n task_name='sync_job',\n parent_id=build.id,\n task_id=job_a.id,\n status=Status.finished,\n )\n task_b = self.create_task(\n task_name='sync_job',\n parent_id=build.id,\n task_id=job_b.id,\n status=Status.in_progress,\n )\n\n db.session.add(ItemStat(item_id=job_a.id, name='tests_missing', value=1))\n db.session.add(ItemStat(item_id=job_b.id, name='tests_missing', value=0))\n db.session.commit()\n\n with patch.object(sync_build, 'allow_absent_from_db', True):\n sync_build(build_id=build.id.hex, task_id=build.id.hex)\n\n build = Build.query.get(build.id)\n\n assert build.status == Status.in_progress\n assert build.result == Result.failed\n\n task_b.status = Status.finished\n db.session.add(task_b)\n job_b.status = Status.finished\n db.session.add(job_b)\n db.session.commit()\n\n with patch.object(sync_build, 'allow_absent_from_db', True):\n sync_build(build_id=build.id.hex, task_id=build.id.hex)\n\n build = Build.query.get(build.id)\n\n assert build.status == Status.finished\n assert build.result == Result.failed\n assert build.duration == 4000\n assert build.date_started == datetime(2013, 9, 19, 22, 15, 22)\n assert build.date_finished == datetime(2013, 9, 19, 22, 15, 26)\n\n queue_delay.assert_any_call('update_project_stats', kwargs={\n 'project_id': project.id.hex,\n }, countdown=1)\n\n stat = ItemStat.query.filter(\n ItemStat.item_id == build.id,\n ItemStat.name == 'tests_missing',\n ).first()\n assert stat.value == 1\n\n @patch('changes.jobs.sync_build.datetime')\n def test_finished_no_jobs(self, sync_build_datetime):\n project = self.create_project()\n build = self.create_build(\n project=project,\n status=Status.unknown,\n result=Result.unknown,\n )\n sync_build_datetime.utcnow.return_value = datetime(2013, 9, 19, 22, 15, 22)\n\n with patch.object(sync_build, 'allow_absent_from_db', True):\n sync_build(build_id=build.id.hex, task_id=build.id.hex)\n\n build = Build.query.get(build.id)\n\n assert build.status == Status.finished\n assert build.result == Result.unknown\n\n assert build.date_finished == sync_build_datetime.utcnow.return_value\n assert build.date_decided == sync_build_datetime.utcnow.return_value\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203689,"cells":{"repo_name":{"kind":"string","value":"manishpatell/erpcustomizationssaiimpex123qwe"},"path":{"kind":"string","value":"addons/purchase/__init__.py"},"copies":{"kind":"string","value":"439"},"size":{"kind":"string","value":"1185"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n# \n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see . \n#\n##############################################################################\n\nimport purchase\nimport partner\nimport stock\nimport wizard\nimport report\nimport stock\nimport company\nimport edi\nimport res_config\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":203690,"cells":{"repo_name":{"kind":"string","value":"signed/intellij-community"},"path":{"kind":"string","value":"python/lib/Lib/site-packages/django/template/smartif.py"},"copies":{"kind":"string","value":"331"},"size":{"kind":"string","value":"6261"},"content":{"kind":"string","value":"\"\"\"\nParser and utilities for the smart 'if' tag\n\"\"\"\nimport operator\n\n# Using a simple top down parser, as described here:\n# http://effbot.org/zone/simple-top-down-parsing.htm.\n# 'led' = left denotation\n# 'nud' = null denotation\n# 'bp' = binding power (left = lbp, right = rbp)\n\nclass TokenBase(object):\n \"\"\"\n Base class for operators and literals, mainly for debugging and for throwing\n syntax errors.\n \"\"\"\n id = None # node/token type name\n value = None # used by literals\n first = second = None # used by tree nodes\n\n def nud(self, parser):\n # Null denotation - called in prefix context\n raise parser.error_class(\n \"Not expecting '%s' in this position in if tag.\" % self.id\n )\n\n def led(self, left, parser):\n # Left denotation - called in infix context\n raise parser.error_class(\n \"Not expecting '%s' as infix operator in if tag.\" % self.id\n )\n\n def display(self):\n \"\"\"\n Returns what to display in error messages for this node\n \"\"\"\n return self.id\n\n def __repr__(self):\n out = [str(x) for x in [self.id, self.first, self.second] if x is not None]\n return \"(\" + \" \".join(out) + \")\"\n\n\ndef infix(bp, func):\n \"\"\"\n Creates an infix operator, given a binding power and a function that\n evaluates the node\n \"\"\"\n class Operator(TokenBase):\n lbp = bp\n\n def led(self, left, parser):\n self.first = left\n self.second = parser.expression(bp)\n return self\n\n def eval(self, context):\n try:\n return func(context, self.first, self.second)\n except Exception:\n # Templates shouldn't throw exceptions when rendering. We are\n # most likely to get exceptions for things like {% if foo in bar\n # %} where 'bar' does not support 'in', so default to False\n return False\n\n return Operator\n\n\ndef prefix(bp, func):\n \"\"\"\n Creates a prefix operator, given a binding power and a function that\n evaluates the node.\n \"\"\"\n class Operator(TokenBase):\n lbp = bp\n\n def nud(self, parser):\n self.first = parser.expression(bp)\n self.second = None\n return self\n\n def eval(self, context):\n try:\n return func(context, self.first)\n except Exception:\n return False\n\n return Operator\n\n\n# Operator precedence follows Python.\n# NB - we can get slightly more accurate syntax error messages by not using the\n# same object for '==' and '='.\n# We defer variable evaluation to the lambda to ensure that terms are\n# lazily evaluated using Python's boolean parsing logic.\nOPERATORS = {\n 'or': infix(6, lambda context, x, y: x.eval(context) or y.eval(context)),\n 'and': infix(7, lambda context, x, y: x.eval(context) and y.eval(context)),\n 'not': prefix(8, lambda context, x: not x.eval(context)),\n 'in': infix(9, lambda context, x, y: x.eval(context) in y.eval(context)),\n 'not in': infix(9, lambda context, x, y: x.eval(context) not in y.eval(context)),\n '=': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),\n '==': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),\n '!=': infix(10, lambda context, x, y: x.eval(context) != y.eval(context)),\n '>': infix(10, lambda context, x, y: x.eval(context) > y.eval(context)),\n '>=': infix(10, lambda context, x, y: x.eval(context) >= y.eval(context)),\n '<': infix(10, lambda context, x, y: x.eval(context) < y.eval(context)),\n '<=': infix(10, lambda context, x, y: x.eval(context) <= y.eval(context)),\n}\n\n# Assign 'id' to each:\nfor key, op in OPERATORS.items():\n op.id = key\n\n\nclass Literal(TokenBase):\n \"\"\"\n A basic self-resolvable object similar to a Django template variable.\n \"\"\"\n # IfParser uses Literal in create_var, but TemplateIfParser overrides\n # create_var so that a proper implementation that actually resolves\n # variables, filters etc is used.\n id = \"literal\"\n lbp = 0\n\n def __init__(self, value):\n self.value = value\n\n def display(self):\n return repr(self.value)\n\n def nud(self, parser):\n return self\n\n def eval(self, context):\n return self.value\n\n def __repr__(self):\n return \"(%s %r)\" % (self.id, self.value)\n\n\nclass EndToken(TokenBase):\n lbp = 0\n\n def nud(self, parser):\n raise parser.error_class(\"Unexpected end of expression in if tag.\")\n\nEndToken = EndToken()\n\n\nclass IfParser(object):\n error_class = ValueError\n\n def __init__(self, tokens):\n # pre-pass necessary to turn 'not','in' into single token\n l = len(tokens)\n mapped_tokens = []\n i = 0\n while i < l:\n token = tokens[i]\n if token == \"not\" and i + 1 < l and tokens[i+1] == \"in\":\n token = \"not in\"\n i += 1 # skip 'in'\n mapped_tokens.append(self.translate_token(token))\n i += 1\n\n self.tokens = mapped_tokens\n self.pos = 0\n self.current_token = self.next()\n\n def translate_token(self, token):\n try:\n op = OPERATORS[token]\n except (KeyError, TypeError):\n return self.create_var(token)\n else:\n return op()\n\n def next(self):\n if self.pos >= len(self.tokens):\n return EndToken\n else:\n retval = self.tokens[self.pos]\n self.pos += 1\n return retval\n\n def parse(self):\n retval = self.expression()\n # Check that we have exhausted all the tokens\n if self.current_token is not EndToken:\n raise self.error_class(\"Unused '%s' at end of if expression.\" %\n self.current_token.display())\n return retval\n\n def expression(self, rbp=0):\n t = self.current_token\n self.current_token = self.next()\n left = t.nud(self)\n while rbp < self.current_token.lbp:\n t = self.current_token\n self.current_token = self.next()\n left = t.led(left, self)\n return left\n\n def create_var(self, value):\n return Literal(value)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203691,"cells":{"repo_name":{"kind":"string","value":"sorki/rosdep"},"path":{"kind":"string","value":"test/test_rosdep_osx.py"},"copies":{"kind":"string","value":"6"},"size":{"kind":"string","value":"7167"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2011, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n# Author Ken Conley/kwc@willowgarage.com\n\nimport os\nimport traceback\n\nfrom mock import call\nfrom mock import Mock\nfrom mock import patch\n\n\ndef get_test_dir():\n return os.path.abspath(os.path.join(os.path.dirname(__file__), 'osx'))\n\n\ndef is_port_installed_tripwire():\n # don't know the correct answer, but make sure this does not throw\n from rosdep2.platforms.osx import is_port_installed\n assert is_port_installed() in [True, False]\n\n\ndef is_brew_installed_tripwire():\n # don't know the correct answer, but make sure this does not throw\n from rosdep2.platforms.osx import is_brew_installed\n assert is_brew_installed() in [True, False]\n\n\ndef make_resolutions(package_list):\n from rosdep2.platforms.osx import HomebrewResolution\n return list(map(lambda pkg: HomebrewResolution(pkg, [], []), package_list))\n\n\ndef make_resolutions_options(package_list):\n from rosdep2.platforms.osx import HomebrewResolution\n return list(map(lambda pkg: HomebrewResolution(pkg[0], pkg[1], pkg[2]), package_list))\n\n\ndef brew_command(command):\n if command[1] == \"list\":\n with open(os.path.join(get_test_dir(), 'brew-list-output'), 'r') as f:\n return f.read()\n elif command[1] == \"info\":\n pkg = command[2]\n with open(os.path.join(get_test_dir(), 'brew-info-output'), 'r') as f:\n output = f.readlines()\n for line in output:\n res = line.split(\":\", 1)\n if res[0] == pkg:\n return res[1]\n return ''\n\n\ndef test_brew_detect():\n from rosdep2.platforms.osx import brew_detect\n\n m = Mock()\n m.return_value = ''\n val = brew_detect([], exec_fn=m)\n assert val == [], val\n\n m = Mock()\n m.return_value = ''\n val = brew_detect(make_resolutions(['tinyxml']), exec_fn=m)\n assert val == [], val\n # make sure our test harness is based on the same implementation\n m.assert_called_with(['brew', 'list'])\n assert m.call_args_list == [call(['brew', 'list'])], m.call_args_list\n\n m = Mock()\n m.side_effect = brew_command\n val = brew_detect(make_resolutions(['apt', 'subversion', 'python', 'bazaar']), exec_fn=m)\n # make sure it preserves order\n expected = make_resolutions(['subversion', 'bazaar'])\n assert set(val) == set(expected), val\n assert val == expected, val\n assert len(val) == len(set(val)), val\n\n\ndef test_HomebrewInstaller():\n from rosdep2.platforms.osx import HomebrewInstaller\n\n @patch('rosdep2.platforms.osx.is_brew_installed')\n @patch.object(HomebrewInstaller, 'remove_duplicate_dependencies')\n @patch.object(HomebrewInstaller, 'get_packages_to_install')\n def test(mock_get_packages_to_install, mock_remove_duplicate_dependencies, mock_brew_installed):\n mock_brew_installed.return_value = True\n\n installer = HomebrewInstaller()\n mock_get_packages_to_install.return_value = []\n mock_remove_duplicate_dependencies.return_value = mock_get_packages_to_install.return_value\n assert [] == installer.get_install_command(make_resolutions(['fake']))\n\n mock_get_packages_to_install.return_value = make_resolutions(['subversion', 'bazaar'])\n mock_remove_duplicate_dependencies.return_value = mock_get_packages_to_install.return_value\n expected = [['brew', 'install', 'subversion'],\n ['brew', 'install', 'bazaar']]\n # brew is always non-interactive\n for interactive in [True, False]:\n val = installer.get_install_command(['whatever'], interactive=interactive)\n assert val == expected, val\n\n expected = [['brew', 'uninstall', '--force', 'subversion'],\n ['brew', 'install', 'subversion'],\n ['brew', 'uninstall', '--force', 'bazaar'],\n ['brew', 'install', 'bazaar']]\n val = installer.get_install_command(['whatever'], reinstall=True)\n assert val == expected, val\n\n mock_get_packages_to_install.return_value = make_resolutions_options(\n [('subversion', ['foo', 'bar'], ['baz']), ('bazaar', [], ['--with-quux'])])\n mock_remove_duplicate_dependencies.return_value = mock_get_packages_to_install.return_value\n expected = [['brew', 'install', 'subversion', 'foo', 'bar', 'baz'],\n ['brew', 'install', 'bazaar', '--with-quux']]\n val = installer.get_install_command(['whatever'])\n assert val == expected, val\n\n try:\n mock_get_packages_to_install.return_value = eval(\"make_resolutions_options([('subversion', [u'f´´ßß', u'öäö'], []), (u'bazaar', [], [u'tüü'])])\")\n except SyntaxError:\n # Python 3.2, u'...' is not allowed, but string literals are unicode\n mock_get_packages_to_install.return_value = make_resolutions_options(\n [('subversion', ['f´´ßß', 'öäö'], []), ('bazaar', [], [\"tüü\"])])\n mock_remove_duplicate_dependencies.return_value = mock_get_packages_to_install.return_value\n try:\n expected = eval(\"[['brew', 'install', 'subversion', u'f´´ßß', u'öäö'], ['brew', 'install', 'bazaar', u'tüü']]\")\n except SyntaxError:\n # Python 3.2, u'...' is not allowed, but string literals are unicode\n expected = [['brew', 'install', 'subversion', 'f´´ßß', 'öäö'],\n ['brew', 'install', 'bazaar', \"tüü\"]]\n val = installer.get_install_command(['whatever'])\n assert val == expected, val\n try:\n test()\n except AssertionError:\n traceback.print_exc()\n raise\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203692,"cells":{"repo_name":{"kind":"string","value":"lancezlin/pyjs"},"path":{"kind":"string","value":"pygtkweb/library/browser.py"},"copies":{"kind":"string","value":"6"},"size":{"kind":"string","value":"4255"},"content":{"kind":"string","value":"from __pyjamas__ import JS, doc, wnd, get_main_frame\n\nlisteners = {}\n\ndef mash_attrib(name, joiner='-'):\n return name\n\ndef get_listener(item):\n pass\n\ndef set_listener(item, listener):\n pass\n\ndef round_val(val, digits):\n return JS('@{{val}}.toFixed(@{{digits}});')\n\nclass Element:\n def __init__(self, tag=None, element=None):\n if tag is not None:\n JS('''\n this.element = $doc.createElement(@{{tag}});\n ''')\n elif element is not None:\n self.element = element\n else:\n raise Exception(\"Cannot create Element without tag or element\")\n\n self.element.__ref = self;\n self.activeEvents = []\n\n def append(self, element):\n JS('''\n this.element.appendChild(@{{element}}.element);\n ''')\n\n def prepend(self, element):\n JS('''\n this.element.insertBefore(@{{element}}.element, @{{self}}.element.firstChild);\n ''')\n\n def getX(self):\n JS('''\n var obj = this.element;\n var curleft = 0;\n if (obj.offsetParent) {\n curleft = obj.offsetLeft\n while (obj = obj.offsetParent) {\n curleft += obj.offsetLeft\n }\n }\n return curleft;\n ''')\n\n def getY(self):\n JS('''\n var obj = this.element;\n var curtop = 0;\n if (obj.offsetParent) {\n curtop = obj.offsetTop\n while (obj = obj.offsetParent) {\n curtop += obj.offsetTop\n }\n }\n return curtop;\n ''')\n\n def getWidth(self):\n JS('''\n return this.element.offsetWidth;\n ''')\n\n def getHeight(self):\n JS('''\n return this.element.offsetHeight;\n ''')\n\n def setWidth(self, width):\n self.setStyle('width',width)\n\n def setHeight(self, height):\n self.setStyle('height',height)\n\n def setStyle(self, property, value):\n JS('''\n this.element.style[@{{property}}] = @{{value}};\n ''')\n\n def setPxStyle(self, property, value):\n self.setStyle(property, \"%dpx\" % value)\n\n def setPercentStyle(self, property, value):\n self.setStyle(property, \"%d%%\" % value)\n\n def getStyle(self, property):\n JS('''\n return this.element.style[@{{property}}];\n ''')\n\n def setProperty(self, property, value):\n JS('''\n //this.element.setAttribute(@{{property}},@{{value}});\n this.element[@{{property}}] = @{{value}};\n ''')\n\n def getProperty(self, property):\n JS('''\n //return this.element.getAttribute(@{{property}});\n return this.element[@{{property}}];\n ''')\n\n def setHTML(self, content):\n JS('''\n this.element.innerHTML = @{{content}};\n ''')\n\n def getHTML(self):\n JS('''\n return this.element.innerHTML;\n ''')\n\n def on_browser_event(self, view, e, ignore):\n pass\n\n def catchEvents(self, name, object):\n JS('''\n var tmp = function(e) {\n var targ;\n if (!e) var e = $wnd.event;\n if (e.target) targ = e.target;\n else if (e.srcElement) targ = e.srcElement;\n if (targ.nodeType == 3) targ = targ.parentNode;\n if (targ.__ref)\n @{{object}}.dom_event(e, targ.__ref);\n else\n @{{object}}.dom_event(e, null);\n };\n ''')\n name = name[0]\n self.activeEvents.append((name, object))\n\n JS('''\n var old_callback = this.element['on'+@{{name}}];\n this.element['on'+@{{name}}] = function(e){if(old_callback){old_callback(e);}@{{!tmp}}(e);};\n ''')\n\nclass Document:\n window = Element(element= JS('$wnd'))\n document = Element(element= JS('$doc'))\n body = Element(element= JS('$doc.body'))\n\n @staticmethod\n def createElement(tag):\n return Element(tag)\n\n @staticmethod\n def append(element):\n JS('''\n $doc.body.appendChild(@{{element}}.element);\n ''')\n\n @staticmethod\n def setContent(message):\n JS('''\n $doc.body.innerHTML = @{{message}};\n ''')\n\n @staticmethod\n def setTitle(title):\n JS('''\n $doc.title = @{{title}};\n ''')\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203693,"cells":{"repo_name":{"kind":"string","value":"xen0l/ansible"},"path":{"kind":"string","value":"lib/ansible/modules/cloud/google/gcp_compute_route_facts.py"},"copies":{"kind":"string","value":"12"},"size":{"kind":"string","value":"7916"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2017 Google\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n# ----------------------------------------------------------------------------\n#\n# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***\n#\n# ----------------------------------------------------------------------------\n#\n# This file is automatically generated by Magic Modules and manual\n# changes will be clobbered when the file is regenerated.\n#\n# Please read more about how to change this file at\n# https://www.github.com/GoogleCloudPlatform/magic-modules\n#\n# ----------------------------------------------------------------------------\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n################################################################################\n# Documentation\n################################################################################\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': [\"preview\"],\n 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: gcp_compute_route_facts\ndescription:\n - Gather facts for GCP Route\nshort_description: Gather facts for GCP Route\nversion_added: 2.7\nauthor: Google Inc. (@googlecloudplatform)\nrequirements:\n - python >= 2.6\n - requests >= 2.18.4\n - google-auth >= 1.3.0\noptions:\n filters:\n description:\n A list of filter value pairs. Available filters are listed here\n U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).\n Each additional filter in the list will act be added as an AND condition\n (filter1 and filter2)\nextends_documentation_fragment: gcp\n'''\n\nEXAMPLES = '''\n- name: a route facts\n gcp_compute_route_facts:\n filters:\n - name = test_object\n project: test_project\n auth_kind: service_account\n service_account_file: \"/tmp/auth.pem\"\n'''\n\nRETURN = '''\nitems:\n description: List of items\n returned: always\n type: complex\n contains:\n dest_range:\n description:\n - The destination range of outgoing packets that this route applies to.\n - Only IPv4 is supported.\n returned: success\n type: str\n description:\n description:\n - An optional description of this resource. Provide this property when you create\n the resource.\n returned: success\n type: str\n name:\n description:\n - Name of the resource. Provided by the client when the resource is created. The name\n must be 1-63 characters long, and comply with RFC1035. Specifically, the name must\n be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`\n which means the first character must be a lowercase letter, and all following characters\n must be a dash, lowercase letter, or digit, except the last character, which cannot\n be a dash.\n returned: success\n type: str\n network:\n description:\n - The network that this route applies to.\n returned: success\n type: dict\n priority:\n description:\n - The priority of this route. Priority is used to break ties in cases where there\n is more than one matching route of equal prefix length.\n - In the case of two routes with equal prefix length, the one with the lowest-numbered\n priority value wins.\n - Default value is 1000. Valid range is 0 through 65535.\n returned: success\n type: int\n tags:\n description:\n - A list of instance tags to which this route applies.\n returned: success\n type: list\n next_hop_gateway:\n description:\n - URL to a gateway that should handle matching packets.\n - 'Currently, you can only specify the internet gateway, using a full or partial valid\n URL: * U(https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway)\n * projects/project/global/gateways/default-internet-gateway * global/gateways/default-internet-gateway\n .'\n returned: success\n type: str\n next_hop_instance:\n description:\n - URL to an instance that should handle matching packets.\n - 'You can specify this as a full or partial URL. For example: * U(https://www.googleapis.com/compute/v1/projects/project/zones/zone/)\n instances/instance * projects/project/zones/zone/instances/instance * zones/zone/instances/instance\n .'\n returned: success\n type: str\n next_hop_ip:\n description:\n - Network IP address of an instance that should handle matching packets.\n returned: success\n type: str\n next_hop_vpn_tunnel:\n description:\n - URL to a VpnTunnel that should handle matching packets.\n returned: success\n type: str\n next_hop_network:\n description:\n - URL to a Network that should handle matching packets.\n returned: success\n type: str\n'''\n\n################################################################################\n# Imports\n################################################################################\nfrom ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest\nimport json\n\n################################################################################\n# Main\n################################################################################\n\n\ndef main():\n module = GcpModule(\n argument_spec=dict(\n filters=dict(type='list', elements='str'),\n )\n )\n\n if 'scopes' not in module.params:\n module.params['scopes'] = ['https://www.googleapis.com/auth/compute']\n\n items = fetch_list(module, collection(module), query_options(module.params['filters']))\n if items.get('items'):\n items = items.get('items')\n else:\n items = []\n return_value = {\n 'items': items\n }\n module.exit_json(**return_value)\n\n\ndef collection(module):\n return \"https://www.googleapis.com/compute/v1/projects/{project}/global/routes\".format(**module.params)\n\n\ndef fetch_list(module, link, query):\n auth = GcpSession(module, 'compute')\n response = auth.get(link, params={'filter': query})\n return return_if_object(module, response)\n\n\ndef query_options(filters):\n if not filters:\n return ''\n\n if len(filters) == 1:\n return filters[0]\n else:\n queries = []\n for f in filters:\n # For multiple queries, all queries should have ()\n if f[0] != '(' and f[-1] != ')':\n queries.append(\"(%s)\" % ''.join(f))\n else:\n queries.append(f)\n\n return ' '.join(queries)\n\n\ndef return_if_object(module, response):\n # If not found, return nothing.\n if response.status_code == 404:\n return None\n\n # If no content, return nothing.\n if response.status_code == 204:\n return None\n\n try:\n module.raise_for_status(response)\n result = response.json()\n except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:\n module.fail_json(msg=\"Invalid JSON response with error: %s\" % inst)\n\n if navigate_hash(result, ['error', 'errors']):\n module.fail_json(msg=navigate_hash(result, ['error', 'errors']))\n\n return result\n\n\nif __name__ == \"__main__\":\n main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":203694,"cells":{"repo_name":{"kind":"string","value":"gudcjfdldu/volatility"},"path":{"kind":"string","value":"volatility/plugins/linux/dentry_cache.py"},"copies":{"kind":"string","value":"57"},"size":{"kind":"string","value":"2513"},"content":{"kind":"string","value":"# Volatility\n# Copyright (C) 2007-2013 Volatility Foundation\n#\n# This file is part of Volatility.\n#\n# Volatility is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Volatility is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Volatility. If not, see .\n#\n\n\"\"\"\n@author: Andrew Case\n@license: GNU General Public License 2.0\n@contact: atcuno@gmail.com\n@organization:\n\"\"\"\n\nimport volatility.plugins.linux.common as linux_common\nfrom volatility.plugins.linux.slab_info import linux_slabinfo\n\nclass linux_dentry_cache(linux_common.AbstractLinuxCommand):\n \"\"\"Gather files from the dentry cache\"\"\"\n\n def __init__(self, config, *args, **kwargs):\n linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)\n self._config.add_option('UNALLOCATED', short_option = 'u',\n default = False,\n help = 'Show unallocated',\n action = 'store_true')\n\n def make_body(self, dentry):\n \"\"\"Create a pipe-delimited bodyfile from a dentry structure. \n \n MD5|name|inode|mode_as_string|UID|GID|size|atime|mtime|ctime|crtime\n \"\"\"\n \n path = dentry.get_partial_path() or \"\"\n i = dentry.d_inode\n \n if i:\n ret = [0, path, i.i_ino, 0, i.i_uid, i.i_gid, i.i_size, i.i_atime, i.i_mtime, 0, i.i_ctime]\n else:\n ret = [0, path] + [0] * 8\n \n ret = \"|\".join([str(val) for val in ret])\n return ret\n\n def calculate(self):\n linux_common.set_plugin_members(self)\n\n cache = linux_slabinfo(self._config).get_kmem_cache(\"dentry\", self._config.UNALLOCATED)\n\n # support for old kernels \n if cache == []:\n cache = linux_slabinfo(self._config).get_kmem_cache(\"dentry_cache\", self._config.UNALLOCATED, struct_name = \"dentry\")\n\n for dentry in cache:\n yield self.make_body(dentry)\n\n def render_text(self, outfd, data):\n\n for bodyline in data:\n outfd.write(bodyline + \"\\n\")\n\n\n\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":203695,"cells":{"repo_name":{"kind":"string","value":"scripnichenko/nova"},"path":{"kind":"string","value":"nova/tests/unit/compute/test_resources.py"},"copies":{"kind":"string","value":"57"},"size":{"kind":"string","value":"11446"},"content":{"kind":"string","value":"# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Tests for the compute extra resources framework.\"\"\"\n\n\nfrom oslo_config import cfg\nfrom stevedore import extension\nfrom stevedore import named\n\nfrom nova.compute import resources\nfrom nova.compute.resources import base\nfrom nova.compute.resources import vcpu\nfrom nova import context\nfrom nova.objects import flavor as flavor_obj\nfrom nova import test\nfrom nova.tests.unit import fake_instance\n\nCONF = cfg.CONF\n\n\nclass FakeResourceHandler(resources.ResourceHandler):\n def __init__(self, extensions):\n self._mgr = \\\n named.NamedExtensionManager.make_test_instance(extensions)\n\n\nclass FakeResource(base.Resource):\n\n def __init__(self):\n self.total_res = 0\n self.used_res = 0\n\n def _get_requested(self, usage):\n if 'extra_specs' not in usage:\n return\n if self.resource_name not in usage['extra_specs']:\n return\n req = usage['extra_specs'][self.resource_name]\n return int(req)\n\n def _get_limit(self, limits):\n if self.resource_name not in limits:\n return\n limit = limits[self.resource_name]\n return int(limit)\n\n def reset(self, resources, driver):\n self.total_res = 0\n self.used_res = 0\n\n def test(self, usage, limits):\n requested = self._get_requested(usage)\n if not requested:\n return\n\n limit = self._get_limit(limits)\n if not limit:\n return\n\n free = limit - self.used_res\n if requested <= free:\n return\n else:\n return ('Free %(free)d < requested %(requested)d ' %\n {'free': free, 'requested': requested})\n\n def add_instance(self, usage):\n requested = self._get_requested(usage)\n if requested:\n self.used_res += requested\n\n def remove_instance(self, usage):\n requested = self._get_requested(usage)\n if requested:\n self.used_res -= requested\n\n def write(self, resources):\n pass\n\n def report_free(self):\n return \"Free %s\" % (self.total_res - self.used_res)\n\n\nclass ResourceA(FakeResource):\n\n def reset(self, resources, driver):\n # ResourceA uses a configuration option\n self.total_res = int(CONF.resA)\n self.used_res = 0\n self.resource_name = 'resource:resA'\n\n def write(self, resources):\n resources['resA'] = self.total_res\n resources['used_resA'] = self.used_res\n\n\nclass ResourceB(FakeResource):\n\n def reset(self, resources, driver):\n # ResourceB uses resource details passed in parameter resources\n self.total_res = resources['resB']\n self.used_res = 0\n self.resource_name = 'resource:resB'\n\n def write(self, resources):\n resources['resB'] = self.total_res\n resources['used_resB'] = self.used_res\n\n\ndef fake_flavor_obj(**updates):\n flavor = flavor_obj.Flavor()\n flavor.id = 1\n flavor.name = 'fakeflavor'\n flavor.memory_mb = 8000\n flavor.vcpus = 3\n flavor.root_gb = 11\n flavor.ephemeral_gb = 4\n flavor.swap = 0\n flavor.rxtx_factor = 1.0\n flavor.vcpu_weight = 1\n if updates:\n flavor.update(updates)\n return flavor\n\n\nclass BaseTestCase(test.NoDBTestCase):\n\n def _initialize_used_res_counter(self):\n # Initialize the value for the used resource\n for ext in self.r_handler._mgr.extensions:\n ext.obj.used_res = 0\n\n def setUp(self):\n super(BaseTestCase, self).setUp()\n\n # initialize flavors and stub get_by_id to\n # get flavors from here\n self._flavors = {}\n self.ctxt = context.get_admin_context()\n\n # Create a flavor without extra_specs defined\n _flavor_id = 1\n _flavor = fake_flavor_obj(id=_flavor_id)\n self._flavors[_flavor_id] = _flavor\n\n # Create a flavor with extra_specs defined\n _flavor_id = 2\n requested_resA = 5\n requested_resB = 7\n requested_resC = 7\n _extra_specs = {'resource:resA': requested_resA,\n 'resource:resB': requested_resB,\n 'resource:resC': requested_resC}\n _flavor = fake_flavor_obj(id=_flavor_id,\n extra_specs=_extra_specs)\n self._flavors[_flavor_id] = _flavor\n\n # create fake resource extensions and resource handler\n _extensions = [\n extension.Extension('resA', None, ResourceA, ResourceA()),\n extension.Extension('resB', None, ResourceB, ResourceB()),\n ]\n self.r_handler = FakeResourceHandler(_extensions)\n\n # Resources details can be passed to each plugin or can be specified as\n # configuration options\n driver_resources = {'resB': 5}\n CONF.resA = '10'\n\n # initialise the resources\n self.r_handler.reset_resources(driver_resources, None)\n\n def test_update_from_instance_with_extra_specs(self):\n # Flavor with extra_specs\n _flavor_id = 2\n sign = 1\n self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)\n\n expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA']\n expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB']\n self.assertEqual(int(expected_resA),\n self.r_handler._mgr['resA'].obj.used_res)\n self.assertEqual(int(expected_resB),\n self.r_handler._mgr['resB'].obj.used_res)\n\n def test_update_from_instance_without_extra_specs(self):\n # Flavor id without extra spec\n _flavor_id = 1\n self._initialize_used_res_counter()\n self.r_handler.resource_list = []\n sign = 1\n self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)\n self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res)\n self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res)\n\n def test_write_resources(self):\n self._initialize_used_res_counter()\n extra_resources = {}\n expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0}\n self.r_handler.write_resources(extra_resources)\n self.assertEqual(expected, extra_resources)\n\n def test_test_resources_without_extra_specs(self):\n limits = {}\n # Flavor id without extra_specs\n flavor = self._flavors[1]\n result = self.r_handler.test_resources(flavor, limits)\n self.assertEqual([None, None], result)\n\n def test_test_resources_with_limits_for_different_resource(self):\n limits = {'resource:resC': 20}\n # Flavor id with extra_specs\n flavor = self._flavors[2]\n result = self.r_handler.test_resources(flavor, limits)\n self.assertEqual([None, None], result)\n\n def test_passing_test_resources(self):\n limits = {'resource:resA': 10, 'resource:resB': 20}\n # Flavor id with extra_specs\n flavor = self._flavors[2]\n self._initialize_used_res_counter()\n result = self.r_handler.test_resources(flavor, limits)\n self.assertEqual([None, None], result)\n\n def test_failing_test_resources_for_single_resource(self):\n limits = {'resource:resA': 4, 'resource:resB': 20}\n # Flavor id with extra_specs\n flavor = self._flavors[2]\n self._initialize_used_res_counter()\n result = self.r_handler.test_resources(flavor, limits)\n expected = ['Free 4 < requested 5 ', None]\n self.assertEqual(sorted(expected),\n sorted(result))\n\n def test_empty_resource_handler(self):\n \"\"\"An empty resource handler has no resource extensions,\n should have no effect, and should raise no exceptions.\n \"\"\"\n empty_r_handler = FakeResourceHandler([])\n\n resources = {}\n empty_r_handler.reset_resources(resources, None)\n\n flavor = self._flavors[1]\n sign = 1\n empty_r_handler.update_from_instance(flavor, sign)\n\n limits = {}\n test_result = empty_r_handler.test_resources(flavor, limits)\n self.assertEqual([], test_result)\n\n sign = -1\n empty_r_handler.update_from_instance(flavor, sign)\n\n extra_resources = {}\n expected_extra_resources = extra_resources\n empty_r_handler.write_resources(extra_resources)\n self.assertEqual(expected_extra_resources, extra_resources)\n\n empty_r_handler.report_free_resources()\n\n def test_vcpu_resource_load(self):\n # load the vcpu example\n names = ['vcpu']\n real_r_handler = resources.ResourceHandler(names)\n ext_names = real_r_handler._mgr.names()\n self.assertEqual(names, ext_names)\n\n # check the extension loaded is the one we expect\n # and an instance of the object has been created\n ext = real_r_handler._mgr['vcpu']\n self.assertIsInstance(ext.obj, vcpu.VCPU)\n\n\nclass TestVCPU(test.NoDBTestCase):\n\n def setUp(self):\n super(TestVCPU, self).setUp()\n self._vcpu = vcpu.VCPU()\n self._vcpu._total = 10\n self._vcpu._used = 0\n self._flavor = fake_flavor_obj(vcpus=5)\n self._big_flavor = fake_flavor_obj(vcpus=20)\n self._instance = fake_instance.fake_instance_obj(None)\n\n def test_reset(self):\n # set vcpu values to something different to test reset\n self._vcpu._total = 10\n self._vcpu._used = 5\n\n driver_resources = {'vcpus': 20}\n self._vcpu.reset(driver_resources, None)\n self.assertEqual(20, self._vcpu._total)\n self.assertEqual(0, self._vcpu._used)\n\n def test_add_and_remove_instance(self):\n self._vcpu.add_instance(self._flavor)\n self.assertEqual(10, self._vcpu._total)\n self.assertEqual(5, self._vcpu._used)\n\n self._vcpu.remove_instance(self._flavor)\n self.assertEqual(10, self._vcpu._total)\n self.assertEqual(0, self._vcpu._used)\n\n def test_test_pass_limited(self):\n result = self._vcpu.test(self._flavor, {'vcpu': 10})\n self.assertIsNone(result, 'vcpu test failed when it should pass')\n\n def test_test_pass_unlimited(self):\n result = self._vcpu.test(self._big_flavor, {})\n self.assertIsNone(result, 'vcpu test failed when it should pass')\n\n def test_test_fail(self):\n result = self._vcpu.test(self._flavor, {'vcpu': 2})\n expected = 'Free CPUs 2.00 VCPUs < requested 5 VCPUs'\n self.assertEqual(expected, result)\n\n def test_write(self):\n resources = {'stats': {}}\n self._vcpu.write(resources)\n expected = {\n 'vcpus': 10,\n 'vcpus_used': 0,\n 'stats': {\n 'num_vcpus': 10,\n 'num_vcpus_used': 0\n }\n }\n self.assertEqual(sorted(expected),\n sorted(resources))\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203696,"cells":{"repo_name":{"kind":"string","value":"PierreRust/beets"},"path":{"kind":"string","value":"extra/release.py"},"copies":{"kind":"string","value":"24"},"size":{"kind":"string","value":"8554"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\"\"\"A utility script for automating the beets release process.\n\"\"\"\nimport click\nimport os\nimport re\nimport subprocess\nfrom contextlib import contextmanager\nimport datetime\n\nBASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nCHANGELOG = os.path.join(BASE, 'docs', 'changelog.rst')\n\n\n@contextmanager\ndef chdir(d):\n \"\"\"A context manager that temporary changes the working directory.\n \"\"\"\n olddir = os.getcwd()\n os.chdir(d)\n yield\n os.chdir(olddir)\n\n\n@click.group()\ndef release():\n pass\n\n\n# Locations (filenames and patterns) of the version number.\nVERSION_LOCS = [\n (\n os.path.join(BASE, 'beets', '__init__.py'),\n [\n (\n r'__version__\\s*=\\s*[\\'\"]([0-9\\.]+)[\\'\"]',\n \"__version__ = '{version}'\",\n )\n ]\n ),\n (\n os.path.join(BASE, 'docs', 'conf.py'),\n [\n (\n r'version\\s*=\\s*[\\'\"]([0-9\\.]+)[\\'\"]',\n \"version = '{minor}'\",\n ),\n (\n r'release\\s*=\\s*[\\'\"]([0-9\\.]+)[\\'\"]',\n \"release = '{version}'\",\n ),\n ]\n ),\n (\n os.path.join(BASE, 'setup.py'),\n [\n (\n r'\\s*version\\s*=\\s*[\\'\"]([0-9\\.]+)[\\'\"]',\n \" version='{version}',\",\n )\n ]\n ),\n]\n\n\ndef bump_version(version):\n \"\"\"Update the version number in setup.py, docs config, changelog,\n and root module.\n \"\"\"\n version_parts = [int(p) for p in version.split('.')]\n assert len(version_parts) == 3, \"invalid version number\"\n minor = '{}.{}'.format(*version_parts)\n major = '{}'.format(*version_parts)\n\n # Replace the version each place where it lives.\n for filename, locations in VERSION_LOCS:\n # Read and transform the file.\n out_lines = []\n with open(filename) as f:\n found = False\n for line in f:\n for pattern, template in locations:\n match = re.match(pattern, line)\n if match:\n # Check that this version is actually newer.\n old_version = match.group(1)\n old_parts = [int(p) for p in old_version.split('.')]\n assert version_parts > old_parts, \\\n \"version must be newer than {}\".format(\n old_version\n )\n\n # Insert the new version.\n out_lines.append(template.format(\n version=version,\n major=major,\n minor=minor,\n ) + '\\n')\n\n found = True\n break\n\n else:\n # Normal line.\n out_lines.append(line)\n\n if not found:\n print(\"No pattern found in {}\".format(filename))\n\n # Write the file back.\n with open(filename, 'w') as f:\n f.write(''.join(out_lines))\n\n # Generate bits to insert into changelog.\n header_line = '{} (in development)'.format(version)\n header = '\\n\\n' + header_line + '\\n' + '-' * len(header_line) + '\\n\\n'\n header += 'Changelog goes here!\\n'\n\n # Insert into the right place.\n with open(CHANGELOG) as f:\n contents = f.read()\n location = contents.find('\\n\\n') # First blank line.\n contents = contents[:location] + header + contents[location:]\n\n # Write back.\n with open(CHANGELOG, 'w') as f:\n f.write(contents)\n\n\n@release.command()\n@click.argument('version')\ndef bump(version):\n \"\"\"Bump the version number.\n \"\"\"\n bump_version(version)\n\n\ndef get_latest_changelog():\n \"\"\"Extract the first section of the changelog.\n \"\"\"\n started = False\n lines = []\n with open(CHANGELOG) as f:\n for line in f:\n if re.match(r'^--+$', line.strip()):\n # Section boundary. Start or end.\n if started:\n # Remove last line, which is the header of the next\n # section.\n del lines[-1]\n break\n else:\n started = True\n\n elif started:\n lines.append(line)\n return ''.join(lines).strip()\n\n\ndef rst2md(text):\n \"\"\"Use Pandoc to convert text from ReST to Markdown.\n \"\"\"\n pandoc = subprocess.Popen(\n ['pandoc', '--from=rst', '--to=markdown', '--no-wrap'],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n stdout, _ = pandoc.communicate(text.encode('utf8'))\n md = stdout.decode('utf8').strip()\n\n # Fix up odd spacing in lists.\n return re.sub(r'^- ', '- ', md, flags=re.M)\n\n\ndef changelog_as_markdown():\n \"\"\"Get the latest changelog entry as hacked up Markdown.\n \"\"\"\n rst = get_latest_changelog()\n\n # Replace plugin links with plugin names.\n rst = re.sub(r':doc:`/plugins/(\\w+)`', r'``\\1``', rst)\n\n # References with text.\n rst = re.sub(r':ref:`([^<]+)(<[^>]+>)`', r'\\1', rst)\n\n # Other backslashes with verbatim ranges.\n rst = re.sub(r'(\\s)`([^`]+)`([^_])', r'\\1``\\2``\\3', rst)\n\n # Command links with command names.\n rst = re.sub(r':ref:`(\\w+)-cmd`', r'``\\1``', rst)\n\n # Bug numbers.\n rst = re.sub(r':bug:`(\\d+)`', r'#\\1', rst)\n\n # Users.\n rst = re.sub(r':user:`(\\w+)`', r'@\\1', rst)\n\n # Convert with Pandoc.\n md = rst2md(rst)\n\n # Restore escaped issue numbers.\n md = re.sub(r'\\\\#(\\d+)\\b', r'#\\1', md)\n\n return md\n\n\n@release.command()\ndef changelog():\n \"\"\"Get the most recent version's changelog as Markdown.\n \"\"\"\n print(changelog_as_markdown())\n\n\ndef get_version(index=0):\n \"\"\"Read the current version from the changelog.\n \"\"\"\n with open(CHANGELOG) as f:\n cur_index = 0\n for line in f:\n match = re.search(r'^\\d+\\.\\d+\\.\\d+', line)\n if match:\n if cur_index == index:\n return match.group(0)\n else:\n cur_index += 1\n\n\n@release.command()\ndef version():\n \"\"\"Display the current version.\n \"\"\"\n print(get_version())\n\n\n@release.command()\ndef datestamp():\n \"\"\"Enter today's date as the release date in the changelog.\n \"\"\"\n dt = datetime.datetime.now()\n stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year)\n marker = '(in development)'\n\n lines = []\n underline_length = None\n with open(CHANGELOG) as f:\n for line in f:\n if marker in line:\n # The header line.\n line = line.replace(marker, stamp)\n lines.append(line)\n underline_length = len(line.strip())\n elif underline_length:\n # This is the line after the header. Rewrite the dashes.\n lines.append('-' * underline_length + '\\n')\n underline_length = None\n else:\n lines.append(line)\n\n with open(CHANGELOG, 'w') as f:\n for line in lines:\n f.write(line)\n\n\n@release.command()\ndef prep():\n \"\"\"Run all steps to prepare a release.\n\n - Tag the commit.\n - Build the sdist package.\n - Generate the Markdown changelog to ``changelog.md``.\n - Bump the version number to the next version.\n \"\"\"\n cur_version = get_version()\n\n # Tag.\n subprocess.check_output(['git', 'tag', 'v{}'.format(cur_version)])\n\n # Build.\n with chdir(BASE):\n subprocess.check_call(['python2', 'setup.py', 'sdist'])\n\n # Generate Markdown changelog.\n cl = changelog_as_markdown()\n with open(os.path.join(BASE, 'changelog.md'), 'w') as f:\n f.write(cl)\n\n # Version number bump.\n # FIXME It should be possible to specify this as an argument.\n version_parts = [int(n) for n in cur_version.split('.')]\n version_parts[-1] += 1\n next_version = u'.'.join(map(str, version_parts))\n bump_version(next_version)\n\n\n@release.command()\ndef publish():\n \"\"\"Unleash a release unto the world.\n\n - Push the tag to GitHub.\n - Upload to PyPI.\n \"\"\"\n version = get_version(1)\n\n # Push to GitHub.\n with chdir(BASE):\n subprocess.check_call(['git', 'push'])\n subprocess.check_call(['git', 'push', '--tags'])\n\n # Upload to PyPI.\n path = os.path.join(BASE, 'dist', 'beets-{}.tar.gz'.format(version))\n subprocess.check_call(['twine', 'upload', path])\n\n\nif __name__ == '__main__':\n release()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":203697,"cells":{"repo_name":{"kind":"string","value":"romankagan/DDBWorkbench"},"path":{"kind":"string","value":"python/lib/Lib/site-packages/django/contrib/gis/geos/prototypes/predicates.py"},"copies":{"kind":"string","value":"623"},"size":{"kind":"string","value":"1777"},"content":{"kind":"string","value":"\"\"\"\n This module houses the GEOS ctypes prototype functions for the\n unary and binary predicate operations on geometries.\n\"\"\"\nfrom ctypes import c_char, c_char_p, c_double\nfrom django.contrib.gis.geos.libgeos import GEOM_PTR\nfrom django.contrib.gis.geos.prototypes.errcheck import check_predicate\nfrom django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc\n\n## Binary & unary predicate functions ##\ndef binary_predicate(func, *args):\n \"For GEOS binary predicate functions.\"\n argtypes = [GEOM_PTR, GEOM_PTR]\n if args: argtypes += args\n func.argtypes = argtypes\n func.restype = c_char\n func.errcheck = check_predicate\n return func\n\ndef unary_predicate(func):\n \"For GEOS unary predicate functions.\"\n func.argtypes = [GEOM_PTR]\n func.restype = c_char\n func.errcheck = check_predicate\n return func\n\n## Unary Predicates ##\ngeos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))\ngeos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))\ngeos_isring = unary_predicate(GEOSFunc('GEOSisRing'))\ngeos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))\ngeos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))\n\n## Binary Predicates ##\ngeos_contains = binary_predicate(GEOSFunc('GEOSContains'))\ngeos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))\ngeos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))\ngeos_equals = binary_predicate(GEOSFunc('GEOSEquals'))\ngeos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)\ngeos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))\ngeos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))\ngeos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)\ngeos_touches = binary_predicate(GEOSFunc('GEOSTouches'))\ngeos_within = binary_predicate(GEOSFunc('GEOSWithin'))\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":203698,"cells":{"repo_name":{"kind":"string","value":"RomainBrault/scikit-learn"},"path":{"kind":"string","value":"sklearn/externals/joblib/numpy_pickle_compat.py"},"copies":{"kind":"string","value":"78"},"size":{"kind":"string","value":"8439"},"content":{"kind":"string","value":"\"\"\"Numpy pickle compatibility functions.\"\"\"\n\nimport pickle\nimport os\nimport zlib\nfrom io import BytesIO\n\nfrom ._compat import PY3_OR_LATER\nfrom .numpy_pickle_utils import _ZFILE_PREFIX\nfrom .numpy_pickle_utils import Unpickler\n\n\ndef hex_str(an_int):\n \"\"\"Convert an int to an hexadecimal string.\"\"\"\n return '{:#x}'.format(an_int)\n\nif PY3_OR_LATER:\n def asbytes(s):\n if isinstance(s, bytes):\n return s\n return s.encode('latin1')\nelse:\n asbytes = str\n\n_MAX_LEN = len(hex_str(2 ** 64))\n_CHUNK_SIZE = 64 * 1024\n\n\ndef read_zfile(file_handle):\n \"\"\"Read the z-file and return the content as a string.\n\n Z-files are raw data compressed with zlib used internally by joblib\n for persistence. Backward compatibility is not guaranteed. Do not\n use for external purposes.\n \"\"\"\n file_handle.seek(0)\n header_length = len(_ZFILE_PREFIX) + _MAX_LEN\n length = file_handle.read(header_length)\n length = length[len(_ZFILE_PREFIX):]\n length = int(length, 16)\n\n # With python2 and joblib version <= 0.8.4 compressed pickle header is one\n # character wider so we need to ignore an additional space if present.\n # Note: the first byte of the zlib data is guaranteed not to be a\n # space according to\n # https://tools.ietf.org/html/rfc6713#section-2.1\n next_byte = file_handle.read(1)\n if next_byte != b' ':\n # The zlib compressed data has started and we need to go back\n # one byte\n file_handle.seek(header_length)\n\n # We use the known length of the data to tell Zlib the size of the\n # buffer to allocate.\n data = zlib.decompress(file_handle.read(), 15, length)\n assert len(data) == length, (\n \"Incorrect data length while decompressing %s.\"\n \"The file could be corrupted.\" % file_handle)\n return data\n\n\ndef write_zfile(file_handle, data, compress=1):\n \"\"\"Write the data in the given file as a Z-file.\n\n Z-files are raw data compressed with zlib used internally by joblib\n for persistence. Backward compatibility is not guarantied. Do not\n use for external purposes.\n \"\"\"\n file_handle.write(_ZFILE_PREFIX)\n length = hex_str(len(data))\n # Store the length of the data\n file_handle.write(asbytes(length.ljust(_MAX_LEN)))\n file_handle.write(zlib.compress(asbytes(data), compress))\n\n###############################################################################\n# Utility objects for persistence.\n\n\nclass NDArrayWrapper(object):\n \"\"\"An object to be persisted instead of numpy arrays.\n\n The only thing this object does, is to carry the filename in which\n the array has been persisted, and the array subclass.\n \"\"\"\n\n def __init__(self, filename, subclass, allow_mmap=True):\n \"\"\"Constructor. Store the useful information for later.\"\"\"\n self.filename = filename\n self.subclass = subclass\n self.allow_mmap = allow_mmap\n\n def read(self, unpickler):\n \"\"\"Reconstruct the array.\"\"\"\n filename = os.path.join(unpickler._dirname, self.filename)\n # Load the array from the disk\n # use getattr instead of self.allow_mmap to ensure backward compat\n # with NDArrayWrapper instances pickled with joblib < 0.9.0\n allow_mmap = getattr(self, 'allow_mmap', True)\n memmap_kwargs = ({} if not allow_mmap\n else {'mmap_mode': unpickler.mmap_mode})\n array = unpickler.np.load(filename, **memmap_kwargs)\n # Reconstruct subclasses. This does not work with old\n # versions of numpy\n if (hasattr(array, '__array_prepare__') and\n self.subclass not in (unpickler.np.ndarray,\n unpickler.np.memmap)):\n # We need to reconstruct another subclass\n new_array = unpickler.np.core.multiarray._reconstruct(\n self.subclass, (0,), 'b')\n return new_array.__array_prepare__(array)\n else:\n return array\n\n\nclass ZNDArrayWrapper(NDArrayWrapper):\n \"\"\"An object to be persisted instead of numpy arrays.\n\n This object store the Zfile filename in which\n the data array has been persisted, and the meta information to\n retrieve it.\n The reason that we store the raw buffer data of the array and\n the meta information, rather than array representation routine\n (tostring) is that it enables us to use completely the strided\n model to avoid memory copies (a and a.T store as fast). In\n addition saving the heavy information separately can avoid\n creating large temporary buffers when unpickling data with\n large arrays.\n \"\"\"\n\n def __init__(self, filename, init_args, state):\n \"\"\"Constructor. Store the useful information for later.\"\"\"\n self.filename = filename\n self.state = state\n self.init_args = init_args\n\n def read(self, unpickler):\n \"\"\"Reconstruct the array from the meta-information and the z-file.\"\"\"\n # Here we a simply reproducing the unpickling mechanism for numpy\n # arrays\n filename = os.path.join(unpickler._dirname, self.filename)\n array = unpickler.np.core.multiarray._reconstruct(*self.init_args)\n with open(filename, 'rb') as f:\n data = read_zfile(f)\n state = self.state + (data,)\n array.__setstate__(state)\n return array\n\n\nclass ZipNumpyUnpickler(Unpickler):\n \"\"\"A subclass of the Unpickler to unpickle our numpy pickles.\"\"\"\n\n dispatch = Unpickler.dispatch.copy()\n\n def __init__(self, filename, file_handle, mmap_mode=None):\n \"\"\"Constructor.\"\"\"\n self._filename = os.path.basename(filename)\n self._dirname = os.path.dirname(filename)\n self.mmap_mode = mmap_mode\n self.file_handle = self._open_pickle(file_handle)\n Unpickler.__init__(self, self.file_handle)\n try:\n import numpy as np\n except ImportError:\n np = None\n self.np = np\n\n def _open_pickle(self, file_handle):\n return BytesIO(read_zfile(file_handle))\n\n def load_build(self):\n \"\"\"Set the state of a newly created object.\n\n We capture it to replace our place-holder objects,\n NDArrayWrapper, by the array we are interested in. We\n replace them directly in the stack of pickler.\n \"\"\"\n Unpickler.load_build(self)\n if isinstance(self.stack[-1], NDArrayWrapper):\n if self.np is None:\n raise ImportError(\"Trying to unpickle an ndarray, \"\n \"but numpy didn't import correctly\")\n nd_array_wrapper = self.stack.pop()\n array = nd_array_wrapper.read(self)\n self.stack.append(array)\n\n # Be careful to register our new method.\n if PY3_OR_LATER:\n dispatch[pickle.BUILD[0]] = load_build\n else:\n dispatch[pickle.BUILD] = load_build\n\n\ndef load_compatibility(filename):\n \"\"\"Reconstruct a Python object from a file persisted with joblib.dump.\n\n This function ensures the compatibility with joblib old persistence format\n (<= 0.9.3).\n\n Parameters\n -----------\n filename: string\n The name of the file from which to load the object\n\n Returns\n -------\n result: any Python object\n The object stored in the file.\n\n See Also\n --------\n joblib.dump : function to save an object\n\n Notes\n -----\n\n This function can load numpy array files saved separately during the\n dump.\n \"\"\"\n with open(filename, 'rb') as file_handle:\n # We are careful to open the file handle early and keep it open to\n # avoid race-conditions on renames. That said, if data is stored in\n # companion files, moving the directory will create a race when\n # joblib tries to access the companion files.\n unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)\n try:\n obj = unpickler.load()\n except UnicodeDecodeError as exc:\n # More user-friendly error message\n if PY3_OR_LATER:\n new_exc = ValueError(\n 'You may be trying to read with '\n 'python 3 a joblib pickle generated with python 2. '\n 'This feature is not supported by joblib.')\n new_exc.__cause__ = exc\n raise new_exc\n finally:\n if hasattr(unpickler, 'file_handle'):\n unpickler.file_handle.close()\n return obj\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":203699,"cells":{"repo_name":{"kind":"string","value":"patrickstocklin/chattR"},"path":{"kind":"string","value":"lib/python2.7/site-packages/nltk/parse/nonprojectivedependencyparser.py"},"copies":{"kind":"string","value":"8"},"size":{"kind":"string","value":"29287"},"content":{"kind":"string","value":"# Natural Language Toolkit: Dependency Grammars\n#\n# Copyright (C) 2001-2015 NLTK Project\n# Author: Jason Narad \n#\n# URL: \n# For license information, see LICENSE.TXT\n#\nfrom __future__ import print_function\n\nimport math\nimport logging\n\nfrom nltk.compat import xrange\n\nfrom nltk.parse.dependencygraph import DependencyGraph\n\nlogger = logging.getLogger(__name__)\n\n#################################################################\n# DependencyScorerI - Interface for Graph-Edge Weight Calculation\n#################################################################\n\n\nclass DependencyScorerI(object):\n \"\"\"\n A scorer for calculated the weights on the edges of a weighted\n dependency graph. This is used by a\n ``ProbabilisticNonprojectiveParser`` to initialize the edge\n weights of a ``DependencyGraph``. While typically this would be done\n by training a binary classifier, any class that can return a\n multidimensional list representation of the edge weights can\n implement this interface. As such, it has no necessary\n fields.\n \"\"\"\n\n def __init__(self):\n if self.__class__ == DependencyScorerI:\n raise TypeError('DependencyScorerI is an abstract interface')\n\n def train(self, graphs):\n \"\"\"\n :type graphs: list(DependencyGraph)\n :param graphs: A list of dependency graphs to train the scorer.\n Typically the edges present in the graphs can be used as\n positive training examples, and the edges not present as negative\n examples.\n \"\"\"\n raise NotImplementedError()\n\n def score(self, graph):\n \"\"\"\n :type graph: DependencyGraph\n :param graph: A dependency graph whose set of edges need to be\n scored.\n :rtype: A three-dimensional list of numbers.\n :return: The score is returned in a multidimensional(3) list, such\n that the outer-dimension refers to the head, and the\n inner-dimension refers to the dependencies. For instance,\n scores[0][1] would reference the list of scores corresponding to\n arcs from node 0 to node 1. The node's 'address' field can be used\n to determine its number identification.\n\n For further illustration, a score list corresponding to Fig.2 of\n Keith Hall's 'K-best Spanning Tree Parsing' paper:\n scores = [[[], [5], [1], [1]],\n [[], [], [11], [4]],\n [[], [10], [], [5]],\n [[], [8], [8], []]]\n When used in conjunction with a MaxEntClassifier, each score would\n correspond to the confidence of a particular edge being classified\n with the positive training examples.\n \"\"\"\n raise NotImplementedError()\n\n#################################################################\n# NaiveBayesDependencyScorer\n#################################################################\n\n\nclass NaiveBayesDependencyScorer(DependencyScorerI):\n \"\"\"\n A dependency scorer built around a MaxEnt classifier. In this\n particular class that classifier is a ``NaiveBayesClassifier``.\n It uses head-word, head-tag, child-word, and child-tag features\n for classification.\n\n >>> from nltk.parse.dependencygraph import DependencyGraph, conll_data2\n\n >>> graphs = [DependencyGraph(entry) for entry in conll_data2.split('\\\\n\\\\n') if entry]\n >>> npp = ProbabilisticNonprojectiveParser()\n >>> npp.train(graphs, NaiveBayesDependencyScorer())\n >>> parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc'])\n >>> len(list(parses))\n 1\n\n \"\"\"\n\n def __init__(self):\n pass # Do nothing without throwing error\n\n def train(self, graphs):\n \"\"\"\n Trains a ``NaiveBayesClassifier`` using the edges present in\n graphs list as positive examples, the edges not present as\n negative examples. Uses a feature vector of head-word,\n head-tag, child-word, and child-tag.\n\n :type graphs: list(DependencyGraph)\n :param graphs: A list of dependency graphs to train the scorer.\n \"\"\"\n\n from nltk.classify import NaiveBayesClassifier\n\n # Create training labeled training examples\n labeled_examples = []\n for graph in graphs:\n for head_node in graph.nodes.values():\n for child_index, child_node in graph.nodes.items():\n if child_index in head_node['deps']:\n label = \"T\"\n else:\n label = \"F\"\n labeled_examples.append(\n (\n dict(\n a=head_node['word'],\n b=head_node['tag'],\n c=child_node['word'],\n d=child_node['tag'],\n ),\n label,\n )\n )\n\n self.classifier = NaiveBayesClassifier.train(labeled_examples)\n\n def score(self, graph):\n \"\"\"\n Converts the graph into a feature-based representation of\n each edge, and then assigns a score to each based on the\n confidence of the classifier in assigning it to the\n positive label. Scores are returned in a multidimensional list.\n\n :type graph: DependencyGraph\n :param graph: A dependency graph to score.\n :rtype: 3 dimensional list\n :return: Edge scores for the graph parameter.\n \"\"\"\n # Convert graph to feature representation\n edges = []\n for head_node in graph.nodes.values():\n for child_node in graph.nodes.values():\n edges.append(\n (\n dict(\n a=head_node['word'],\n b=head_node['tag'],\n c=child_node['word'],\n d=child_node['tag'],\n )\n )\n )\n\n # Score edges\n edge_scores = []\n row = []\n count = 0\n for pdist in self.classifier.prob_classify_many(edges):\n logger.debug('%.4f %.4f', pdist.prob('T'), pdist.prob('F'))\n # smoothing in case the probability = 0\n row.append([math.log(pdist.prob(\"T\")+0.00000000001)])\n count += 1\n if count == len(graph.nodes):\n edge_scores.append(row)\n row = []\n count = 0\n return edge_scores\n\n\n#################################################################\n# A Scorer for Demo Purposes\n#################################################################\n# A short class necessary to show parsing example from paper\nclass DemoScorer(DependencyScorerI):\n def train(self, graphs):\n print('Training...')\n\n def score(self, graph):\n # scores for Keith Hall 'K-best Spanning Tree Parsing' paper\n return [[[], [5], [1], [1]],\n [[], [], [11], [4]],\n [[], [10], [], [5]],\n [[], [8], [8], []]]\n\n#################################################################\n# Non-Projective Probabilistic Parsing\n#################################################################\n\n\nclass ProbabilisticNonprojectiveParser(object):\n \"\"\"A probabilistic non-projective dependency parser.\n\n Nonprojective dependencies allows for \"crossing branches\" in the parse tree\n which is necessary for representing particular linguistic phenomena, or even\n typical parses in some languages. This parser follows the MST parsing\n algorithm, outlined in McDonald(2005), which likens the search for the best\n non-projective parse to finding the maximum spanning tree in a weighted\n directed graph.\n\n >>> class Scorer(DependencyScorerI):\n ... def train(self, graphs):\n ... pass\n ...\n ... def score(self, graph):\n ... return [\n ... [[], [5], [1], [1]],\n ... [[], [], [11], [4]],\n ... [[], [10], [], [5]],\n ... [[], [8], [8], []],\n ... ]\n\n\n >>> npp = ProbabilisticNonprojectiveParser()\n >>> npp.train([], Scorer())\n\n >>> parses = npp.parse(['v1', 'v2', 'v3'], [None, None, None])\n >>> len(list(parses))\n 1\n\n Rule based example\n ------------------\n\n >>> from nltk.grammar import DependencyGrammar\n\n >>> grammar = DependencyGrammar.fromstring('''\n ... 'taught' -> 'play' | 'man'\n ... 'man' -> 'the' | 'in'\n ... 'in' -> 'corner'\n ... 'corner' -> 'the'\n ... 'play' -> 'golf' | 'dachshund' | 'to'\n ... 'dachshund' -> 'his'\n ... ''')\n\n >>> ndp = NonprojectiveDependencyParser(grammar)\n >>> parses = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf'])\n >>> len(list(parses))\n 4\n\n \"\"\"\n def __init__(self):\n \"\"\"\n Creates a new non-projective parser.\n \"\"\"\n logging.debug('initializing prob. nonprojective...')\n\n def train(self, graphs, dependency_scorer):\n \"\"\"\n Trains a ``DependencyScorerI`` from a set of ``DependencyGraph`` objects,\n and establishes this as the parser's scorer. This is used to\n initialize the scores on a ``DependencyGraph`` during the parsing\n procedure.\n\n :type graphs: list(DependencyGraph)\n :param graphs: A list of dependency graphs to train the scorer.\n :type dependency_scorer: DependencyScorerI\n :param dependency_scorer: A scorer which implements the\n ``DependencyScorerI`` interface.\n \"\"\"\n self._scorer = dependency_scorer\n self._scorer.train(graphs)\n\n def initialize_edge_scores(self, graph):\n \"\"\"\n Assigns a score to every edge in the ``DependencyGraph`` graph.\n These scores are generated via the parser's scorer which\n was assigned during the training process.\n\n :type graph: DependencyGraph\n :param graph: A dependency graph to assign scores to.\n \"\"\"\n self.scores = self._scorer.score(graph)\n\n def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph):\n \"\"\"\n Takes a list of nodes that have been identified to belong to a cycle,\n and collapses them into on larger node. The arcs of all nodes in\n the graph must be updated to account for this.\n\n :type new_node: Node.\n :param new_node: A Node (Dictionary) to collapse the cycle nodes into.\n :type cycle_path: A list of integers.\n :param cycle_path: A list of node addresses, each of which is in the cycle.\n :type g_graph, b_graph, c_graph: DependencyGraph\n :param g_graph, b_graph, c_graph: Graphs which need to be updated.\n \"\"\"\n logger.debug('Collapsing nodes...')\n # Collapse all cycle nodes into v_n+1 in G_Graph\n for cycle_node_index in cycle_path:\n g_graph.remove_by_address(cycle_node_index)\n g_graph.add_node(new_node)\n g_graph.redirect_arcs(cycle_path, new_node['address'])\n\n def update_edge_scores(self, new_node, cycle_path):\n \"\"\"\n Updates the edge scores to reflect a collapse operation into\n new_node.\n\n :type new_node: A Node.\n :param new_node: The node which cycle nodes are collapsed into.\n :type cycle_path: A list of integers.\n :param cycle_path: A list of node addresses that belong to the cycle.\n \"\"\"\n logger.debug('cycle %s', cycle_path)\n\n cycle_path = self.compute_original_indexes(cycle_path)\n\n logger.debug('old cycle %s', cycle_path)\n logger.debug('Prior to update: %s', self.scores)\n\n for i, row in enumerate(self.scores):\n for j, column in enumerate(self.scores[i]):\n logger.debug(self.scores[i][j])\n if (\n j in cycle_path\n and i not in cycle_path\n and self.scores[i][j]\n ):\n subtract_val = self.compute_max_subtract_score(j, cycle_path)\n\n logger.debug('%s - %s', self.scores[i][j], subtract_val)\n\n new_vals = []\n for cur_val in self.scores[i][j]:\n new_vals.append(cur_val - subtract_val)\n\n self.scores[i][j] = new_vals\n\n for i, row in enumerate(self.scores):\n for j, cell in enumerate(self.scores[i]):\n if i in cycle_path and j in cycle_path:\n self.scores[i][j] = []\n\n logger.debug('After update: %s', self.scores)\n\n def compute_original_indexes(self, new_indexes):\n \"\"\"\n As nodes are collapsed into others, they are replaced\n by the new node in the graph, but it's still necessary\n to keep track of what these original nodes were. This\n takes a list of node addresses and replaces any collapsed\n node addresses with their original addresses.\n\n :type new_indexes: A list of integers.\n :param new_indexes: A list of node addresses to check for\n subsumed nodes.\n \"\"\"\n swapped = True\n while swapped:\n originals = []\n swapped = False\n for new_index in new_indexes:\n if new_index in self.inner_nodes:\n for old_val in self.inner_nodes[new_index]:\n if old_val not in originals:\n originals.append(old_val)\n swapped = True\n else:\n originals.append(new_index)\n new_indexes = originals\n return new_indexes\n\n def compute_max_subtract_score(self, column_index, cycle_indexes):\n \"\"\"\n When updating scores the score of the highest-weighted incoming\n arc is subtracted upon collapse. This returns the correct\n amount to subtract from that edge.\n\n :type column_index: integer.\n :param column_index: A index representing the column of incoming arcs\n to a particular node being updated\n :type cycle_indexes: A list of integers.\n :param cycle_indexes: Only arcs from cycle nodes are considered. This\n is a list of such nodes addresses.\n \"\"\"\n max_score = -100000\n for row_index in cycle_indexes:\n for subtract_val in self.scores[row_index][column_index]:\n if subtract_val > max_score:\n max_score = subtract_val\n return max_score\n\n def best_incoming_arc(self, node_index):\n \"\"\"\n Returns the source of the best incoming arc to the\n node with address: node_index\n\n :type node_index: integer.\n :param node_index: The address of the 'destination' node,\n the node that is arced to.\n \"\"\"\n originals = self.compute_original_indexes([node_index])\n logger.debug('originals: %s', originals)\n\n max_arc = None\n max_score = None\n for row_index in range(len(self.scores)):\n for col_index in range(len(self.scores[row_index])):\n # print self.scores[row_index][col_index]\n if col_index in originals and (max_score is None or self.scores[row_index][col_index] > max_score):\n max_score = self.scores[row_index][col_index]\n max_arc = row_index\n logger.debug('%s, %s', row_index, col_index)\n\n logger.debug(max_score)\n\n for key in self.inner_nodes:\n replaced_nodes = self.inner_nodes[key]\n if max_arc in replaced_nodes:\n return key\n\n return max_arc\n\n def original_best_arc(self, node_index):\n originals = self.compute_original_indexes([node_index])\n max_arc = None\n max_score = None\n max_orig = None\n for row_index in range(len(self.scores)):\n for col_index in range(len(self.scores[row_index])):\n if col_index in originals and (max_score is None or self.scores[row_index][col_index] > max_score):\n max_score = self.scores[row_index][col_index]\n max_arc = row_index\n max_orig = col_index\n return [max_arc, max_orig]\n\n def parse(self, tokens, tags):\n \"\"\"\n Parses a list of tokens in accordance to the MST parsing algorithm\n for non-projective dependency parses. Assumes that the tokens to\n be parsed have already been tagged and those tags are provided. Various\n scoring methods can be used by implementing the ``DependencyScorerI``\n interface and passing it to the training algorithm.\n\n :type tokens: list(str)\n :param tokens: A list of words or punctuation to be parsed.\n :type tags: list(str)\n :param tags: A list of tags corresponding by index to the words in the tokens list.\n :return: An iterator of non-projective parses.\n :rtype: iter(DependencyGraph)\n \"\"\"\n self.inner_nodes = {}\n\n # Initialize g_graph\n g_graph = DependencyGraph()\n for index, token in enumerate(tokens):\n g_graph.nodes[index + 1].update(\n {\n 'word': token,\n 'tag': tags[index],\n 'rel': 'NTOP',\n 'address': index + 1,\n }\n )\n #print (g_graph.nodes)\n\n\n # Fully connect non-root nodes in g_graph\n g_graph.connect_graph()\n original_graph = DependencyGraph()\n for index, token in enumerate(tokens):\n original_graph.nodes[index + 1].update(\n {\n 'word': token,\n 'tag': tags[index],\n 'rel': 'NTOP',\n 'address': index+1,\n }\n )\n\n b_graph = DependencyGraph()\n c_graph = DependencyGraph()\n\n for index, token in enumerate(tokens):\n c_graph.nodes[index + 1].update(\n {\n 'word': token,\n 'tag': tags[index],\n 'rel': 'NTOP',\n 'address': index + 1,\n }\n )\n\n # Assign initial scores to g_graph edges\n self.initialize_edge_scores(g_graph)\n logger.debug(self.scores)\n # Initialize a list of unvisited vertices (by node address)\n unvisited_vertices = [\n vertex['address'] for vertex in c_graph.nodes.values()\n ]\n # Iterate over unvisited vertices\n nr_vertices = len(tokens)\n betas = {}\n while unvisited_vertices:\n # Mark current node as visited\n current_vertex = unvisited_vertices.pop(0)\n logger.debug('current_vertex: %s', current_vertex)\n # Get corresponding node n_i to vertex v_i\n current_node = g_graph.get_by_address(current_vertex)\n logger.debug('current_node: %s', current_node)\n # Get best in-edge node b for current node\n best_in_edge = self.best_incoming_arc(current_vertex)\n betas[current_vertex] = self.original_best_arc(current_vertex)\n logger.debug('best in arc: %s --> %s', best_in_edge, current_vertex)\n # b_graph = Union(b_graph, b)\n for new_vertex in [current_vertex, best_in_edge]:\n b_graph.nodes[new_vertex].update(\n {\n 'word': 'TEMP',\n 'rel': 'NTOP',\n 'address': new_vertex,\n }\n )\n b_graph.add_arc(best_in_edge, current_vertex)\n # Beta(current node) = b - stored for parse recovery\n # If b_graph contains a cycle, collapse it\n cycle_path = b_graph.contains_cycle()\n if cycle_path:\n # Create a new node v_n+1 with address = len(nodes) + 1\n new_node = {\n 'word': 'NONE',\n 'rel': 'NTOP',\n 'address': nr_vertices + 1,\n }\n # c_graph = Union(c_graph, v_n+1)\n c_graph.add_node(new_node)\n # Collapse all nodes in cycle C into v_n+1\n self.update_edge_scores(new_node, cycle_path)\n self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph)\n for cycle_index in cycle_path:\n c_graph.add_arc(new_node['address'], cycle_index)\n # self.replaced_by[cycle_index] = new_node['address']\n\n self.inner_nodes[new_node['address']] = cycle_path\n\n # Add v_n+1 to list of unvisited vertices\n unvisited_vertices.insert(0, nr_vertices + 1)\n\n # increment # of nodes counter\n nr_vertices += 1\n\n # Remove cycle nodes from b_graph; B = B - cycle c\n for cycle_node_address in cycle_path:\n b_graph.remove_by_address(cycle_node_address)\n\n logger.debug('g_graph: %s', g_graph)\n logger.debug('b_graph: %s', b_graph)\n logger.debug('c_graph: %s', c_graph)\n logger.debug('Betas: %s', betas)\n logger.debug('replaced nodes %s', self.inner_nodes)\n\n # Recover parse tree\n logger.debug('Final scores: %s', self.scores)\n\n logger.debug('Recovering parse...')\n for i in range(len(tokens) + 1, nr_vertices + 1):\n betas[betas[i][1]] = betas[i]\n\n logger.debug('Betas: %s', betas)\n for node in original_graph.nodes.values():\n # TODO: It's dangerous to assume that deps it a dictionary\n # because it's a default dictionary. Ideally, here we should not\n # be concerned how dependencies are stored inside of a dependency\n # graph.\n node['deps'] = {}\n for i in range(1, len(tokens) + 1):\n original_graph.add_arc(betas[i][0], betas[i][1])\n\n logger.debug('Done.')\n yield original_graph\n\n#################################################################\n# Rule-based Non-Projective Parser\n#################################################################\n\n\nclass NonprojectiveDependencyParser(object):\n \"\"\"\n A non-projective, rule-based, dependency parser. This parser\n will return the set of all possible non-projective parses based on\n the word-to-word relations defined in the parser's dependency\n grammar, and will allow the branches of the parse tree to cross\n in order to capture a variety of linguistic phenomena that a\n projective parser will not.\n \"\"\"\n\n def __init__(self, dependency_grammar):\n \"\"\"\n Creates a new ``NonprojectiveDependencyParser``.\n\n :param dependency_grammar: a grammar of word-to-word relations.\n :type dependency_grammar: DependencyGrammar\n \"\"\"\n self._grammar = dependency_grammar\n\n def parse(self, tokens):\n \"\"\"\n Parses the input tokens with respect to the parser's grammar. Parsing\n is accomplished by representing the search-space of possible parses as\n a fully-connected directed graph. Arcs that would lead to ungrammatical\n parses are removed and a lattice is constructed of length n, where n is\n the number of input tokens, to represent all possible grammatical\n traversals. All possible paths through the lattice are then enumerated\n to produce the set of non-projective parses.\n\n param tokens: A list of tokens to parse.\n type tokens: list(str)\n return: An iterator of non-projective parses.\n rtype: iter(DependencyGraph)\n \"\"\"\n # Create graph representation of tokens\n self._graph = DependencyGraph()\n\n for index, token in enumerate(tokens):\n self._graph.nodes[index] = {\n 'word': token,\n 'deps': [],\n 'rel': 'NTOP',\n 'address': index,\n }\n\n for head_node in self._graph.nodes.values():\n deps = []\n for dep_node in self._graph.nodes.values() :\n if (\n self._grammar.contains(head_node['word'], dep_node['word'])\n and head_node['word'] != dep_node['word']\n ):\n deps.append(dep_node['address'])\n head_node['deps'] = deps\n\n # Create lattice of possible heads\n roots = []\n possible_heads = []\n for i, word in enumerate(tokens):\n heads = []\n for j, head in enumerate(tokens):\n if (i != j) and self._grammar.contains(head, word):\n heads.append(j)\n if len(heads) == 0:\n roots.append(i)\n possible_heads.append(heads)\n\n # Set roots to attempt\n if len(roots) < 2:\n if len(roots) == 0:\n for i in range(len(tokens)):\n roots.append(i)\n\n # Traverse lattice\n analyses = []\n for root in roots:\n stack = []\n analysis = [[] for i in range(len(possible_heads))]\n i = 0\n forward = True\n while i >= 0:\n if forward:\n if len(possible_heads[i]) == 1:\n analysis[i] = possible_heads[i][0]\n elif len(possible_heads[i]) == 0:\n analysis[i] = -1\n else:\n head = possible_heads[i].pop()\n analysis[i] = head\n stack.append([i, head])\n if not forward:\n index_on_stack = False\n for stack_item in stack:\n if stack_item[0] == i:\n index_on_stack = True\n orig_length = len(possible_heads[i])\n\n if index_on_stack and orig_length == 0:\n for j in xrange(len(stack) - 1, -1, -1):\n stack_item = stack[j]\n if stack_item[0] == i:\n possible_heads[i].append(stack.pop(j)[1])\n\n elif index_on_stack and orig_length > 0:\n head = possible_heads[i].pop()\n analysis[i] = head\n stack.append([i, head])\n forward = True\n\n if i + 1 == len(possible_heads):\n analyses.append(analysis[:])\n forward = False\n if forward:\n i += 1\n else:\n i -= 1\n\n # Filter parses\n # ensure 1 root, every thing has 1 head\n for analysis in analyses:\n if analysis.count(-1) > 1:\n # there are several root elements!\n continue\n\n graph = DependencyGraph()\n graph.root = graph.nodes[analysis.index(-1) + 1]\n\n for address, (token, head_index) in enumerate(zip(tokens, analysis), start=1):\n head_address = head_index + 1\n\n node = graph.nodes[address]\n node.update(\n {\n 'word': token,\n 'address': address,\n }\n )\n\n if head_address == 0:\n rel = 'ROOT'\n else:\n rel = ''\n graph.nodes[head_index + 1]['deps'][rel].append(address)\n\n # TODO: check for cycles\n yield graph\n\n\n#################################################################\n# Demos\n#################################################################\n\ndef demo():\n # hall_demo()\n nonprojective_conll_parse_demo()\n rule_based_demo()\n\n\ndef hall_demo():\n npp = ProbabilisticNonprojectiveParser()\n npp.train([], DemoScorer())\n for parse_graph in npp.parse(['v1', 'v2', 'v3'], [None, None, None]):\n print(parse_graph)\n\n\ndef nonprojective_conll_parse_demo():\n from nltk.parse.dependencygraph import conll_data2\n\n graphs = [\n DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry\n ]\n npp = ProbabilisticNonprojectiveParser()\n npp.train(graphs, NaiveBayesDependencyScorer())\n for parse_graph in npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc']):\n print(parse_graph)\n\n\ndef rule_based_demo():\n from nltk.grammar import DependencyGrammar\n\n grammar = DependencyGrammar.fromstring(\"\"\"\n 'taught' -> 'play' | 'man'\n 'man' -> 'the' | 'in'\n 'in' -> 'corner'\n 'corner' -> 'the'\n 'play' -> 'golf' | 'dachshund' | 'to'\n 'dachshund' -> 'his'\n \"\"\")\n print(grammar)\n ndp = NonprojectiveDependencyParser(grammar)\n graphs = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf'])\n print('Graphs:')\n for graph in graphs:\n print(graph)\n\nif __name__ == '__main__':\n demo()\n"},"license":{"kind":"string","value":"gpl-2.0"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":2036,"numItemsPerPage":100,"numTotalItems":203850,"offset":203600,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjEzNDQyMywic3ViIjoiL2RhdGFzZXRzL3Rob213b2xmL2dpdGh1Yi1kYXRhc2V0IiwiZXhwIjoxNzU2MTM4MDIzLCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.AZeb6gAp_VS6iv53NA1X7oUUTIXe1EyFOTj_DTSu0c0zuEb41FujzcffUi-oV4D06RYjLDtTwE3SS5B9PSwzAg","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
    repo_name
    stringlengths
    5
    100
    path
    stringlengths
    4
    294
    copies
    stringclasses
    990 values
    size
    stringlengths
    4
    7
    content
    stringlengths
    666
    1M
    license
    stringclasses
    15 values
    tsdmgz/ansible
    test/runner/lib/changes.py
    66
    5904
    """Detect changes in Ansible code.""" from __future__ import absolute_import, print_function import re import os from lib.util import ( ApplicationError, SubprocessError, MissingEnvironmentVariable, CommonConfig, display, ) from lib.http import ( HttpClient, urlencode, ) from lib.git import ( Git, ) class InvalidBranch(ApplicationError): """Exception for invalid branch specification.""" def __init__(self, branch, reason): """ :type branch: str :type reason: str """ message = 'Invalid branch: %s\n%s' % (branch, reason) super(InvalidBranch, self).__init__(message) self.branch = branch class ChangeDetectionNotSupported(ApplicationError): """Exception for cases where change detection is not supported.""" pass class ShippableChanges(object): """Change information for Shippable build.""" def __init__(self, args, git): """ :type args: CommonConfig :type git: Git """ self.args = args try: self.branch = os.environ['BRANCH'] self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true' self.is_tag = os.environ['IS_GIT_TAG'] == 'true' self.commit = os.environ['COMMIT'] self.project_id = os.environ['PROJECT_ID'] except KeyError as ex: raise MissingEnvironmentVariable(name=ex.args[0]) if self.is_tag: raise ChangeDetectionNotSupported('Change detection is not supported for tags.') if self.is_pr: self.paths = sorted(git.get_diff_names(['origin/%s' % self.branch, '--'])) self.diff = git.get_diff(['origin/%s' % self.branch, '--']) else: merge_runs = self.get_merge_runs(self.project_id, self.branch) last_successful_commit = self.get_last_successful_commit(git, merge_runs) if last_successful_commit: self.paths = sorted(git.get_diff_names([last_successful_commit, self.commit])) self.diff = git.get_diff([last_successful_commit, self.commit]) else: # first run for branch self.paths = None # act as though change detection not enabled, do not filter targets self.diff = [] def get_merge_runs(self, project_id, branch): """ :type project_id: str :type branch: str :rtype: list[dict] """ params = dict( isPullRequest='false', projectIds=project_id, branch=branch, ) client = HttpClient(self.args, always=True) response = client.get('https://api.shippable.com/runs?%s' % urlencode(params)) return response.json() @staticmethod def get_last_successful_commit(git, merge_runs): """ :type git: Git :type merge_runs: dict | list[dict] :rtype: str """ if 'id' in merge_runs and merge_runs['id'] == 4004: display.warning('Unable to find project. Cannot determine changes. All tests will be executed.') return None merge_runs = sorted(merge_runs, key=lambda r: r['createdAt']) known_commits = set() last_successful_commit = None for merge_run in merge_runs: commit_sha = merge_run['commitSha'] if commit_sha not in known_commits: known_commits.add(commit_sha) if merge_run['statusCode'] == 30: if git.is_valid_ref(commit_sha): last_successful_commit = commit_sha if last_successful_commit is None: display.warning('No successful commit found. All tests will be executed.') return last_successful_commit class LocalChanges(object): """Change information for local work.""" def __init__(self, args, git): """ :type args: CommonConfig :type git: Git """ self.args = args self.current_branch = git.get_branch() if self.is_official_branch(self.current_branch): raise InvalidBranch(branch=self.current_branch, reason='Current branch is not a feature branch.') self.fork_branch = None self.fork_point = None self.local_branches = sorted(git.get_branches()) self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)]) for self.fork_branch in self.official_branches: try: self.fork_point = git.get_branch_fork_point(self.fork_branch) break except SubprocessError: pass if self.fork_point is None: raise ApplicationError('Unable to auto-detect fork branch and fork point.') # tracked files (including unchanged) self.tracked = sorted(git.get_file_names(['--cached'])) # untracked files (except ignored) self.untracked = sorted(git.get_file_names(['--others', '--exclude-standard'])) # tracked changes (including deletions) committed since the branch was forked self.committed = sorted(git.get_diff_names([self.fork_point, 'HEAD'])) # tracked changes (including deletions) which are staged self.staged = sorted(git.get_diff_names(['--cached'])) # tracked changes (including deletions) which are not staged self.unstaged = sorted(git.get_diff_names([])) # diff of all tracked files from fork point to working copy self.diff = git.get_diff([self.fork_point]) @staticmethod def is_official_branch(name): """ :type name: str :rtype: bool """ if name == 'devel': return True if re.match(r'^stable-[0-9]+\.[0-9]+$', name): return True return False
    gpl-3.0
    richrd/bx
    irc.py
    1
    29634
    # -*- coding: utf-8 -*- """irc.py: IRC Client Implementation Handles all neccessary parts of the IRC protocol for client connections. """ import os import sys import time import string import select import socket # Symbian S60 specific compatibility s60 = False if sys.platform == "symbian_s60": s60 = True sys.path.append("e:\\python") sys.path.append("c:\\python") sys.path.append("c:\\DATA\\python") from const import * from helpers import * class IRCClient: def __init__(self, host="", port=6667): """Independet irc client that will connect and sit in irc doing nothing. It can be subclassed to do whatever, with no need to worry about the protocol. """ # Server details self.host = host self.port = port # User info defaults self.def_nick = "bx" self.def_realname = "bx" self.def_ident = "bx" # User info self.realname = None self.ident = None self.irc_debugging = True # wether to show debug output # flag indicating wether the server throttled a connection attempt self.irc_throttled = 0 # wait 8 minutes if server is throttling connects self.throttle_wait = 8*60 # minimum time in seconds between sending lines to server self.send_throttling = 1 # encoding to use when sending data self.encoding = "utf-8" # encodings used to try to decode incomming data self.decodings = ["utf-8", "latin-1", "cp1251", "cp1252", "win1251", "iso-8859-1"] # FIXME tweak code to allways send at most 512 characters self.max_line_len = 400 # Maximum amount of characters sent to the server at once self.InitializeClient() def InitializeClient(self): """Reset variables before connecting.""" self.DebugLog("Initializing...") self.nick = None self.irc_connected = False self.irc_running = False self.irc_ready = False #self.irc_throttled = 0 self.sock = None self.sock_timeout = 20 # 20 sec timeout for socket self.select_timeout = .02 # Time of last successfull connection self.connection_time = None # Received data: # Received and decoded data is appended to raw_buffer. # It is split into lines which are appended to recv_buffer. # Lines in recv_buffer are processed and removed from the buffer periodically. self.read_chunk_size = 1024 self.raw_buffer = "" self.recv_buffer = [] self.last_receive = None self.last_ping_pong = None # Timestamp of the last ping from server self.pinged_server = False self.ping_after = 120 # Seconds of inactivity after which the server is pinged self.max_inactivity = 180 # Total time to wait before reconnecting self.send_buffer = [] # lines queued to be sent self.send_time = None # time of previous send def DebugLog(self, *args): args = map(arg_to_str, args) line = " ".join(args) self.PrintToLog(line) # Move this to DebugLog def PrintToLog(self, line): if not self.irc_debugging: return False log = line if not self.OnClientLog(log): print log def SetDebugging(self, b): self.irc_debugging = b def SetHost(self, host): self.host = host def SetPort(self, port): self.port = port def SetNick(self, nick): # Used to make sure current_nick is up to date self.nick = nick self.current_nick = nick def SetSendThrottling(self, seconds): # Set minimum time to wait between sending lines to the server self.send_throttling = seconds def SetEncoding(self, enc): # Set encoding to use when sending data self.encoding = enc def SetDecodings(self, decodings): # Set list of encodings used to try to decode incomming data self.decodings = decodings # # Events # def OnClientLog(self, line): # Used to intercept logs if returns True return False def OnConnectThrottled(self, reason=""): """Called when the server refuses to connect for some reason.""" self.DebugLog("OnConnectThrottled(", reason, ")") self.irc_throttled = 1 def OnConnected(self): self.irc_running = 1 self.DoIntroduce(self.nick) def OnDisconnected(self): pass def OnLoop(self): """Run each time the bot loop is run. Only used if overriden.""" pass def OnReceive(self, data): """Called when data is received from the server.""" pass def OnNickInUse(self, nick, reason): self.DebugLog("OnNickInUse(", nick, reason, ")") self.nick = self.nick + "_" self.current_nick = self.nick self.ChangeNick(self.nick) def OnPing(self, data=False): data = data or "" self.last_ping_pong = time.time() self.SendLine("PONG %s" % data) def OnPong(self, data=False): self.last_ping_pong = time.time() def OnWelcomeInfo(self, info): #self.DebugLog("OnWelcomeInfo(", info, ")") pass def OnSupportInfo(self, info): #self.DebugLog("OnSupportInfo(", info, ")") pass def OnServerInfo(self, info): #self.DebugLog("OnServerInfo(", info, ")") pass def OnProcessConn(self, message): self.DebugLog("Waiting: ", message) def OnYourId(self, id, message = ""): pass def OnMotdLine(self, line): pass def OnReady(self): self.DebugLog("OnReady()") self.irc_throttled = 0 def OnUserHostname(self, nick, hostname): pass def OnWhoisHostname(self, nick, hostname): #self.DebugLog("OnWhoisHostname(", nick, ",", hostname, ")") pass def OnUserNickChange(self, nick, new_nick): #self.DebugLog("OnUserNickChange(", nick, new_nick, ")") pass def OnUserQuit(self, nick, reason): self.DebugLog("OnUserQuit(", nick, reason, ")") def OnPrivmsg(self, by, to, msg): self.DebugLog("OnPrivmsg(", by, "->", to, "::", msg, ")") def OnNotice(self, by, to, msg): self.DebugLog("OnNotice(", by, "->", to, "::", msg, ")") def OnMyModesChanged(self, modes): self.DebugLog("OnMyModesChanged(", modes, ")") def OnIJoined(self, chan): self.DebugLog("OnIJoined(", chan, ")") def OnChannelInviteOnly(self, chan, reason): """ Called when the server indicates which users are present on a channel. """ self.DebugLog("OnChannelInviteOnly(", chan, ",", reason, ")") def OnChannelNeedsPassword(chan, reason): self.DebugLog("OnChannelNeedsPassword(", chan, ",", reason, ")") def OnChannelHasUsers(self, chan, users): """ Called when the server indicates which users are present on a channel. """ self.DebugLog("OnChannelHasUsers(", chan, ",", users, ")") def OnChannelJoin(self, chan, nick): self.DebugLog("OnChannelJoin(", chan, nick, ")") def OnChannelPart(self, chan, nick, reason): self.DebugLog("OnChannelPart(", chan, nick, reason, ")") def OnChannelKick(self, chan, who, nick, reason): self.DebugLog("OnChannelKick(", chan, ",", who, nick, reason, ")") def OnChannelCreated(self, chan, value): self.DebugLog("OnChannelCreated(", chan, value, ")") def OnChannelModesAre(self, chan, modes): self.DebugLog("OnChannelModesAre(", chan, modes, ")") def OnChannelModesChanged(self, chan, modes, nick): self.DebugLog("OnChannelModesChanged(", chan, modes, ")") def OnChannelUserModesChanged(self, chan, modes, by): self.DebugLog("OnChannelUserModesChanged(", chan, modes, by, ")") def IsConnected(self): return self.irc_connected def OnInterrupt(self): """Called when the mainloop is interrupted with a KeyboardInterrupt. Return True to continue execution.""" return False # # Actions # def WrapLine(self, start, contents): ml = self.max_line_len - 2 if len(start+contents) <= ml: return [start+contents] chunk = contents lines = [] while 1: line = start + chunk print line if len(line) > ml: chunk = line[ml:] line = line[:ml] lines.append(line) else: lines.append(line) break return lines def PingServer(self): self.SendLine("PING " + self.current_nick) self.pinged_server = time.time() def ChangeNick(self, nick=None): self.DebugLog("Changing nick to:", nick) if nick == None: nick = self.def_nick self.SetNick(nick) self.SendLine("NICK %s" % nick) def DoIntroduce(self, nick=None, ident=None, realname=None): # Send NICK and USER messages self.DebugLog("Introducing as:", "nick:", nick, ", ident:", ident, ", realname:", realname) if nick == None: nick = self.def_nick if ident == None: ident = self.def_ident if realname == None: realname = self.def_realname self.ChangeNick(nick) self.SendLine("USER %s 8 * :%s" % (ident, realname)) def DoWhois(self, nick): self.SendLine("WHOIS %s %s" % (nick, nick)) def JoinChannels(self, chans, keys=[]): if type(chans) in [type(u""), type("")]: chans = [chans] chanlist = ",".join(chans) keylist = ",".join(keys) self.SendLine("JOIN %s %s" % (chanlist, keylist)) def Join(self, chans, keys=[]): self.JoinChannels(chans, keys) def PartChannels(self, chans): if type(chans) in [type(u""), type("")]: chans = [chans] chanlist = ",".join(chans) self.SendLine("PART %s" % chanlist) def Kick(self, chan, nick, message=""): self.SendLine("KICK %s %s %s" % (chan, nick, message)) def Privmsg(self, dest, msg): if not msg: return False lines = self.WrapLine(u"PRIVMSG " + dest + u" :",msg) self.SendLines(lines) def Notice(self, dest, msg): if not msg: return False self.SendLine(u"NOTICE " + dest + u" :" + msg) def SetChannelUserModes(self, chan, nickmodes, operation=True): if operation: modes = "+" else: modes = "-" nicks = [] for item in nickmodes: nicks.append(item[0]) modes += item[1] self.SendLine(u"MODE " + chan + " " + modes + " " + (" ".join(nicks))) def SetChannelTopic(self, chan, topic): self.SendLine("TOPIC " + chan + " :" + topic) def SetChannelModes(self, chan, modes): self.SendLine("MODE " + chan + " " + modes) # # Protocol Implementation & Parsing # # Returns the last (multi-word) parameter in the line, or False if not present def GetTextData(self, line): line = line[1:] index = line.find(":") if index != -1: return line[index+1:] else: return False def GetCleanNick(self, nick): if self.GetModeChr(nick) != IRC_MODE_CHR_NONE: return nick[1:] return nick def GetModeChr(self, s): if s[:1] in [IRC_MODE_CHR_VOICE, IRC_MODE_CHR_OP]: return s[:1] return IRC_MODE_CHR_NONE def IsChannelName(self, name): if name[0] in ["#", "&", "!"]: return True return False def GetMode(self, s): modechr = self.GetModeChr(s) if modechr == IRC_MODE_CHR_NONE: return None elif modechr == IRC_MODE_CHR_OP: return IRC_MODE_OP elif modechr == IRC_MODE_CHR_VOICE: return IRC_MODE_VOICE def ParseNickHost(self, line): part = line.split(" ")[0][1:] ind=part.find("!") if ind!=-1: nick = part[:ind] hostname = part[ind+1:] else: nick = "" hostname = part[1:] if nick != "": self.OnUserHostname(nick, hostname) return nick, hostname def LineIsCommand(self, line): cmds = ["ping", "pong", "join", "part", "kick", "topic", "quit", "privmsg", "nick", "mode", "notice"] parts = string.split(line) if len(parts) < 2: return False if parts[1].lower() in cmds: return True return False def LineIsNumeric(self, line): parts = string.split(line) if len(parts) < 2: return False try: numeric = int(parts[1]) return True except: return False # Warning: Heavy shit (There be Dragons!) # ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== # Tries to parse a line received from the server def ParseLine(self, line): self.DebugLog("RAWRCV:", line) parts = string.split(line) txt_data = self.GetTextData(line) first_word = parts[0].lower() if first_word in ["ping", "error", "notice"]: if first_word == "ping": self.OnPing(" ".join(parts[1:])) elif first_word == "pong": pass elif first_word == "error": #ERROR :Closing Link: botti by portlane.se.quakenet.org (G-lined) #ERROR :Your host is trying to (re)connect too fast -- throttled text = line.lower() if text.find(":closing link:") != -1: self.OnConnectThrottled(txt_data) elif text.find("throttled") != -1: self.OnConnectThrottled(txt_data) elif text.find("g-lined") != -1: self.OnConnectThrottled(reason) else: return False elif self.LineIsCommand(line): command = parts[1].lower() nick, hostname = self.ParseNickHost(line) if not command in ["pong", "ping"]: self.DebugLog("RAW:", line) if command == "pong": self.OnPing(txt_data) elif command == "join": if txt_data != False: target = txt_data else: target = parts[2] nick, hostname = self.ParseNickHost(line) if nick == self.current_nick: self.OnIJoined(target) else: self.OnChannelJoin(target, nick) elif command == "part": if txt_data != False: target = txt_data else: target = parts[2] nick,hostname = self.ParseNickHost(line) self.OnChannelPart(target, nick, txt_data) elif command == "topic": target = parts[2] nick,hostname = self.ParseNickHost(line) topic = self.GetTextData(line) self.OnChannelTopicChanged(target, nick, topic) elif command == "privmsg": target = parts[2] nick,hostname = self.ParseNickHost(line) self.OnPrivmsg(nick, target, txt_data) elif command == "notice": target = parts[2] nick, hostname = self.ParseNickHost(line) if nick: self.OnNotice(nick, target, txt_data) elif command == "quit": reason = self.GetTextData(line) nick, hostname = self.ParseNickHost(line) self.OnUserQuit(nick, reason) elif command == "kick": nick, hostname = self.ParseNickHost(line) target = parts[2] who = parts[3] reason = self.GetTextData(line) self.OnChannelKick(target, who, nick, reason) elif command == "nick": newnick = self.GetTextData(line) nick, host = self.ParseNickHost(line) self.OnUserNickChange(nick, newnick) elif command == "mode": #self.DebugLog("\t\tParseLine(", line, ")") target = parts[2] if target == self.current_nick: modes = parts[3] self.OnMyModesChanged(modes) else: types = ["-", "+"] modes = parts[3] if len(parts) > 4: # Channel user modes are being set nicks = parts[4:] i = 0 operation = True users = [] for mode in modes: char = modes[i] if char in types: if char == "-": operation = False elif char == "+": operation = True modes = modes[1:] continue modechr = modes[i] user = (nicks[i], modechr, operation) users.append(user) i += 1 self.OnChannelUserModesChanged(target, users, nick) else: modes = parts[3] self.OnChannelModesChanged(target, modes, nick) else: return False elif self.LineIsNumeric(line): numeric = int(parts[1]) if numeric in [1, 2, 3]: # Welcome info self.OnWelcomeInfo(txt_data) elif numeric == 4: self.OnWelcomeInfo(" ".join(parts[3:])) elif numeric == 5: self.OnSupportInfo(" ".join(parts[3:])) elif numeric == 20: self.OnProcessConn(txt_data) elif numeric == 42: self.OnYourId(parts[3], txt_data) elif numeric in [251, 252, 253, 254, 255]: self.OnServerInfo(" ".join(parts[3:])) elif numeric in [265, 266]: # Local and global users #:no.address 265 bx 4 11 :Current local users 4, max 11 #:no.address 266 bx 4 11 :Current global users 4, max 11 pass # Implement if needed elif numeric in [311]: # Parse whois responses nick = parts[3] if numeric == 311: hostname = parts[4] + "@" + parts[5] self.OnWhoisHostname(nick,hostname) elif numeric in [375, 372]: # Start of MOTD, First line of MOTD self.OnMotdLine(txt_data) elif numeric in [376, 422]: # End of MOTD, No MOTD self.OnReady() elif numeric in [324, 329, 332, 333, 353, 366, 473]: # Channel numerics # Channel specific stuff chan = parts[3] if numeric in [324, 329]: value = parts[4] if numeric == 329: # channel creation time self.OnChannelCreated(chan, value) elif numeric == 324: # channel modes modes = list(value.replace("+", "")) self.OnChannelModesAre(chan, modes) elif numeric == 332: # channel topic topic = self.GetTextData(line) self.OnChannelTopicIs(chan, topic) elif numeric == 333: # channel topic metadata nick = parts[4] utime = int(parts[5]) self.OnChannelTopicMeta(chan, nick, utime) elif numeric == 353: # Reply to NAMES chan = parts[4] nicks = self.GetTextData(line).split(" ") users = [] for raw_nick in nicks: nick = self.GetCleanNick(raw_nick) mode = self.GetMode(raw_nick) users.append( (nick, mode) ) self.OnChannelHasUsers(chan, users) elif numeric == 366: # End of NAMES pass elif numeric == 473: # Channel is invite only self.OnChannelInviteOnly(chan, txt_data) elif numeric == 475: self.OnChannelNeedsPassword(chan, txt_data) elif numeric == 433: # Nick in use nick = parts[3] self.OnNickInUse(nick,txt_data) elif numeric == 465: self.OnConnectThrottled(txt_data) else: return False else: return False # ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== ==== # # Receive # def DataEncode(self, data): return data.encode(self.encoding) def DataDecode(self, data): for enc in self.decodings: try: return data.decode(enc) except Exception, err: continue return safe_escape(data) def ReceiveToBuffer(self): try: received = self.sock.recv(self.read_chunk_size) except socket.timeout, e: err = e.args[0] self.DebugLog("ReceiveToBuffer()", "timed out") if err == "timed out": return True except socket.error, e: self.DebugLog("ReceiveToBuffer()", "failed sock.recv", e) return False if len(received) == 0: self.DebugLog("ReceiveToBuffer()", "received empty data") return False if received: data = self.DataDecode(received) self.raw_buffer = self.raw_buffer + data lines = string.split(self.raw_buffer, "\n") self.raw_buffer = lines.pop() for line in lines: line = string.rstrip(line) self.recv_buffer.append(line) self.OnReceive(line) self.last_receive = time.time() self.pinged_server = False return True self.DebugLog("ReceiveToBuffer()","received nothing, sock dead?!") return False def ProcessRecvBuffer(self): if self.recv_buffer == []: return False line = self.recv_buffer.pop(0) self.ParseLine(line) # # Send # def Send(self, data): data = self.DataEncode(data) self.send_buffer.append(data) def LoopingSend(self, data): left = data while left != "": try: sent = self.sock.send(left) if len(left) == sent: return True left = left[sent:] except: return False def ProcessSend(self, data): """This is where data to be sent finally ends up.""" if self.irc_running & self.irc_connected: try: returned = self.LoopingSend(data) return True except Exception,e: self.DebugLog("ProcessSend", "fail:",data,e) return False else: self.DebugLog("ProcessSend", "not running & connected -> can't send!") return False def SendLines(self, lines): for line in lines: self.SendLine(line) def SendLine(self, line): # Send one line to the server via the send buffer. FIXME CANT BE MORE THAN 512 CHARS LONG if len(line) > 510: self.DebugLog("SendLine(): line too long!") self.Send(line + "\r\n") def SendRaw(self, line): # Send one line to the self.SendLine(line) def ProcessSendBuffer(self): # send line from buffer if not throttled if self.send_buffer != []: if self.send_time == None or time.time() - self.send_time > self.send_throttling: line = self.send_buffer.pop(0) if not self.ProcessSend(line): return False self.send_time = time.time() else: return None def DropSendBuffer(self): self.send_buffer = [] # # Start & Maintain Connection # def Connect(self): self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if "settimeout" in dir(self.sock): self.sock.settimeout(self.sock_timeout) # This needs to be stoppable self.DebugLog("Connecting to " + self.host + ":" + str(self.port) + "...") try: self.sock.connect((self.host, self.port)) except socket.error, err: self.DebugLog("Error connecting:", str(socket.error), str(err)) return False self.irc_connected = 1 return True def DelayReconnect(self): self.DebugLog("DelayReconnect()") time.sleep(self.throttle_wait) def Disconnect(self): self.irc_connected = 0 self.irc_running = 0 if self.sock != None: self.sock.close() self.InitializeClient() self.OnDisconnected() def KeepAlive(self): if self.last_receive != None: elapsed = time.time() - self.last_receive else: return # Ping the server after inactivity and wait for reply. # If no timely response is received, cut the connection and reconnect. if elapsed > self.max_inactivity: self.DebugLog("KeepAlive()", "server not responding to ping, reconnecting.") self.Disconnect() elif (not self.pinged_server) and elapsed > self.ping_after: self.PingServer() def Process(self): try: self.ProcessRecvBuffer() self.KeepAlive() except Exception, e: try: import traceback print traceback.format_exc() except: pass print sys.exc_info()[0] self.DebugLog("Process()", e) def IRCMaintain(self): try: self.OnLoop() try: sockl = [self.sock] if not s60: readable, writable, errored = select.select(sockl, sockl, sockl, self.select_timeout) else: readable, writable, errored = select.select(sockl, [], [], self.select_timeout) time.sleep(self.select_timeout) except socket.error, err: self.DebugLog("IRCMainloop()","select error:",socket.error,err) return False if self.sock in readable: ok = self.ReceiveToBuffer() # Try to receive and break on failure if not ok: # If no data received, sock is dead self.Disconnect() return False return True elif self.sock in errored: self.DebugLog("IRCMainloop()","socket errored") return False elif self.sock in writable or s60: ok = self.ProcessSendBuffer() # Try to send and break on failure if ok == False: return False else: self.DebugLog("IRCMainloop()","socket inaccessible") self.Process() return True except KeyboardInterrupt: return self.OnInterrupt() # if self.OnInterrupt() == False: # return False # return True def IRCMainloop(self): while self.irc_running: result = self.IRCMaintain() if not result: return False # Connect and run irc client def StartClient(self, block=True): self.DebugLog("Starting client...") connected = self.Connect() if connected: self.OnConnected() self.connection_time = time.time() self.irc_running = 1 if block: status = self.IRCMainloop() self.InitializeClient() return status else: return True else: return False # Cut connection and stop def StopClient(self): self.DebugLog("StopClient()") self.irc_running = 0 self.Disconnect() self.InitializeClient()
    apache-2.0
    evonove/mkm-sdk
    tests/tests_unit/test_exception.py
    1
    1951
    from collections import namedtuple from mkmsdk.exceptions import ConnectionError, MissingParam, MissingEnvVar, SerializationException def test_connection_error_with_no_args(): """ Test error string is formatted correctly when exception is initialized without arguments """ error = ConnectionError() assert str(error) == "Request failed" def test_connection_error_with_response(): """ Test error string is formatted correctly when exception is initialized with response """ Response = namedtuple("Response", "status_code reason content") response = Response(status_code="404", reason="Not found", content="Here some content") error = ConnectionError(response) assert str(error) == "Request failed\nStatus code: 404\nResponse message: Not found\nHere some content" def test_connection_error_with_empty_response(): """ Test error string is formatted correctly when exception is initialized with empty response """ error = ConnectionError({}) assert str(error) == "Request failed" def test_connection_error_with_message(): """ Test error string is formatted correctly when exception is initialized with a message """ error = ConnectionError(message="This is a message") assert str(error) == "Request failed\nThis is a message" def test_missing_param(): """Test error string is formatted correctly""" error = MissingParam("payment_id") assert str(error) == "Missing payment_id parameter" def test_missing_config(): """Test error string is formatted correctly""" error = MissingEnvVar("client_id") assert str(error) == "Missing client_id environment variable" def test_serialization_exception(): """Test error string is formatted correctly""" error = SerializationException("Something wrong with serialization") assert str(error) == "Serialization exception. Something wrong with serialization"
    mit
    nirvn/QGIS
    python/plugins/processing/script/AddScriptFromFileAction.py
    6
    3506
    # -*- coding: utf-8 -*- """ *************************************************************************** EditScriptAction.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'April 2014' __copyright__ = '(C) 201, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.PyQt.QtWidgets import QFileDialog, QMessageBox from qgis.PyQt.QtCore import QFileInfo from qgis.core import QgsApplication, QgsSettings from processing.script.ScriptAlgorithm import ScriptAlgorithm from processing.gui.ToolboxAction import ToolboxAction from processing.script.WrongScriptException import WrongScriptException from processing.script.ScriptUtils import ScriptUtils pluginPath = os.path.split(os.path.dirname(__file__))[0] class AddScriptFromFileAction(ToolboxAction): def __init__(self): self.name, self.i18n_name = self.trAction('Add script from file') self.group, self.i18n_group = self.trAction('Tools') def getIcon(self): return QgsApplication.getThemeIcon("/processingScript.svg") def execute(self): settings = QgsSettings() lastDir = settings.value('Processing/lastScriptsDir', '') filenames, selected_filter = QFileDialog.getOpenFileNames(self.toolbox, self.tr('Script files', 'AddScriptFromFileAction'), lastDir, self.tr('Script files (*.py *.PY)', 'AddScriptFromFileAction')) if filenames: validAlgs = 0 wrongAlgs = [] for filename in filenames: try: settings.setValue('Processing/lastScriptsDir', QFileInfo(filename).absoluteDir().absolutePath()) script = ScriptAlgorithm(filename) destFilename = os.path.join(ScriptUtils.scriptsFolders()[0], os.path.basename(filename)) with open(destFilename, 'w') as f: f.write(script.script) validAlgs += 1 except WrongScriptException: wrongAlgs.append(os.path.basename(filename)) if validAlgs: QgsApplication.processingRegistry().providerById('script').refreshAlgorithms() if wrongAlgs: QMessageBox.warning(self.toolbox, self.tr('Error reading scripts', 'AddScriptFromFileAction'), self.tr('The following files do not contain a valid script:\n-', 'AddScriptFromFileAction') + "\n-".join(wrongAlgs))
    gpl-2.0
    wakatime/sublime-wakatime
    packages/wakatime/packages/urllib3/util/request.py
    205
    3705
    from __future__ import absolute_import from base64 import b64encode from ..packages.six import b, integer_types from ..exceptions import UnrewindableBodyError ACCEPT_ENCODING = 'gzip,deflate' _FAILEDTELL = object() def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, basic_auth=None, proxy_basic_auth=None, disable_cache=None): """ Shortcuts for generating request headers. :param keep_alive: If ``True``, adds 'connection: keep-alive' header. :param accept_encoding: Can be a boolean, list, or string. ``True`` translates to 'gzip,deflate'. List will get joined by comma. String will be used as provided. :param user_agent: String representing the user-agent you want, such as "python-urllib3/0.6" :param basic_auth: Colon-separated username:password string for 'authorization: basic ...' auth header. :param proxy_basic_auth: Colon-separated username:password string for 'proxy-authorization: basic ...' auth header. :param disable_cache: If ``True``, adds 'cache-control: no-cache' header. Example:: >>> make_headers(keep_alive=True, user_agent="Batman/1.0") {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} >>> make_headers(accept_encoding=True) {'accept-encoding': 'gzip,deflate'} """ headers = {} if accept_encoding: if isinstance(accept_encoding, str): pass elif isinstance(accept_encoding, list): accept_encoding = ','.join(accept_encoding) else: accept_encoding = ACCEPT_ENCODING headers['accept-encoding'] = accept_encoding if user_agent: headers['user-agent'] = user_agent if keep_alive: headers['connection'] = 'keep-alive' if basic_auth: headers['authorization'] = 'Basic ' + \ b64encode(b(basic_auth)).decode('utf-8') if proxy_basic_auth: headers['proxy-authorization'] = 'Basic ' + \ b64encode(b(proxy_basic_auth)).decode('utf-8') if disable_cache: headers['cache-control'] = 'no-cache' return headers def set_file_position(body, pos): """ If a position is provided, move file to that point. Otherwise, we'll attempt to record a position for future use. """ if pos is not None: rewind_body(body, pos) elif getattr(body, 'tell', None) is not None: try: pos = body.tell() except (IOError, OSError): # This differentiates from None, allowing us to catch # a failed `tell()` later when trying to rewind the body. pos = _FAILEDTELL return pos def rewind_body(body, body_pos): """ Attempt to rewind body to a certain position. Primarily used for request redirects and retries. :param body: File-like object that supports seek. :param int pos: Position to seek to in file. """ body_seek = getattr(body, 'seek', None) if body_seek is not None and isinstance(body_pos, integer_types): try: body_seek(body_pos) except (IOError, OSError): raise UnrewindableBodyError("An error occurred when rewinding request " "body for redirect/retry.") elif body_pos is _FAILEDTELL: raise UnrewindableBodyError("Unable to record file position for rewinding " "request body during a redirect/retry.") else: raise ValueError("body_pos must be of type integer, " "instead it was %s." % type(body_pos))
    bsd-3-clause
    darnould/integrations-core
    varnish/test_varnish.py
    1
    5266
    # stdlib import os import re import subprocess # 3p from nose.plugins.attrib import attr # project from tests.checks.common import AgentCheckTest # This is a small extract of metrics from varnish. This is meant to test that # the check gather metrics. This the check return everything from varnish # without any selection/rename, their is no point in having a complete list. COMMON_METRICS = [ 'varnish.uptime', # metrics where the 'MAIN' prefix was removed 'varnish.sess_conn', # metrics where the 'MAIN' prefix was removed 'varnish.sess_drop', # metrics where the 'MAIN' prefix was removed 'varnish.sess_fail', # metrics where the 'MAIN' prefix was removed 'varnish.client_req_400', # metrics where the 'MAIN' prefix was removed 'varnish.client_req_417', # metrics where the 'MAIN' prefix was removed 'varnish.client_req', # metrics where the 'MAIN' prefix was removed 'varnish.cache_hit', # metrics where the 'MAIN' prefix was removed 'varnish.cache_hitpass', # metrics where the 'MAIN' prefix was removed 'varnish.cache_miss', # metrics where the 'MAIN' prefix was removed 'varnish.backend_conn', # metrics where the 'MAIN' prefix was removed 'varnish.backend_unhealthy', # metrics where the 'MAIN' prefix was removed 'varnish.backend_busy', # metrics where the 'MAIN' prefix was removed 'varnish.fetch_eof', # metrics where the 'MAIN' prefix was removed 'varnish.fetch_bad', # metrics where the 'MAIN' prefix was removed 'varnish.fetch_none', # metrics where the 'MAIN' prefix was removed 'varnish.fetch_1xx', # metrics where the 'MAIN' prefix was removed 'varnish.pools', # metrics where the 'MAIN' prefix was removed 'varnish.busy_sleep', # metrics where the 'MAIN' prefix was removed 'varnish.busy_wakeup', # metrics where the 'MAIN' prefix was removed 'varnish.busy_killed', # metrics where the 'MAIN' prefix was removed 'varnish.sess_queued', # metrics where the 'MAIN' prefix was removed 'varnish.sess_dropped', # metrics where the 'MAIN' prefix was removed 'varnish.n_object', # metrics where the 'MAIN' prefix was removed 'varnish.n_vampireobject', # metrics where the 'MAIN' prefix was removed 'varnish.n_vcl', # metrics where the 'MAIN' prefix was removed 'varnish.n_vcl_avail', # metrics where the 'MAIN' prefix was removed 'varnish.n_vcl_discard', # metrics where the 'MAIN' prefix was removed 'varnish.bans', # metrics where the 'MAIN' prefix was removed 'varnish.bans_completed', # metrics where the 'MAIN' prefix was removed 'varnish.bans_obj', # metrics where the 'MAIN' prefix was removed 'varnish.bans_req', # metrics where the 'MAIN' prefix was removed 'varnish.MGT.child_start', 'varnish.MGT.child_exit', 'varnish.MGT.child_stop', 'varnish.MEMPOOL.busyobj.live', 'varnish.MEMPOOL.busyobj.pool', 'varnish.MEMPOOL.busyobj.allocs', 'varnish.MEMPOOL.busyobj.frees', 'varnish.SMA.s0.c_req', 'varnish.SMA.s0.c_fail', 'varnish.SMA.Transient.c_req', 'varnish.SMA.Transient.c_fail', 'varnish.VBE.boot.default.req', 'varnish.LCK.backend.creat', 'varnish.LCK.backend_tcp.creat', 'varnish.LCK.ban.creat', 'varnish.LCK.ban.locks', 'varnish.LCK.busyobj.creat', 'varnish.LCK.mempool.creat', 'varnish.LCK.vbe.creat', 'varnish.LCK.vbe.destroy', 'varnish.LCK.vcl.creat', 'varnish.LCK.vcl.destroy', 'varnish.LCK.vcl.locks', ] VARNISH_DEFAULT_VERSION = "4.1.4" @attr(requires='varnish') class VarnishCheckTest(AgentCheckTest): CHECK_NAME = 'varnish' def _get_varnish_stat_path(self): varnish_version = os.environ.get('FLAVOR_VERSION', VARNISH_DEFAULT_VERSION).split('.', 1)[0] return "%s/ci/varnishstat%s" % (os.path.dirname(os.path.abspath(__file__)), varnish_version) def _get_config_by_version(self, name=None): config = { 'instances': [{ 'varnishstat': self._get_varnish_stat_path(), 'tags': ['cluster:webs'] }] } if name: config['instances'][0]['name'] = name return config def test_check(self): config = self._get_config_by_version() self.run_check_twice(config) for mname in COMMON_METRICS: self.assertMetric(mname, count=1, tags=['cluster:webs', 'varnish_name:default']) # This the docker image is in a different repository, we check that the # verison requested in the FLAVOR_VERSION is the on running inside the # container. def test_version(self): varnishstat = self._get_varnish_stat_path() output = subprocess.check_output([varnishstat, "-V"]) res = re.search(r"varnish-(\d+\.\d\.\d)", output) if res is None: raise Exception("Could not retrieve varnish version from docker") version = res.groups()[0] self.assertEquals(version, os.environ.get('FLAVOR_VERSION', VARNISH_DEFAULT_VERSION))
    bsd-3-clause
    texastribune/scuole
    scuole/regions/migrations/0004_auto_20151211_2336.py
    1
    5133
    # -*- coding: utf-8 -*- # Generated by Django 1.9 on 2015-12-11 23:36 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('regions', '0003_auto_20151211_1922'), ] operations = [ migrations.RenameField( model_name='regionstats', old_name='teacher_african_american_percent', new_name='teacher_african_american_fte_percent', ), migrations.RenameField( model_name='regionstats', old_name='teacher_american_indian_percent', new_name='teacher_american_indian_fte_percent', ), migrations.RenameField( model_name='regionstats', old_name='teacher_asian_percent', new_name='teacher_asian_fte_percent', ), migrations.RenameField( model_name='regionstats', old_name='teacher_hispanic_percent', new_name='teacher_hispanic_fte_percent', ), migrations.RenameField( model_name='regionstats', old_name='teacher_pacific_islander_percent', new_name='teacher_pacific_islander_fte_percent', ), migrations.RenameField( model_name='regionstats', old_name='teacher_two_or_more_races_percent', new_name='teacher_two_or_more_races_fte_percent', ), migrations.RenameField( model_name='regionstats', old_name='teacher_white_percent', new_name='teacher_white_fte_percent', ), migrations.RemoveField( model_name='regionstats', name='teacher_african_american_count', ), migrations.RemoveField( model_name='regionstats', name='teacher_american_indian_count', ), migrations.RemoveField( model_name='regionstats', name='teacher_asian_count', ), migrations.RemoveField( model_name='regionstats', name='teacher_hispanic_count', ), migrations.RemoveField( model_name='regionstats', name='teacher_pacific_islander_count', ), migrations.RemoveField( model_name='regionstats', name='teacher_two_or_more_races_count', ), migrations.RemoveField( model_name='regionstats', name='teacher_white_count', ), migrations.AddField( model_name='regionstats', name='teacher_african_american_fte_count', field=models.FloatField(blank=True, null=True, verbose_name='Number of African American teachers'), ), migrations.AddField( model_name='regionstats', name='teacher_american_indian_fte_count', field=models.FloatField(blank=True, null=True, verbose_name='Number of American Indian teachers'), ), migrations.AddField( model_name='regionstats', name='teacher_asian_fte_count', field=models.FloatField(blank=True, null=True, verbose_name='Number of Asian teachers'), ), migrations.AddField( model_name='regionstats', name='teacher_hispanic_fte_count', field=models.FloatField(blank=True, null=True, verbose_name='Number of Hispanic teachers'), ), migrations.AddField( model_name='regionstats', name='teacher_pacific_islander_fte_count', field=models.FloatField(blank=True, null=True, verbose_name='Number of Pacific Islander teachers'), ), migrations.AddField( model_name='regionstats', name='teacher_two_or_more_races_fte_count', field=models.FloatField(blank=True, null=True, verbose_name='Number of teachers of two or more races'), ), migrations.AddField( model_name='regionstats', name='teacher_white_fte_count', field=models.FloatField(blank=True, null=True, verbose_name='Number of white teachers'), ), migrations.AlterField( model_name='regionstats', name='teacher_bachelors_count', field=models.FloatField(blank=True, null=True, verbose_name='Number of teachers with bachelors degree'), ), migrations.AlterField( model_name='regionstats', name='teacher_doctorate_count', field=models.FloatField(blank=True, null=True, verbose_name='Number of teachers with doctorate degree'), ), migrations.AlterField( model_name='regionstats', name='teacher_masters_count', field=models.FloatField(blank=True, null=True, verbose_name='Number of teachers with masters degree'), ), migrations.AlterField( model_name='regionstats', name='teacher_no_degree_count', field=models.FloatField(blank=True, null=True, verbose_name='Number of teachers with no degree'), ), ]
    mit
    ryepdx/kit_sale
    product.py
    1
    1279
    # -*- coding: utf-8 -*- from openerp import SUPERUSER_ID from openerp.osv import fields, osv from openerp.tools.translate import _ import openerp.addons.decimal_precision as dp class product(osv.osv): _inherit = "product.product" def get_product_available(self, cr, uid, ids, context=None): """ Finds whether product is available or not in a particular warehouse. @return: Dictionary of values """ bom_pool = self.pool.get("mrp.bom") res = super(product, self).get_product_available(cr, uid, ids, context=context) if 'done' not in context.get('states', []) or 'in' not in context.get('what', []): return res boms = bom_pool.browse(cr, uid, bom_pool.search(cr, uid, [('product_id', 'in', res.keys())])) for bom in boms: if not bom.bom_lines: continue quantities = [] for l in bom.bom_lines: if not l.product_qty: quantities.append(0) break quantities.append( (res[l.product_id.id] if l.product_id.id in res else l.product_id.qty_available) / l.product_qty) res[bom.product_id.id] += min(quantities) return res product()
    agpl-3.0
    ice9js/servo
    tests/wpt/web-platform-tests/mixed-content/generic/expect.py
    95
    4179
    import json, os, urllib, urlparse def redirect(url, response): response.add_required_headers = False response.writer.write_status(301) response.writer.write_header("access-control-allow-origin", "*") response.writer.write_header("location", url) response.writer.end_headers() response.writer.write("") def create_redirect_url(request, swap_scheme = False): parsed = urlparse.urlsplit(request.url) destination_netloc = parsed.netloc scheme = parsed.scheme if swap_scheme: scheme = "http" if parsed.scheme == "https" else "https" hostname = parsed.netloc.split(':')[0] port = request.server.config["ports"][scheme][0] destination_netloc = ":".join([hostname, str(port)]) # Remove "redirection" from query to avoid redirect loops. parsed_query = dict(urlparse.parse_qsl(parsed.query)) assert "redirection" in parsed_query del parsed_query["redirection"] destination_url = urlparse.urlunsplit(urlparse.SplitResult( scheme = scheme, netloc = destination_netloc, path = parsed.path, query = urllib.urlencode(parsed_query), fragment = None)) return destination_url def main(request, response): if "redirection" in request.GET: redirection = request.GET["redirection"] if redirection == "no-redirect": pass elif redirection == "keep-scheme-redirect": redirect(create_redirect_url(request, swap_scheme=False), response) return elif redirection == "swap-scheme-redirect": redirect(create_redirect_url(request, swap_scheme=True), response) return else: raise ValueError ("Invalid redirect type: %s" % redirection) content_type = "text/plain" response_data = "" if "action" in request.GET: action = request.GET["action"] if "content_type" in request.GET: content_type = request.GET["content_type"] key = request.GET["key"] stash = request.server.stash path = request.GET.get("path", request.url.split('?'))[0] if action == "put": value = request.GET["value"] stash.take(key=key, path=path) stash.put(key=key, value=value, path=path) response_data = json.dumps({"status": "success", "result": key}) elif action == "purge": value = stash.take(key=key, path=path) if content_type == "image/png": response_data = open(os.path.join(request.doc_root, "images", "smiley.png"), "rb").read() elif content_type == "audio/mpeg": response_data = open(os.path.join(request.doc_root, "media", "sound_5.oga"), "rb").read() elif content_type == "video/mp4": response_data = open(os.path.join(request.doc_root, "media", "movie_5.mp4"), "rb").read() elif content_type == "application/javascript": response_data = open(os.path.join(request.doc_root, "mixed-content", "generic", "worker.js"), "rb").read() else: response_data = "/* purged */" elif action == "take": value = stash.take(key=key, path=path) if value is None: status = "allowed" else: status = "blocked" response_data = json.dumps({"status": status, "result": value}) response.add_required_headers = False response.writer.write_status(200) response.writer.write_header("content-type", content_type) response.writer.write_header("cache-control", "no-cache; must-revalidate") response.writer.end_headers() response.writer.write(response_data)
    mpl-2.0
    RossBrunton/django
    tests/model_fields/test_uuid.py
    81
    6096
    import json import uuid from django.core import exceptions, serializers from django.db import models from django.test import SimpleTestCase, TestCase from .models import ( NullableUUIDModel, PrimaryKeyUUIDModel, RelatedToUUIDModel, UUIDGrandchild, UUIDModel, ) class TestSaveLoad(TestCase): def test_uuid_instance(self): instance = UUIDModel.objects.create(field=uuid.uuid4()) loaded = UUIDModel.objects.get() self.assertEqual(loaded.field, instance.field) def test_str_instance_no_hyphens(self): UUIDModel.objects.create(field='550e8400e29b41d4a716446655440000') loaded = UUIDModel.objects.get() self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000')) def test_str_instance_hyphens(self): UUIDModel.objects.create(field='550e8400-e29b-41d4-a716-446655440000') loaded = UUIDModel.objects.get() self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000')) def test_str_instance_bad_hyphens(self): UUIDModel.objects.create(field='550e84-00-e29b-41d4-a716-4-466-55440000') loaded = UUIDModel.objects.get() self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000')) def test_null_handling(self): NullableUUIDModel.objects.create(field=None) loaded = NullableUUIDModel.objects.get() self.assertEqual(loaded.field, None) def test_pk_validated(self): with self.assertRaisesMessage(TypeError, 'is not a valid UUID'): PrimaryKeyUUIDModel.objects.get(pk={}) with self.assertRaisesMessage(TypeError, 'is not a valid UUID'): PrimaryKeyUUIDModel.objects.get(pk=[]) def test_wrong_value(self): self.assertRaisesMessage( ValueError, 'badly formed hexadecimal UUID string', UUIDModel.objects.get, field='not-a-uuid') self.assertRaisesMessage( ValueError, 'badly formed hexadecimal UUID string', UUIDModel.objects.create, field='not-a-uuid') class TestMigrations(SimpleTestCase): def test_deconstruct(self): field = models.UUIDField() name, path, args, kwargs = field.deconstruct() self.assertEqual(kwargs, {}) class TestQuerying(TestCase): def setUp(self): self.objs = [ NullableUUIDModel.objects.create(field=uuid.uuid4()), NullableUUIDModel.objects.create(field='550e8400e29b41d4a716446655440000'), NullableUUIDModel.objects.create(field=None), ] def test_exact(self): self.assertSequenceEqual( NullableUUIDModel.objects.filter(field__exact='550e8400e29b41d4a716446655440000'), [self.objs[1]] ) def test_isnull(self): self.assertSequenceEqual( NullableUUIDModel.objects.filter(field__isnull=True), [self.objs[2]] ) class TestSerialization(SimpleTestCase): test_data = '[{"fields": {"field": "550e8400-e29b-41d4-a716-446655440000"}, "model": "model_fields.uuidmodel", "pk": null}]' def test_dumping(self): instance = UUIDModel(field=uuid.UUID('550e8400e29b41d4a716446655440000')) data = serializers.serialize('json', [instance]) self.assertEqual(json.loads(data), json.loads(self.test_data)) def test_loading(self): instance = list(serializers.deserialize('json', self.test_data))[0].object self.assertEqual(instance.field, uuid.UUID('550e8400-e29b-41d4-a716-446655440000')) class TestValidation(SimpleTestCase): def test_invalid_uuid(self): field = models.UUIDField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean('550e8400', None) self.assertEqual(cm.exception.code, 'invalid') self.assertEqual(cm.exception.message % cm.exception.params, "'550e8400' is not a valid UUID.") def test_uuid_instance_ok(self): field = models.UUIDField() field.clean(uuid.uuid4(), None) # no error class TestAsPrimaryKey(TestCase): def test_creation(self): PrimaryKeyUUIDModel.objects.create() loaded = PrimaryKeyUUIDModel.objects.get() self.assertIsInstance(loaded.pk, uuid.UUID) def test_uuid_pk_on_save(self): saved = PrimaryKeyUUIDModel.objects.create(id=None) loaded = PrimaryKeyUUIDModel.objects.get() self.assertIsNotNone(loaded.id, None) self.assertEqual(loaded.id, saved.id) def test_uuid_pk_on_bulk_create(self): u1 = PrimaryKeyUUIDModel() u2 = PrimaryKeyUUIDModel(id=None) PrimaryKeyUUIDModel.objects.bulk_create([u1, u2]) # Check that the two objects were correctly created. u1_found = PrimaryKeyUUIDModel.objects.filter(id=u1.id).exists() u2_found = PrimaryKeyUUIDModel.objects.exclude(id=u1.id).exists() self.assertTrue(u1_found) self.assertTrue(u2_found) self.assertEqual(PrimaryKeyUUIDModel.objects.count(), 2) def test_underlying_field(self): pk_model = PrimaryKeyUUIDModel.objects.create() RelatedToUUIDModel.objects.create(uuid_fk=pk_model) related = RelatedToUUIDModel.objects.get() self.assertEqual(related.uuid_fk.pk, related.uuid_fk_id) def test_update_with_related_model_instance(self): # regression for #24611 u1 = PrimaryKeyUUIDModel.objects.create() u2 = PrimaryKeyUUIDModel.objects.create() r = RelatedToUUIDModel.objects.create(uuid_fk=u1) RelatedToUUIDModel.objects.update(uuid_fk=u2) r.refresh_from_db() self.assertEqual(r.uuid_fk, u2) def test_update_with_related_model_id(self): u1 = PrimaryKeyUUIDModel.objects.create() u2 = PrimaryKeyUUIDModel.objects.create() r = RelatedToUUIDModel.objects.create(uuid_fk=u1) RelatedToUUIDModel.objects.update(uuid_fk=u2.pk) r.refresh_from_db() self.assertEqual(r.uuid_fk, u2) def test_two_level_foreign_keys(self): # exercises ForeignKey.get_db_prep_value() UUIDGrandchild().save()
    bsd-3-clause
    haad/ansible
    lib/ansible/modules/source_control/gitlab_group.py
    47
    7493
    #!/usr/bin/python # (c) 2015, Werner Dijkerman ([email protected]) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gitlab_group short_description: Creates/updates/deletes Gitlab Groups description: - When the group does not exists in Gitlab, it will be created. - When the group does exists and state=absent, the group will be deleted. version_added: "2.1" author: "Werner Dijkerman (@dj-wasabi)" requirements: - pyapi-gitlab python module options: server_url: description: - Url of Gitlab server, with protocol (http or https). required: true validate_certs: description: - When using https if SSL certificate needs to be verified. required: false default: true aliases: - verify_ssl login_user: description: - Gitlab user name. required: false default: null login_password: description: - Gitlab password for login_user required: false default: null login_token: description: - Gitlab token for logging in. required: false default: null name: description: - Name of the group you want to create. required: true path: description: - The path of the group you want to create, this will be server_url/group_path - If not supplied, the group_name will be used. required: false default: null state: description: - create or delete group. - Possible values are present and absent. required: false default: "present" choices: ["present", "absent"] ''' EXAMPLES = ''' - name: Delete Gitlab Group gitlab_group: server_url: http://gitlab.example.com validate_certs: False login_token: WnUzDsxjy8230-Dy_k name: my_first_group state: absent delegate_to: localhost - name: Create Gitlab Group gitlab_group: server_url: https://gitlab.example.com validate_certs: True login_user: dj-wasabi login_password: MySecretPassword name: my_first_group path: my_first_group state: present delegate_to: localhost ''' RETURN = '''# ''' try: import gitlab HAS_GITLAB_PACKAGE = True except: HAS_GITLAB_PACKAGE = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native class GitLabGroup(object): def __init__(self, module, git): self._module = module self._gitlab = git def createGroup(self, group_name, group_path): if self._module.check_mode: self._module.exit_json(changed=True) return self._gitlab.creategroup(group_name, group_path) def deleteGroup(self, group_name): is_group_empty = True group_id = self.idGroup(group_name) for project in self._gitlab.getall(self._gitlab.getprojects): owner = project['namespace']['name'] if owner == group_name: is_group_empty = False if is_group_empty: if self._module.check_mode: self._module.exit_json(changed=True) return self._gitlab.deletegroup(group_id) else: self._module.fail_json(msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.") def existsGroup(self, group_name): for group in self._gitlab.getall(self._gitlab.getgroups): if group['name'] == group_name: return True return False def idGroup(self, group_name): for group in self._gitlab.getall(self._gitlab.getgroups): if group['name'] == group_name: return group['id'] def main(): module = AnsibleModule( argument_spec=dict( server_url=dict(required=True), validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']), login_user=dict(required=False, no_log=True), login_password=dict(required=False, no_log=True), login_token=dict(required=False, no_log=True), name=dict(required=True), path=dict(required=False), state=dict(default="present", choices=["present", "absent"]), ), supports_check_mode=True ) if not HAS_GITLAB_PACKAGE: module.fail_json(msg="Missing required gitlab module (check docs or " "install with: pip install pyapi-gitlab") server_url = module.params['server_url'] verify_ssl = module.params['validate_certs'] login_user = module.params['login_user'] login_password = module.params['login_password'] login_token = module.params['login_token'] group_name = module.params['name'] group_path = module.params['path'] state = module.params['state'] # We need both login_user and login_password or login_token, otherwise we fail. if login_user is not None and login_password is not None: use_credentials = True elif login_token is not None: use_credentials = False else: module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token") # Set group_path to group_name if it is empty. if group_path is None: group_path = group_name.replace(" ", "_") # Lets make an connection to the Gitlab server_url, with either login_user and login_password # or with login_token try: if use_credentials: git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl) git.login(user=login_user, password=login_password) else: git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl) except Exception as e: module.fail_json(msg="Failed to connect to Gitlab server: %s " % to_native(e)) # Check if user is authorized or not before proceeding to any operations # if not, exit from here auth_msg = git.currentuser().get('message', None) if auth_msg is not None and auth_msg == '401 Unauthorized': module.fail_json(msg='User unauthorized', details="User is not allowed to access Gitlab server " "using login_token. Please check login_token") # Validate if group exists and take action based on "state" group = GitLabGroup(module, git) group_name = group_name.lower() group_exists = group.existsGroup(group_name) if group_exists and state == "absent": group.deleteGroup(group_name) module.exit_json(changed=True, result="Successfully deleted group %s" % group_name) else: if state == "absent": module.exit_json(changed=False, result="Group deleted or does not exists") else: if group_exists: module.exit_json(changed=False) else: if group.createGroup(group_name, group_path): module.exit_json(changed=True, result="Successfully created or updated the group %s" % group_name) if __name__ == '__main__': main()
    gpl-3.0
    kjw0106/boto
    boto/pyami/bootstrap.py
    150
    5748
    # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import os import boto from boto.utils import get_instance_metadata, get_instance_userdata from boto.pyami.config import Config, BotoConfigPath from boto.pyami.scriptbase import ScriptBase import time class Bootstrap(ScriptBase): """ The Bootstrap class is instantiated and run as part of the PyAMI instance initialization process. The methods in this class will be run from the rc.local script of the instance and will be run as the root user. The main purpose of this class is to make sure the boto distribution on the instance is the one required. """ def __init__(self): self.working_dir = '/mnt/pyami' self.write_metadata() super(Bootstrap, self).__init__() def write_metadata(self): fp = open(os.path.expanduser(BotoConfigPath), 'w') fp.write('[Instance]\n') inst_data = get_instance_metadata() for key in inst_data: fp.write('%s = %s\n' % (key, inst_data[key])) user_data = get_instance_userdata() fp.write('\n%s\n' % user_data) fp.write('[Pyami]\n') fp.write('working_dir = %s\n' % self.working_dir) fp.close() # This file has the AWS credentials, should we lock it down? # os.chmod(BotoConfigPath, stat.S_IREAD | stat.S_IWRITE) # now that we have written the file, read it into a pyami Config object boto.config = Config() boto.init_logging() def create_working_dir(self): boto.log.info('Working directory: %s' % self.working_dir) if not os.path.exists(self.working_dir): os.mkdir(self.working_dir) def load_boto(self): update = boto.config.get('Boto', 'boto_update', 'svn:HEAD') if update.startswith('svn'): if update.find(':') >= 0: method, version = update.split(':') version = '-r%s' % version else: version = '-rHEAD' location = boto.config.get('Boto', 'boto_location', '/usr/local/boto') self.run('svn update %s %s' % (version, location)) elif update.startswith('git'): location = boto.config.get('Boto', 'boto_location', '/usr/share/python-support/python-boto/boto') num_remaining_attempts = 10 while num_remaining_attempts > 0: num_remaining_attempts -= 1 try: self.run('git pull', cwd=location) num_remaining_attempts = 0 except Exception as e: boto.log.info('git pull attempt failed with the following exception. Trying again in a bit. %s', e) time.sleep(2) if update.find(':') >= 0: method, version = update.split(':') else: version = 'master' self.run('git checkout %s' % version, cwd=location) else: # first remove the symlink needed when running from subversion self.run('rm /usr/local/lib/python2.5/site-packages/boto') self.run('easy_install %s' % update) def fetch_s3_file(self, s3_file): try: from boto.utils import fetch_file f = fetch_file(s3_file) path = os.path.join(self.working_dir, s3_file.split("/")[-1]) open(path, "w").write(f.read()) except: boto.log.exception('Problem Retrieving file: %s' % s3_file) path = None return path def load_packages(self): package_str = boto.config.get('Pyami', 'packages') if package_str: packages = package_str.split(',') for package in packages: package = package.strip() if package.startswith('s3:'): package = self.fetch_s3_file(package) if package: # if the "package" is really a .py file, it doesn't have to # be installed, just being in the working dir is enough if not package.endswith('.py'): self.run('easy_install -Z %s' % package, exit_on_error=False) def main(self): self.create_working_dir() self.load_boto() self.load_packages() self.notify('Bootstrap Completed for %s' % boto.config.get_instance('instance-id')) if __name__ == "__main__": # because bootstrap starts before any logging configuration can be loaded from # the boto config files, we will manually enable logging to /var/log/boto.log boto.set_file_logger('bootstrap', '/var/log/boto.log') bs = Bootstrap() bs.main()
    mit
    voidbridge/electron
    script/dump-symbols.py
    22
    1970
    #!/usr/bin/env python import os import sys from lib.config import PLATFORM from lib.util import electron_gyp, execute, rm_rf SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) DIST_DIR = os.path.join(SOURCE_ROOT, 'dist') OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R') CHROMIUM_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor', 'download', 'libchromiumcontent', 'static_library') def main(destination): # if PLATFORM == 'win32': # register_required_dll() rm_rf(destination) (project_name, product_name) = get_names_from_gyp() if PLATFORM in ['darwin', 'linux']: generate_breakpad_symbols = os.path.join(SOURCE_ROOT, 'tools', 'posix', 'generate_breakpad_symbols.py') if PLATFORM == 'darwin': start = os.path.join(OUT_DIR, '{0}.app'.format(product_name), 'Contents', 'MacOS', product_name) else: start = os.path.join(OUT_DIR, project_name) args = [ '--build-dir={0}'.format(OUT_DIR), '--binary={0}'.format(start), '--symbols-dir={0}'.format(destination), '--libchromiumcontent-dir={0}'.format(CHROMIUM_DIR), '--clear', '--jobs=16', ] else: generate_breakpad_symbols = os.path.join(SOURCE_ROOT, 'tools', 'win', 'generate_breakpad_symbols.py') args = [ '--symbols-dir={0}'.format(destination), '--jobs=16', os.path.relpath(OUT_DIR), ] execute([sys.executable, generate_breakpad_symbols] + args) def register_required_dll(): register = os.path.join(SOURCE_ROOT, 'tools', 'win', 'register_msdia80_dll.js') execute(['node.exe', os.path.relpath(register)]); def get_names_from_gyp(): variables = electron_gyp() return (variables['project_name%'], variables['product_name%']) if __name__ == '__main__': sys.exit(main(sys.argv[1]))
    mit
    jmuhlich/django-conferenceabstract
    setup.py
    1
    1148
    # Based on Django "How to write reusable apps" tutorial. import os import setuptools README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read() # allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setuptools.setup( name='django-conferenceabstract', version='0.1', packages=setuptools.find_packages(), include_package_data=True, license='BSD License', description='A Django app to manage conference abstract submissions.', long_description=README, #url='http://www.example.com/', author='Jeremy Muhlich', author_email='[email protected]', classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', ], )
    mit
    GameTheory-/android_kernel_lge_l1m
    arch/ia64/scripts/unwcheck.py
    13143
    1714
    #!/usr/bin/python # # Usage: unwcheck.py FILE # # This script checks the unwind info of each function in file FILE # and verifies that the sum of the region-lengths matches the total # length of the function. # # Based on a shell/awk script originally written by Harish Patil, # which was converted to Perl by Matthew Chapman, which was converted # to Python by David Mosberger. # import os import re import sys if len(sys.argv) != 2: print "Usage: %s FILE" % sys.argv[0] sys.exit(2) readelf = os.getenv("READELF", "readelf") start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]") rlen_pattern = re.compile(".*rlen=([0-9]+)") def check_func (func, slots, rlen_sum): if slots != rlen_sum: global num_errors num_errors += 1 if not func: func = "[%#x-%#x]" % (start, end) print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum) return num_funcs = 0 num_errors = 0 func = False slots = 0 rlen_sum = 0 for line in os.popen("%s -u %s" % (readelf, sys.argv[1])): m = start_pattern.match(line) if m: check_func(func, slots, rlen_sum) func = m.group(1) start = long(m.group(2), 16) end = long(m.group(3), 16) slots = 3 * (end - start) / 16 rlen_sum = 0L num_funcs += 1 else: m = rlen_pattern.match(line) if m: rlen_sum += long(m.group(1)) check_func(func, slots, rlen_sum) if num_errors == 0: print "No errors detected in %u functions." % num_funcs else: if num_errors > 1: err="errors" else: err="error" print "%u %s detected in %u functions." % (num_errors, err, num_funcs) sys.exit(1)
    gpl-2.0
    froch/kubernetes-py
    tests/test_k8s_pet_set.py
    3
    1866
    #!/usr/bin/env python # -*- coding: utf-8 -*- # # This file is subject to the terms and conditions defined in # file 'LICENSE.md', which is part of this source code package. # import unittest from tests import _utils from tests.BaseTest import BaseTest from kubernetes_py.K8sPetSet import K8sPetSet def should_skip(): config = _utils.create_config() return _utils.assert_server_version(api_host=config.api_host, major=1, minor=4) class K8sPetSetTests(BaseTest): def setUp(self): _utils.cleanup_petsets() _utils.cleanup_pods() def tearDown(self): _utils.cleanup_petsets() _utils.cleanup_pods() # --------------------------------------------------------------------------------- init @unittest.skipUnless(should_skip(), "Incorrect Server Version") def test_init_no_args(self): try: K8sPetSet() self.fail("Should not fail.") except SyntaxError: pass except IOError: pass except Exception as err: self.fail("Unhandled exception: [ {0} ]".format(err.__class__.__name__)) @unittest.skipUnless(should_skip(), "Incorrect Server Version") def test_init_with_invalid_config(self): config = object() with self.assertRaises(SyntaxError): K8sPetSet(config=config) @unittest.skipUnless(should_skip(), "Incorrect Server Version") def test_init_with_invalid_name(self): name = object() with self.assertRaises(SyntaxError): _utils.create_petset(name=name) @unittest.skipUnless(should_skip(), "Incorrect Server Version") def test_init_with_name(self): name = "yomama" rc = _utils.create_petset(name=name) self.assertIsNotNone(rc) self.assertIsInstance(rc, K8sPetSet) self.assertEqual(rc.name, name)
    apache-2.0
    ConeyLiu/spark
    examples/src/main/python/ml/standard_scaler_example.py
    128
    1594
    # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function # $example on$ from pyspark.ml.feature import StandardScaler # $example off$ from pyspark.sql import SparkSession if __name__ == "__main__": spark = SparkSession\ .builder\ .appName("StandardScalerExample")\ .getOrCreate() # $example on$ dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt") scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures", withStd=True, withMean=False) # Compute summary statistics by fitting the StandardScaler scalerModel = scaler.fit(dataFrame) # Normalize each feature to have unit standard deviation. scaledData = scalerModel.transform(dataFrame) scaledData.show() # $example off$ spark.stop()
    apache-2.0
    yetsky/extra
    packages/my-application/python-all/files/usr/lib/python2.7/encodings/iso8859_13.py
    593
    13527
    """ Python Character Mapping Codec iso8859_13 generated from 'MAPPINGS/ISO8859/8859-13.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-13', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' # 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\x80' # 0x80 -> <control> u'\x81' # 0x81 -> <control> u'\x82' # 0x82 -> <control> u'\x83' # 0x83 -> <control> u'\x84' # 0x84 -> <control> u'\x85' # 0x85 -> <control> u'\x86' # 0x86 -> <control> u'\x87' # 0x87 -> <control> u'\x88' # 0x88 -> <control> u'\x89' # 0x89 -> <control> u'\x8a' # 0x8A -> <control> u'\x8b' # 0x8B -> <control> u'\x8c' # 0x8C -> <control> u'\x8d' # 0x8D -> <control> u'\x8e' # 0x8E -> <control> u'\x8f' # 0x8F -> <control> u'\x90' # 0x90 -> <control> u'\x91' # 0x91 -> <control> u'\x92' # 0x92 -> <control> u'\x93' # 0x93 -> <control> u'\x94' # 0x94 -> <control> u'\x95' # 0x95 -> <control> u'\x96' # 0x96 -> <control> u'\x97' # 0x97 -> <control> u'\x98' # 0x98 -> <control> u'\x99' # 0x99 -> <control> u'\x9a' # 0x9A -> <control> u'\x9b' # 0x9B -> <control> u'\x9c' # 0x9C -> <control> u'\x9d' # 0x9D -> <control> u'\x9e' # 0x9E -> <control> u'\x9f' # 0x9F -> <control> u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\u201d' # 0xA1 -> RIGHT DOUBLE QUOTATION MARK u'\xa2' # 0xA2 -> CENT SIGN u'\xa3' # 0xA3 -> POUND SIGN u'\xa4' # 0xA4 -> CURRENCY SIGN u'\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK u'\xa6' # 0xA6 -> BROKEN BAR u'\xa7' # 0xA7 -> SECTION SIGN u'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xac' # 0xAC -> NOT SIGN u'\xad' # 0xAD -> SOFT HYPHEN u'\xae' # 0xAE -> REGISTERED SIGN u'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE u'\xb0' # 0xB0 -> DEGREE SIGN u'\xb1' # 0xB1 -> PLUS-MINUS SIGN u'\xb2' # 0xB2 -> SUPERSCRIPT TWO u'\xb3' # 0xB3 -> SUPERSCRIPT THREE u'\u201c' # 0xB4 -> LEFT DOUBLE QUOTATION MARK u'\xb5' # 0xB5 -> MICRO SIGN u'\xb6' # 0xB6 -> PILCROW SIGN u'\xb7' # 0xB7 -> MIDDLE DOT u'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE u'\xb9' # 0xB9 -> SUPERSCRIPT ONE u'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS u'\xe6' # 0xBF -> LATIN SMALL LETTER AE u'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK u'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK u'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON u'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE u'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK u'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE u'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE u'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE u'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA u'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA u'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON u'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA u'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE u'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE u'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xd7' # 0xD7 -> MULTIPLICATION SIGN u'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK u'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE u'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE u'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE u'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German) u'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK u'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK u'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON u'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE u'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK u'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE u'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE u'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE u'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA u'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA u'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON u'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA u'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE u'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE u'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS u'\xf7' # 0xF7 -> DIVISION SIGN u'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK u'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE u'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE u'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE u'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON u'\u2019' # 0xFF -> RIGHT SINGLE QUOTATION MARK ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
    gpl-2.0
    mawentao007/reading_grab
    example/spider.py
    1
    1127
    # coding:utf-8 # crawl github project list from grab import Grab import logging logging.basicConfig(level=logging.DEBUG) g = Grab() g.go('https://github.com/login') print g.doc.form g.doc.set_input('login', '[email protected]') g.doc.set_input('password', '') g.doc.submit() g.doc.save('/tmp/x.html') home_url = g.doc('//a[contains(@class, "header-nav-link name")]/@href').text() repo_url = home_url + '?tab=repositories' g.go(repo_url) for elem in g.doc.select('//h3[@class="repo-list-name"]/a'): print('%s: %s' % (elem.text(), g.make_url_absolute(elem.attr('href')))) # from grab.spider import Spider, Task # import logging # # class ExampleSpider(Spider): # def task_generator(self): # for lang in ('python', 'ruby', 'perl'): # url = 'https://www.google.com/search?q=%s' % lang # yield Task('search', url=url, lang=lang) # # def task_search(self, grab, task): # print('%s: %s' % (task.lang, # grab.doc('//div[@class="s"]//cite').text())) # # # logging.basicConfig(level=logging.DEBUG) # bot = ExampleSpider() # bot.run()
    mit
    StephenWeber/ansible
    lib/ansible/modules/network/nxos/nxos_igmp.py
    6
    7707
    #!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: nxos_igmp extends_documentation_fragment: nxos version_added: "2.2" short_description: Manages IGMP global configuration. description: - Manages IGMP global configuration configuration settings. author: - Jason Edelman (@jedelman8) - Gabriele Gerbino (@GGabriele) notes: - When C(state=default), all supported params will be reset to a default state. - If restart is set to true with other params set, the restart will happen last, i.e. after the configuration takes place. options: flush_routes: description: - Removes routes when the IGMP process is restarted. By default, routes are not flushed. required: false default: null choices: ['true', 'false'] enforce_rtr_alert: description: - Enables or disables the enforce router alert option check for IGMPv2 and IGMPv3 packets. required: false default: null choices: ['true', 'false'] restart: description: - Restarts the igmp process (using an exec config command). required: false default: null choices: ['true', 'false'] state: description: - Manages desired state of the resource. required: false default: present choices: ['present', 'default'] ''' EXAMPLES = ''' - name: Default igmp global params (all params except restart) nxos_igmp: state: default host: "{{ inventory_hostname }}" - name: Ensure the following igmp global config exists on the device nxos_igmp: flush_routes: true enforce_rtr_alert: true host: "{{ inventory_hostname }}" - name: Restart the igmp process nxos_igmp: restart: true host: "{{ inventory_hostname }}" ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: verbose mode type: dict sample: {"enforce_rtr_alert": true, "flush_routes": true} existing: description: k/v pairs of existing IGMP configuration returned: verbose mode type: dict sample: {"enforce_rtr_alert": true, "flush_routes": false} end_state: description: k/v pairs of IGMP configuration after module execution returned: verbose mode type: dict sample: {"enforce_rtr_alert": true, "flush_routes": true} updates: description: commands sent to the device returned: always type: list sample: ["ip igmp flush-routes"] changed: description: check to see if a change was made on the device returned: always type: boolean sample: true ''' import re from ansible.module_utils.nxos import get_config, load_config, run_commands from ansible.module_utils.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.netcfg import CustomNetworkConfig PARAM_TO_COMMAND_KEYMAP = { 'flush_routes': 'ip igmp flush-routes', 'enforce_rtr_alert': 'ip igmp enforce-router-alert' } def get_value(arg, config): REGEX = re.compile(r'{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) value = False try: if REGEX.search(config): value = True except TypeError: value = False return value def get_existing(module, args): existing = {} config = str(get_config(module)) for arg in args: existing[arg] = get_value(arg, config) return existing def invoke(name, *args, **kwargs): func = globals().get(name) if func: return func(*args, **kwargs) def get_commands(module, existing, proposed, candidate): commands = list() proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) if module.params['state'] == 'default': for key, value in proposed_commands.items(): if existing_commands.get(key): commands.append('no {0}'.format(key)) else: for key, value in proposed_commands.items(): if value is True: commands.append(key) else: if existing_commands.get(key): commands.append('no {0}'.format(key)) if module.params['restart']: commands.append('restart igmp') if commands: parents = [] candidate.add(commands, parents=parents) def apply_key_map(key_map, table): new_dict = {} for key, value in table.items(): new_key = key_map.get(key) if new_key: value = table.get(key) if value: new_dict[new_key] = value else: new_dict[new_key] = value return new_dict def main(): argument_spec = dict( flush_routes=dict(type='bool'), enforce_rtr_alert=dict(type='bool'), restart=dict(type='bool', default=False), state=dict(choices=['present', 'default'], default='present'), include_defaults=dict(default=False), config=dict(), save=dict(type='bool', default=False) ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() check_args(module, warnings) state = module.params['state'] restart = module.params['restart'] if (state == 'default' and (module.params['flush_routes'] is not None or module.params['enforce_rtr_alert'] is not None)): module.fail_json(msg='When state=default other params have no effect.') args = [ "flush_routes", "enforce_rtr_alert", ] existing = invoke('get_existing', module, args) end_state = existing proposed = dict((k, v) for k, v in module.params.items() if v is not None and k in args) proposed_args = proposed.copy() if state == 'default': proposed_args = dict((k, False) for k in args) result = {} if (state == 'present' or (state == 'default' and True in existing.values()) or restart): candidate = CustomNetworkConfig(indent=3) invoke('get_commands', module, existing, proposed_args, candidate) try: response = load_config(module, candidate) result.update(response) except ShellError: exc = get_exception() module.fail_json(msg=str(exc)) else: result['updates'] = [] if restart: proposed['restart'] = restart result['connected'] = module.connected if module._verbosity > 0: end_state = invoke('get_existing', module, args) result['end_state'] = end_state result['existing'] = existing result['proposed'] = proposed result['warnings'] = warnings module.exit_json(**result) if __name__ == '__main__': main()
    gpl-3.0
    GageGaskins/osf.io
    tests/factories.py
    1
    20787
    # -*- coding: utf-8 -*- """Factories for the OSF models, including an abstract ModularOdmFactory. Example usage: :: >>> from tests.factories import UserFactory >>> user1 = UserFactory() >>> user1.username [email protected] >>> user2 = UserFactory() [email protected] Factory boy docs: http://factoryboy.readthedocs.org/ """ import datetime import functools from factory import base, Sequence, SubFactory, post_generation, LazyAttribute from mock import patch, Mock from modularodm import Q from modularodm.exceptions import NoResultsFound from framework.mongo import StoredObject from framework.auth import User, Auth from framework.auth.utils import impute_names_model from framework.sessions.model import Session from website.addons import base as addons_base from website.files.models import StoredFileNode from website.oauth.models import ( ApiOAuth2Application, ApiOAuth2PersonalToken, ExternalAccount, ExternalProvider ) from website.models import Institution from website.project.model import ( Comment, DraftRegistration, Embargo, MetaSchema, Node, NodeLog, Pointer, PrivateLink, RegistrationApproval, Retraction, Sanction, Tag, WatchConfig, AlternativeCitation, ensure_schemas ) from website.notifications.model import NotificationSubscription, NotificationDigest from website.archiver.model import ArchiveTarget, ArchiveJob from website.archiver import ARCHIVER_SUCCESS from website.project.licenses import NodeLicense, NodeLicenseRecord, ensure_licenses ensure_licenses = functools.partial(ensure_licenses, warn=False) from website.addons.wiki.model import NodeWikiPage from website.util import permissions from tests.base import fake from tests.base import get_default_metaschema # TODO: This is a hack. Check whether FactoryBoy can do this better def save_kwargs(**kwargs): for value in kwargs.itervalues(): if isinstance(value, StoredObject) and not value._is_loaded: value.save() def FakerAttribute(provider, **kwargs): """Attribute that lazily generates a value using the Faker library. Example: :: class UserFactory(ModularOdmFactory): name = FakerAttribute('name') """ fake_gen = getattr(fake, provider) if not fake_gen: raise ValueError('{0!r} is not a valid faker provider.'.format(provider)) return LazyAttribute(lambda x: fake_gen(**kwargs)) class ModularOdmFactory(base.Factory): """Base factory for modular-odm objects. """ ABSTRACT_FACTORY = True @classmethod def _build(cls, target_class, *args, **kwargs): """Build an object without saving it.""" save_kwargs(**kwargs) return target_class(*args, **kwargs) @classmethod def _create(cls, target_class, *args, **kwargs): save_kwargs(**kwargs) instance = target_class(*args, **kwargs) instance.save() return instance class UserFactory(ModularOdmFactory): FACTORY_FOR = User username = Sequence(lambda n: "fred{0}@example.com".format(n)) # Don't use post generation call to set_password because # It slows down the tests dramatically password = "password" fullname = Sequence(lambda n: "Freddie Mercury{0}".format(n)) is_registered = True is_claimed = True date_confirmed = datetime.datetime(2014, 2, 21) merged_by = None email_verifications = {} verification_key = None @post_generation def set_names(self, create, extracted): parsed = impute_names_model(self.fullname) for key, value in parsed.items(): setattr(self, key, value) if create: self.save() @post_generation def set_emails(self, create, extracted): if self.username not in self.emails: self.emails.append(self.username) self.save() class AuthUserFactory(UserFactory): """A user that automatically has an api key, for quick authentication. Example: :: user = AuthUserFactory() res = self.app.get(url, auth=user.auth) # user is "logged in" """ @post_generation def add_auth(self, create, extracted): self.set_password('password') self.save() self.auth = (self.username, 'password') class TagFactory(ModularOdmFactory): FACTORY_FOR = Tag _id = Sequence(lambda n: "scientastic-{}".format(n)) class ApiOAuth2ApplicationFactory(ModularOdmFactory): FACTORY_FOR = ApiOAuth2Application owner = SubFactory(UserFactory) name = Sequence(lambda n: 'Example OAuth2 Application #{}'.format(n)) home_url = 'ftp://ftp.ncbi.nlm.nimh.gov/' callback_url = 'http://example.uk' class ApiOAuth2PersonalTokenFactory(ModularOdmFactory): FACTORY_FOR = ApiOAuth2PersonalToken owner = SubFactory(UserFactory) scopes = 'osf.full_write osf.full_read' name = Sequence(lambda n: 'Example OAuth2 Personal Token #{}'.format(n)) class PrivateLinkFactory(ModularOdmFactory): FACTORY_FOR = PrivateLink name = "link" key = Sequence(lambda n: 'foobar{}'.format(n)) anonymous = False creator = SubFactory(AuthUserFactory) class AbstractNodeFactory(ModularOdmFactory): FACTORY_FOR = Node title = 'The meaning of life' description = 'The meaning of life is 42.' creator = SubFactory(AuthUserFactory) class ProjectFactory(AbstractNodeFactory): category = 'project' class FolderFactory(ProjectFactory): is_folder = True class DashboardFactory(FolderFactory): is_dashboard = True class NodeFactory(AbstractNodeFactory): category = 'hypothesis' parent = SubFactory(ProjectFactory) class RegistrationFactory(AbstractNodeFactory): creator = None # Default project is created if not provided category = 'project' @classmethod def _build(cls, target_class, *args, **kwargs): raise Exception("Cannot build registration without saving.") @classmethod def _create(cls, target_class, project=None, is_public=False, schema=None, data=None, archive=False, embargo=None, registration_approval=None, retraction=None, *args, **kwargs): save_kwargs(**kwargs) user = None if project: user = project.creator user = kwargs.get('user') or kwargs.get('creator') or user or UserFactory() kwargs['creator'] = user # Original project to be registered project = project or target_class(*args, **kwargs) if user._id not in project.permissions: project.add_contributor( contributor=user, permissions=permissions.CREATOR_PERMISSIONS, log=False, save=False ) project.save() # Default registration parameters schema = schema or get_default_metaschema() data = data or {'some': 'data'} auth = Auth(user=user) register = lambda: project.register_node( schema=schema, auth=auth, data=data ) def add_approval_step(reg): if embargo: reg.embargo = embargo elif registration_approval: reg.registration_approval = registration_approval elif retraction: reg.retraction = retraction else: reg.require_approval(reg.creator) reg.save() reg.sanction.add_authorizer(reg.creator, reg) reg.sanction.save() if archive: reg = register() add_approval_step(reg) else: with patch('framework.tasks.handlers.enqueue_task'): reg = register() add_approval_step(reg) with patch.object(reg.archive_job, 'archive_tree_finished', Mock(return_value=True)): reg.archive_job.status = ARCHIVER_SUCCESS reg.archive_job.save() reg.sanction.state = Sanction.APPROVED reg.sanction.save() ArchiveJob( src_node=project, dst_node=reg, initiator=user, ) if is_public: reg.is_public = True reg.save() return reg class RetractedRegistrationFactory(AbstractNodeFactory): @classmethod def _create(cls, *args, **kwargs): registration = kwargs.pop('registration', None) registration.is_public = True user = kwargs.pop('user', registration.creator) registration.retract_registration(user) retraction = registration.retraction token = retraction.approval_state.values()[0]['approval_token'] retraction.approve_retraction(user, token) retraction.save() return retraction class PointerFactory(ModularOdmFactory): FACTORY_FOR = Pointer node = SubFactory(NodeFactory) class NodeLogFactory(ModularOdmFactory): FACTORY_FOR = NodeLog action = 'file_added' user = SubFactory(UserFactory) class WatchConfigFactory(ModularOdmFactory): FACTORY_FOR = WatchConfig node = SubFactory(NodeFactory) class SanctionFactory(ModularOdmFactory): ABSTRACT_FACTORY = True @classmethod def _create(cls, target_class, approve=False, *args, **kwargs): user = kwargs.get('user') or UserFactory() sanction = ModularOdmFactory._create(target_class, initiated_by=user, *args, **kwargs) reg_kwargs = { 'creator': user, 'user': user, sanction.SHORT_NAME: sanction } RegistrationFactory(**reg_kwargs) if not approve: sanction.state = Sanction.UNAPPROVED sanction.save() return sanction class RetractionFactory(SanctionFactory): FACTORY_FOR = Retraction user = SubFactory(UserFactory) class EmbargoFactory(SanctionFactory): FACTORY_FOR = Embargo user = SubFactory(UserFactory) class RegistrationApprovalFactory(SanctionFactory): FACTORY_FOR = RegistrationApproval user = SubFactory(UserFactory) class NodeWikiFactory(ModularOdmFactory): FACTORY_FOR = NodeWikiPage page_name = 'home' content = 'Some content' version = 1 user = SubFactory(UserFactory) node = SubFactory(NodeFactory) @post_generation def set_node_keys(self, create, extracted): self.node.wiki_pages_current[self.page_name] = self._id self.node.wiki_pages_versions[self.page_name] = [self._id] self.node.save() class UnregUserFactory(ModularOdmFactory): """Factory for an unregistered user. Uses User.create_unregistered() to create an instance. """ FACTORY_FOR = User email = Sequence(lambda n: "brian{0}@queen.com".format(n)) fullname = Sequence(lambda n: "Brian May{0}".format(n)) @classmethod def _build(cls, target_class, *args, **kwargs): '''Build an object without saving it.''' return target_class.create_unregistered(*args, **kwargs) @classmethod def _create(cls, target_class, *args, **kwargs): instance = target_class.create_unregistered(*args, **kwargs) instance.save() return instance class UnconfirmedUserFactory(ModularOdmFactory): """Factory for a user that has not yet confirmed their primary email address (username). """ FACTORY_FOR = User username = Sequence(lambda n: 'roger{0}@queen.com'.format(n)) fullname = Sequence(lambda n: 'Roger Taylor{0}'.format(n)) password = 'killerqueen' @classmethod def _build(cls, target_class, username, password, fullname): '''Build an object without saving it.''' return target_class.create_unconfirmed( username=username, password=password, fullname=fullname ) @classmethod def _create(cls, target_class, username, password, fullname): instance = target_class.create_unconfirmed( username=username, password=password, fullname=fullname ) instance.save() return instance class AuthFactory(base.Factory): FACTORY_FOR = Auth user = SubFactory(UserFactory) class ProjectWithAddonFactory(ProjectFactory): """Factory for a project that has an addon. The addon will be added to both the Node and the creator records. :: p = ProjectWithAddonFactory(addon='github') p.get_addon('github') # => github node settings object p.creator.get_addon('github') # => github user settings object """ # TODO: Should use mock addon objects @classmethod def _build(cls, target_class, addon='s3', *args, **kwargs): '''Build an object without saving it.''' instance = ProjectFactory._build(target_class, *args, **kwargs) auth = Auth(user=instance.creator) instance.add_addon(addon, auth) instance.creator.add_addon(addon) return instance @classmethod def _create(cls, target_class, addon='s3', *args, **kwargs): instance = ProjectFactory._create(target_class, *args, **kwargs) auth = Auth(user=instance.creator) instance.add_addon(addon, auth) instance.creator.add_addon(addon) instance.save() return instance # Deprecated unregistered user factory, used mainly for testing migration class DeprecatedUnregUser(object): '''A dummy "model" for an unregistered user.''' def __init__(self, nr_name, nr_email): self.nr_name = nr_name self.nr_email = nr_email def to_dict(self): return {"nr_name": self.nr_name, "nr_email": self.nr_email} class DeprecatedUnregUserFactory(base.Factory): """Generates a dictonary represenation of an unregistered user, in the format expected by the OSF. :: >>> from tests.factories import UnregUserFactory >>> UnregUserFactory() {'nr_name': 'Tom Jones0', 'nr_email': '[email protected]'} >>> UnregUserFactory() {'nr_name': 'Tom Jones1', 'nr_email': '[email protected]'} """ FACTORY_FOR = DeprecatedUnregUser nr_name = Sequence(lambda n: "Tom Jones{0}".format(n)) nr_email = Sequence(lambda n: "tom{0}@example.com".format(n)) @classmethod def _create(cls, target_class, *args, **kwargs): return target_class(*args, **kwargs).to_dict() _build = _create class CommentFactory(ModularOdmFactory): FACTORY_FOR = Comment content = Sequence(lambda n: 'Comment {0}'.format(n)) is_public = True @classmethod def _build(cls, target_class, *args, **kwargs): node = kwargs.pop('node', None) or NodeFactory() user = kwargs.pop('user', None) or node.creator target = kwargs.pop('target', None) or node content = kwargs.pop('content', None) or 'Test comment.' instance = target_class( node=node, user=user, target=target, content=content, *args, **kwargs ) if isinstance(target, target_class): instance.root_target = target.root_target else: instance.root_target = target if isinstance(instance.root_target, StoredFileNode): file_id = instance.root_target._id instance.node.commented_files[file_id] = instance.node.commented_files.get(file_id, 0) + 1 return instance @classmethod def _create(cls, target_class, *args, **kwargs): node = kwargs.pop('node', None) or NodeFactory() user = kwargs.pop('user', None) or node.creator target = kwargs.pop('target', None) or node content = kwargs.pop('content', None) or 'Test comment.' instance = target_class( node=node, user=user, target=target, content=content, *args, **kwargs ) if isinstance(target, target_class): instance.root_target = target.root_target else: instance.root_target = target if isinstance(instance.root_target, StoredFileNode): file_id = instance.root_target._id instance.node.commented_files[file_id] = instance.node.commented_files.get(file_id, 0) + 1 instance.node.save() instance.save() return instance class InstitutionFactory(ModularOdmFactory): FACTORY_FOR = Institution _id = Sequence(lambda n: "S{}".format(n)) name = Sequence(lambda n: "School{}".format(n)) logo_name = 'logo.img' auth_url = 'http://thisIsUrl.biz' class NotificationSubscriptionFactory(ModularOdmFactory): FACTORY_FOR = NotificationSubscription class NotificationDigestFactory(ModularOdmFactory): FACTORY_FOR = NotificationDigest class ExternalAccountFactory(ModularOdmFactory): FACTORY_FOR = ExternalAccount provider = 'mock2' provider_id = Sequence(lambda n: 'user-{0}'.format(n)) provider_name = 'Fake Provider' display_name = Sequence(lambda n: 'user-{0}'.format(n)) class SessionFactory(ModularOdmFactory): FACTORY_FOR = Session @classmethod def _build(cls, target_class, *args, **kwargs): user = kwargs.pop('user', None) instance = target_class(*args, **kwargs) if user: instance.data['auth_user_username'] = user.username instance.data['auth_user_id'] = user._primary_key instance.data['auth_user_fullname'] = user.fullname return instance @classmethod def _create(cls, target_class, *args, **kwargs): instance = cls._build(target_class, *args, **kwargs) instance.save() return instance class MockOAuth2Provider(ExternalProvider): name = "Mock OAuth 2.0 Provider" short_name = "mock2" client_id = "mock2_client_id" client_secret = "mock2_client_secret" auth_url_base = "https://mock2.com/auth" callback_url = "https://mock2.com/callback" auto_refresh_url = "https://mock2.com/callback" refresh_time = 300 def handle_callback(self, response): return { 'provider_id': 'mock_provider_id' } class MockAddonNodeSettings(addons_base.AddonNodeSettingsBase): pass class MockAddonUserSettings(addons_base.AddonUserSettingsBase): pass class MockAddonUserSettingsMergeable(addons_base.AddonUserSettingsBase): def merge(self): pass class MockOAuthAddonUserSettings(addons_base.AddonOAuthUserSettingsBase): oauth_provider = MockOAuth2Provider class MockOAuthAddonNodeSettings(addons_base.AddonOAuthNodeSettingsBase): oauth_provider = MockOAuth2Provider folder_id = 'foo' folder_name = 'Foo' folder_path = '/Foo' class ArchiveTargetFactory(ModularOdmFactory): FACTORY_FOR = ArchiveTarget class ArchiveJobFactory(ModularOdmFactory): FACTORY_FOR = ArchiveJob class AlternativeCitationFactory(ModularOdmFactory): FACTORY_FOR = AlternativeCitation @classmethod def _create(cls, target_class, *args, **kwargs): name = kwargs.get('name') text = kwargs.get('text') instance = target_class( name=name, text=text ) instance.save() return instance class DraftRegistrationFactory(ModularOdmFactory): FACTORY_FOR = DraftRegistration @classmethod def _create(cls, *args, **kwargs): branched_from = kwargs.get('branched_from') initiator = kwargs.get('initiator') registration_schema = kwargs.get('registration_schema') registration_metadata = kwargs.get('registration_metadata') if not branched_from: project_params = {} if initiator: project_params['creator'] = initiator branched_from = ProjectFactory(**project_params) initiator = branched_from.creator try: registration_schema = registration_schema or MetaSchema.find()[0] except IndexError: ensure_schemas() registration_metadata = registration_metadata or {} draft = DraftRegistration.create_from_node( branched_from, user=initiator, schema=registration_schema, data=registration_metadata, ) return draft class NodeLicenseRecordFactory(ModularOdmFactory): FACTORY_FOR = NodeLicenseRecord @classmethod def _create(cls, *args, **kwargs): try: NodeLicense.find_one( Q('name', 'eq', 'No license') ) except NoResultsFound: ensure_licenses() kwargs['node_license'] = kwargs.get( 'node_license', NodeLicense.find_one( Q('name', 'eq', 'No license') ) ) return super(NodeLicenseRecordFactory, cls)._create(*args, **kwargs)
    apache-2.0
    ferhatelmas/sexmachine
    sexmachine/detector.py
    1
    4446
    import os.path import codecs from .mapping import map_name class NoCountryError(Exception): """Raised when non-supported country is queried""" pass class Detector: """Get gender by first name""" COUNTRIES = u"""great_britain ireland usa italy malta portugal spain france belgium luxembourg the_netherlands east_frisia germany austria swiss iceland denmark norway sweden finland estonia latvia lithuania poland czech_republic slovakia hungary romania bulgaria bosniaand croatia kosovo macedonia montenegro serbia slovenia albania greece russia belarus moldova ukraine armenia azerbaijan georgia the_stans turkey arabia israel china india japan korea vietnam other_countries """.split() def __init__(self, case_sensitive=True, unknown_value=u"andy"): """Creates a detector parsing given data file""" self.case_sensitive = case_sensitive self.unknown_value = unknown_value self._parse(os.path.join(os.path.dirname(__file__), "data/nam_dict.txt")) def _parse(self, filename): """Opens data file and for each line, calls _eat_name_line""" self.names = {} with codecs.open(filename, encoding="iso8859-1") as f: for line in f: if any(map(lambda c: 128 < ord(c) < 160, line)): line = line.encode("iso8859-1").decode("windows-1252") self._eat_name_line(line.strip()) def _eat_name_line(self, line): """Parses one line of data file""" if line[0] not in "#=": parts = line.split() country_values = line[30:-1] name = map_name(parts[1]) if not self.case_sensitive: name = name.lower() if parts[0] == "M": self._set(name, u"male", country_values) elif parts[0] == "1M" or parts[0] == "?M": self._set(name, u"mostly_male", country_values) elif parts[0] == "F": self._set(name, u"female", country_values) elif parts[0] == "1F" or parts[0] == "?F": self._set(name, u"mostly_female", country_values) elif parts[0] == "?": self._set(name, self.unknown_value, country_values) else: raise "Not sure what to do with a sex of %s" % parts[0] def _set(self, name, gender, country_values): """Sets gender and relevant country values for names dictionary of detector""" if '+' in name: for replacement in ['', ' ', '-']: self._set(name.replace('+', replacement), gender, country_values) else: if name not in self.names: self.names[name] = {} self.names[name][gender] = country_values def _most_popular_gender(self, name, counter): """Finds the most popular gender for the given name counting by given counter""" if name not in self.names: return self.unknown_value max_count, max_tie = (0, 0) best = self.names[name].keys()[0] for gender, country_values in self.names[name].items(): count, tie = counter(country_values) if count > max_count or (count == max_count and tie > max_tie): max_count, max_tie, best = count, tie, gender return best if max_count > 0 else self.unknown_value def get_gender(self, name, country=None): """Returns best gender for the given name and country pair""" if not self.case_sensitive: name = name.lower() if name not in self.names: return self.unknown_value elif not country: def counter(country_values): country_values = map(ord, country_values.replace(" ", "")) return (len(country_values), sum(map(lambda c: c > 64 and c-55 or c-48, country_values))) return self._most_popular_gender(name, counter) elif country in self.__class__.COUNTRIES: index = self.__class__.COUNTRIES.index(country) counter = lambda e: (ord(e[index])-32, 0) return self._most_popular_gender(name, counter) else: raise NoCountryError("No such country: %s" % country)
    gpl-3.0
    zhangjunlei26/servo
    tests/wpt/harness/test/test.py
    5
    5079
    import ConfigParser import argparse import json import os import sys import tempfile import threading import time from StringIO import StringIO from mozlog.structured import structuredlog, reader from mozlog.structured.handlers import BaseHandler, StreamHandler, StatusHandler from mozlog.structured.formatters import MachFormatter from wptrunner import wptcommandline, wptrunner here = os.path.abspath(os.path.dirname(__file__)) def setup_wptrunner_logging(logger): structuredlog.set_default_logger(logger) wptrunner.logger = logger wptrunner.wptlogging.setup_stdlib_logger() class ResultHandler(BaseHandler): def __init__(self, verbose=False, logger=None): self.inner = StreamHandler(sys.stdout, MachFormatter()) BaseHandler.__init__(self, self.inner) self.product = None self.verbose = verbose self.logger = logger self.register_message_handlers("wptrunner-test", {"set-product": self.set_product}) def set_product(self, product): self.product = product def __call__(self, data): if self.product is not None and data["action"] in ["suite_start", "suite_end"]: # Hack: mozlog sets some internal state to prevent multiple suite_start or # suite_end messages. We actually want that here (one from the metaharness # and one from the individual test type harness), so override that internal # state (a better solution might be to not share loggers, but this works well # enough) self.logger._state.suite_started = True return if (not self.verbose and (data["action"] == "process_output" or data["action"] == "log" and data["level"] not in ["error", "critical"])): return if "test" in data: data = data.copy() data["test"] = "%s: %s" % (self.product, data["test"]) return self.inner(data) def test_settings(): return { "include": "_test", "manifest-update": "", "no-capture-stdio": "" } def read_config(): parser = ConfigParser.ConfigParser() parser.read("test.cfg") rv = {"general":{}, "products":{}} rv["general"].update(dict(parser.items("general"))) # This only allows one product per whatever for now for product in parser.sections(): if product != "general": dest = rv["products"][product] = {} for key, value in parser.items(product): rv["products"][product][key] = value return rv def run_tests(product, kwargs): kwargs["test_paths"]["/_test/"] = {"tests_path": os.path.join(here, "testdata"), "metadata_path": os.path.join(here, "metadata")} wptrunner.run_tests(**kwargs) def settings_to_argv(settings): rv = [] for name, value in settings.iteritems(): key = "--%s" % name if not value: rv.append(key) elif isinstance(value, list): for item in value: rv.extend([key, item]) else: rv.extend([key, value]) return rv def set_from_args(settings, args): if args.test: settings["include"] = args.test def run(config, args): logger = structuredlog.StructuredLogger("web-platform-tests") logger.add_handler(ResultHandler(logger=logger, verbose=args.verbose)) setup_wptrunner_logging(logger) parser = wptcommandline.create_parser() logger.suite_start(tests=[]) for product, product_settings in config["products"].iteritems(): if args.product and product not in args.product: continue settings = test_settings() settings.update(config["general"]) settings.update(product_settings) settings["product"] = product set_from_args(settings, args) kwargs = vars(parser.parse_args(settings_to_argv(settings))) wptcommandline.check_args(kwargs) logger.send_message("wptrunner-test", "set-product", product) run_tests(product, kwargs) logger.send_message("wptrunner-test", "set-product", None) logger.suite_end() def get_parser(): parser = argparse.ArgumentParser() parser.add_argument("-v", "--verbose", action="store_true", default=False, help="verbose log output") parser.add_argument("--product", action="append", help="Specific product to include in test run") parser.add_argument("--pdb", action="store_true", help="Invoke pdb on uncaught exception") parser.add_argument("test", nargs="*", help="Specific tests to include in test run") return parser def main(): config = read_config() args = get_parser().parse_args() try: run(config, args) except Exception: if args.pdb: import pdb, traceback print traceback.format_exc() pdb.post_mortem() else: raise if __name__ == "__main__": main()
    mpl-2.0
    mbakker7/ttim
    tests/test_theis.py
    1
    3437
    import numpy as np from scipy.special import exp1 from ttim import * def theis(r, t, T, S, Q): u = r ** 2 * S / (4 * T * t) h = -Q / (4 * np.pi * T) * exp1(u) return h def theisQr(r, t, T, S, Q): u = r ** 2 * S / (4 * T * t) Qr = -Q / (2 * np.pi) * np.exp(-u) / r return Qr T = 500 S = 1e-3 t = np.logspace(-4, 1, 10) r = 30 Q = 788 h1 = theis(r, t, T, S, Q) ml = ModelMaq(kaq=50, z=[10, 0], Saq=S / 10, tmin=1e-4, tmax=10) w = Well(ml, 0, 0, rw=0.3, tsandQ=[(0, Q)]) ml.solve() h2 = ml.head(r, 0, t)[0] assert np.allclose(h1, h2, atol=1e-4), "h1 and h2 not all close Theis well 1" # turn Theis off t = np.logspace(-1, 1, 10) h1 = theis(r, t, T, S, Q) h1[t > 5] -= theis(r, t[t > 5] - 5, T, S, Q) ml = ModelMaq(kaq=50, z=[10, 0], Saq=S / 10, tmin=1e-4, tmax=10) w = Well(ml, 0, 0, rw=0.3, tsandQ=[(0, Q), (5, 0)]) ml.solve() h2 = ml.head(r, 0, t)[0] assert np.allclose(h1, h2, atol=1e-4), "h1 and h2 not all close Theis well 2" # test nan values for Theis well 1 t = np.array([0.08, 0.09, 0.1, 1, 5, 9]) h1 = theis(r, t, T, S, Q) tmin = 0.1 ml = ModelMaq(kaq=50, z=[10, 0], Saq=S / 10, tmin=tmin, tmax=10) w = Well(ml, 0, 0, rw=0.3, tsandQ=[(0, Q)]) ml.solve() h2 = ml.head(r, 0, t)[0] a = np.isnan(h2) b = t < tmin assert np.all(a == b), "nans not in the right spot for tmin" assert np.allclose(h1[~b], h2[~b], atol=1e-4) # test nan values for Theis well 2 t = np.array([0.08, 0.09, 0.1, 1, 5, 5.03, 9]) h1 = theis(r, t, T, S, Q) h1[t > 5] -= theis(r, t[t > 5] - 5, T, S, Q) tmin = 0.1 ml = ModelMaq(kaq=50, z=[10, 0], Saq=S / 10, tmin=tmin, tmax=10) w = Well(ml, 0, 0, rw=0.3, tsandQ=[(0, Q), (5, 0)]) ml.solve() h2 = ml.head(r, 0, t)[0] a = np.isnan(h2) b = (t < tmin) | ((t > 5) & (t < 5.1)) assert np.all(a == b), "nans not in the right spot for tmin" assert np.allclose(h1[~b], h2[~b], atol=1e-4) ### test for Qr ######################## Qr1 = theisQr(r, t, T, S, Q) ml = ModelMaq(kaq=50, z=[10, 0], Saq=S / 10, tmin=1e-4, tmax=10) w = Well(ml, 0, 0, rw=0.3, tsandQ=[(0, Q)]) ml.solve() Qr2 = ml.disvec(r, 0, t)[0][0] assert np.allclose(Qr1, Qr2, atol=1e-4), \ "Qr1 and Qr2 not all close Theis well 1" # turn Theis off t = np.logspace(-1, 1, 10) Qr1 = theisQr(r, t, T, S, Q) Qr1[t > 5] -= theisQr(r, t[t > 5] - 5, T, S, Q) ml = ModelMaq(kaq=50, z=[10, 0], Saq=S / 10, tmin=1e-4, tmax=10) w = Well(ml, 0, 0, rw=0.3, tsandQ=[(0, Q), (5, 0)]) ml.solve() Qr2 = ml.disvec(r, 0, t)[0][0] assert np.allclose(Qr1, Qr2, atol=1e-4), \ "Qr1 and Qr2 not all close Theis well 2" # test nan values for Theis well 1 t = np.array([0.08, 0.09, 0.1, 1, 5, 9]) Qr1 = theisQr(r, t, T, S, Q) tmin = 0.1 ml = ModelMaq(kaq=50, z=[10, 0], Saq=S / 10, tmin=tmin, tmax=10) w = Well(ml, 0, 0, rw=0.3, tsandQ=[(0, Q)]) ml.solve() Qr2 = ml.disvec(r, 0, t)[0][0] a = np.isnan(Qr2) b = t < tmin assert np.all(a == b), "nans not in the right spot for tmin" assert np.allclose(Qr1[~b], Qr2[~b], atol=1e-4) # test nan values for Theis well 2 t = np.array([0.08, 0.09, 0.1, 1, 5, 5.03, 9]) Qr1 = theisQr(r, t, T, S, Q) Qr1[t > 5] -= theisQr(r, t[t > 5] - 5, T, S, Q) tmin = 0.1 ml = ModelMaq(kaq=50, z=[10, 0], Saq=S / 10, tmin=tmin, tmax=10) w = Well(ml, 0, 0, rw=0.3, tsandQ=[(0, Q), (5, 0)]) ml.solve() Qr2 = ml.disvec(r, 0, t)[0][0] a = np.isnan(h2) b = (t < tmin) | ((t > 5) & (t < 5.1)) assert np.all(a == b), "nans not in the right spot for tmin" assert np.allclose(Qr1[~b], Qr2[~b], atol=1e-4)
    mit
    Stevie-Bs/Stevie-Bs-Kodi
    plugin.video.streamoase_ll/mechanize/_opener.py
    133
    14763
    """URL opener. Copyright 2004-2006 John J Lee <[email protected]> This code is free software; you can redistribute it and/or modify it under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt included with the distribution). """ import os, urllib2, bisect, httplib, types, tempfile try: import threading as _threading except ImportError: import dummy_threading as _threading try: set except NameError: import sets set = sets.Set from _request import Request import _response import _rfc3986 import _sockettimeout import _urllib2_fork from _util import isstringlike open_file = open class ContentTooShortError(urllib2.URLError): def __init__(self, reason, result): urllib2.URLError.__init__(self, reason) self.result = result def set_request_attr(req, name, value, default): try: getattr(req, name) except AttributeError: setattr(req, name, default) if value is not default: setattr(req, name, value) class OpenerDirector(_urllib2_fork.OpenerDirector): def __init__(self): _urllib2_fork.OpenerDirector.__init__(self) # really none of these are (sanely) public -- the lack of initial # underscore on some is just due to following urllib2 self.process_response = {} self.process_request = {} self._any_request = {} self._any_response = {} self._handler_index_valid = True self._tempfiles = [] def add_handler(self, handler): if not hasattr(handler, "add_parent"): raise TypeError("expected BaseHandler instance, got %r" % type(handler)) if handler in self.handlers: return # XXX why does self.handlers need to be sorted? bisect.insort(self.handlers, handler) handler.add_parent(self) self._handler_index_valid = False def _maybe_reindex_handlers(self): if self._handler_index_valid: return handle_error = {} handle_open = {} process_request = {} process_response = {} any_request = set() any_response = set() unwanted = [] for handler in self.handlers: added = False for meth in dir(handler): if meth in ["redirect_request", "do_open", "proxy_open"]: # oops, coincidental match continue if meth == "any_request": any_request.add(handler) added = True continue elif meth == "any_response": any_response.add(handler) added = True continue ii = meth.find("_") scheme = meth[:ii] condition = meth[ii+1:] if condition.startswith("error"): jj = meth[ii+1:].find("_") + ii + 1 kind = meth[jj+1:] try: kind = int(kind) except ValueError: pass lookup = handle_error.setdefault(scheme, {}) elif condition == "open": kind = scheme lookup = handle_open elif condition == "request": kind = scheme lookup = process_request elif condition == "response": kind = scheme lookup = process_response else: continue lookup.setdefault(kind, set()).add(handler) added = True if not added: unwanted.append(handler) for handler in unwanted: self.handlers.remove(handler) # sort indexed methods # XXX could be cleaned up for lookup in [process_request, process_response]: for scheme, handlers in lookup.iteritems(): lookup[scheme] = handlers for scheme, lookup in handle_error.iteritems(): for code, handlers in lookup.iteritems(): handlers = list(handlers) handlers.sort() lookup[code] = handlers for scheme, handlers in handle_open.iteritems(): handlers = list(handlers) handlers.sort() handle_open[scheme] = handlers # cache the indexes self.handle_error = handle_error self.handle_open = handle_open self.process_request = process_request self.process_response = process_response self._any_request = any_request self._any_response = any_response def _request(self, url_or_req, data, visit, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT): if isstringlike(url_or_req): req = Request(url_or_req, data, visit=visit, timeout=timeout) else: # already a mechanize.Request instance req = url_or_req if data is not None: req.add_data(data) # XXX yuck set_request_attr(req, "visit", visit, None) set_request_attr(req, "timeout", timeout, _sockettimeout._GLOBAL_DEFAULT_TIMEOUT) return req def open(self, fullurl, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT): req = self._request(fullurl, data, None, timeout) req_scheme = req.get_type() self._maybe_reindex_handlers() # pre-process request # XXX should we allow a Processor to change the URL scheme # of the request? request_processors = set(self.process_request.get(req_scheme, [])) request_processors.update(self._any_request) request_processors = list(request_processors) request_processors.sort() for processor in request_processors: for meth_name in ["any_request", req_scheme+"_request"]: meth = getattr(processor, meth_name, None) if meth: req = meth(req) # In Python >= 2.4, .open() supports processors already, so we must # call ._open() instead. urlopen = _urllib2_fork.OpenerDirector._open response = urlopen(self, req, data) # post-process response response_processors = set(self.process_response.get(req_scheme, [])) response_processors.update(self._any_response) response_processors = list(response_processors) response_processors.sort() for processor in response_processors: for meth_name in ["any_response", req_scheme+"_response"]: meth = getattr(processor, meth_name, None) if meth: response = meth(req, response) return response def error(self, proto, *args): if proto in ['http', 'https']: # XXX http[s] protocols are special-cased dict = self.handle_error['http'] # https is not different than http proto = args[2] # YUCK! meth_name = 'http_error_%s' % proto http_err = 1 orig_args = args else: dict = self.handle_error meth_name = proto + '_error' http_err = 0 args = (dict, proto, meth_name) + args result = apply(self._call_chain, args) if result: return result if http_err: args = (dict, 'default', 'http_error_default') + orig_args return apply(self._call_chain, args) BLOCK_SIZE = 1024*8 def retrieve(self, fullurl, filename=None, reporthook=None, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT, open=open_file): """Returns (filename, headers). For remote objects, the default filename will refer to a temporary file. Temporary files are removed when the OpenerDirector.close() method is called. For file: URLs, at present the returned filename is None. This may change in future. If the actual number of bytes read is less than indicated by the Content-Length header, raises ContentTooShortError (a URLError subclass). The exception's .result attribute contains the (filename, headers) that would have been returned. """ req = self._request(fullurl, data, False, timeout) scheme = req.get_type() fp = self.open(req) try: headers = fp.info() if filename is None and scheme == 'file': # XXX req.get_selector() seems broken here, return None, # pending sanity :-/ return None, headers #return urllib.url2pathname(req.get_selector()), headers if filename: tfp = open(filename, 'wb') else: path = _rfc3986.urlsplit(req.get_full_url())[2] suffix = os.path.splitext(path)[1] fd, filename = tempfile.mkstemp(suffix) self._tempfiles.append(filename) tfp = os.fdopen(fd, 'wb') try: result = filename, headers bs = self.BLOCK_SIZE size = -1 read = 0 blocknum = 0 if reporthook: if "content-length" in headers: size = int(headers["Content-Length"]) reporthook(blocknum, bs, size) while 1: block = fp.read(bs) if block == "": break read += len(block) tfp.write(block) blocknum += 1 if reporthook: reporthook(blocknum, bs, size) finally: tfp.close() finally: fp.close() # raise exception if actual size does not match content-length header if size >= 0 and read < size: raise ContentTooShortError( "retrieval incomplete: " "got only %i out of %i bytes" % (read, size), result ) return result def close(self): _urllib2_fork.OpenerDirector.close(self) # make it very obvious this object is no longer supposed to be used self.open = self.error = self.retrieve = self.add_handler = None if self._tempfiles: for filename in self._tempfiles: try: os.unlink(filename) except OSError: pass del self._tempfiles[:] def wrapped_open(urlopen, process_response_object, fullurl, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT): success = True try: response = urlopen(fullurl, data, timeout) except urllib2.HTTPError, error: success = False if error.fp is None: # not a response raise response = error if response is not None: response = process_response_object(response) if not success: raise response return response class ResponseProcessingOpener(OpenerDirector): def open(self, fullurl, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT): def bound_open(fullurl, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT): return OpenerDirector.open(self, fullurl, data, timeout) return wrapped_open( bound_open, self.process_response_object, fullurl, data, timeout) def process_response_object(self, response): return response class SeekableResponseOpener(ResponseProcessingOpener): def process_response_object(self, response): return _response.seek_wrapped_response(response) def isclass(obj): return isinstance(obj, (types.ClassType, type)) class OpenerFactory: """This class's interface is quite likely to change.""" default_classes = [ # handlers _urllib2_fork.ProxyHandler, _urllib2_fork.UnknownHandler, _urllib2_fork.HTTPHandler, _urllib2_fork.HTTPDefaultErrorHandler, _urllib2_fork.HTTPRedirectHandler, _urllib2_fork.FTPHandler, _urllib2_fork.FileHandler, # processors _urllib2_fork.HTTPCookieProcessor, _urllib2_fork.HTTPErrorProcessor, ] if hasattr(httplib, 'HTTPS'): default_classes.append(_urllib2_fork.HTTPSHandler) handlers = [] replacement_handlers = [] def __init__(self, klass=OpenerDirector): self.klass = klass def build_opener(self, *handlers): """Create an opener object from a list of handlers and processors. The opener will use several default handlers and processors, including support for HTTP and FTP. If any of the handlers passed as arguments are subclasses of the default handlers, the default handlers will not be used. """ opener = self.klass() default_classes = list(self.default_classes) skip = set() for klass in default_classes: for check in handlers: if isclass(check): if issubclass(check, klass): skip.add(klass) elif isinstance(check, klass): skip.add(klass) for klass in skip: default_classes.remove(klass) for klass in default_classes: opener.add_handler(klass()) for h in handlers: if isclass(h): h = h() opener.add_handler(h) return opener build_opener = OpenerFactory().build_opener _opener = None urlopen_lock = _threading.Lock() def urlopen(url, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT): global _opener if _opener is None: urlopen_lock.acquire() try: if _opener is None: _opener = build_opener() finally: urlopen_lock.release() return _opener.open(url, data, timeout) def urlretrieve(url, filename=None, reporthook=None, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT): global _opener if _opener is None: urlopen_lock.acquire() try: if _opener is None: _opener = build_opener() finally: urlopen_lock.release() return _opener.retrieve(url, filename, reporthook, data, timeout) def install_opener(opener): global _opener _opener = opener
    gpl-2.0
    Jonekee/chromium.src
    tools/cr/cr/actions/installer.py
    113
    1642
    # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A module for the Installer base class.""" import cr class Installer(cr.Action, cr.Plugin.Type): """Base class for implementing installers. Installer implementations must implement the Uninstall and Install methods. If the location into which targets are built is find for running them, then they do not actually have to do anything. """ SELECTOR_ARG = '--installer' SELECTOR = 'CR_INSTALLER' SELECTOR_HELP = 'Sets the installer to use.' @cr.Plugin.activemethod def Uninstall(self, targets, arguments): """Removes a target from it's installed location.""" raise NotImplementedError('Must be overridden.') @cr.Plugin.activemethod def Install(self, targets, arguments): """Installs a target somewhere so that it is ready to run.""" raise NotImplementedError('Must be overridden.') @cr.Plugin.activemethod def Reinstall(self, targets, arguments): """Force a target to install even if already installed. Default implementation is to do an Uninstall Install sequence. Do not call the base version if you implement a more efficient one. """ self.Uninstall(targets, []) self.Install(targets, arguments) class SkipInstaller(Installer): """An Installer the user chooses to bypass the install step of a command.""" @property def priority(self): return super(SkipInstaller, self).priority - 1 def Uninstall(self, targets, arguments): pass def Install(self, targets, arguments): pass
    bsd-3-clause
    maelnor/nova
    nova/virt/vmwareapi/error_util.py
    3
    1401
    # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Exception classes specific for the VMware driver. """ from oslo.vmware import exceptions as vexc from nova.i18n import _ # Most VMware-specific exception classes are now centrally defined in # oslo.vmware. # Note(vui): # - map back to NovaException? class NoRootDiskDefined(vexc.VMwareDriverException): msg_fmt = _("No root disk defined.") class TaskInProgress(vexc.VMwareDriverException): msg_fmt = _("Virtual machine is busy.") class PbmDefaultPolicyUnspecified(vexc.VMwareDriverConfigurationException): msg_fmt = _("Default PBM policy is required if PBM is enabled.") class PbmDefaultPolicyDoesNotExist(vexc.VMwareDriverConfigurationException): msg_fmt = _("The default PBM policy doesn't exist on the backend.")
    apache-2.0
    ymero/pulsar
    pulsar/apps/wsgi/routers.py
    5
    22135
    ''' Routing is the process of matching and parsing a URL to something we can use. Pulsar provides a flexible integrated routing system you can use for that. It works by creating a :class:`Router` instance with its own ``rule`` and, optionally, additional sub-routers for handling additional urls:: class Page(Router): response_content_types = RouterParam(('text/html', 'text/plain', 'application/json')) def get(self, request): "This method handle requests with get-method" ... def post(self, request): "This method handle requests with post-method" ... def delete(self, request): "This method handle requests with delete-method" ... ... middleware = Page('/bla') .. _wsgi-router: Router ===================== The :ref:`middleware <wsgi-middleware>` constructed in the snippet above handles ``get`` and ``post`` methods at the ``/bla`` url. The :class:`Router` introduces a new element into pulsar WSGI handlers, the :ref:`wsgi request <app-wsgi-request>`, a light-weight wrapper of the WSGI environ. For an exaustive example on how to use the :class:`Router` middleware make sure you check out the :ref:`HttpBin example <tutorials-httpbin>`. .. autoclass:: Router :members: :member-order: bysource .. _wsgi-media-router: Media Router ===================== The :class:`MediaRouter` is a specialised :class:`Router` for serving static files such ass ``css``, ``javascript``, images and so forth. .. autoclass:: MediaRouter :members: :member-order: bysource RouterParam ================= .. autoclass:: RouterParam :members: :member-order: bysource .. _WSGI: http://www.wsgi.org ''' import os import re import stat import mimetypes from email.utils import parsedate_tz, mktime_tz from pulsar.utils.httpurl import http_date, CacheControl from pulsar.utils.structures import OrderedDict from pulsar.utils.slugify import slugify from pulsar import Http404, HttpException from .route import Route from .utils import wsgi_request from .content import Html __all__ = ['Router', 'MediaRouter', 'FileRouter', 'MediaMixin', 'RouterParam'] def get_roule_methods(attrs): rule_methods = [] for code, callable in attrs: if code.startswith('__') or not hasattr(callable, '__call__'): continue rule_method = getattr(callable, 'rule_method', None) if isinstance(rule_method, tuple): rule_methods.append((code, rule_method)) return sorted(rule_methods, key=lambda x: x[1].order) def update_args(urlargs, args): if urlargs: urlargs.update(args) return urlargs return args def _get_default(parent, name): if name in parent.defaults: return getattr(parent, name) elif parent._parent: return _get_default(parent._parent, name) else: raise AttributeError class RouterParam(object): '''A :class:`RouterParam` is a way to flag a :class:`Router` parameter so that children can inherit the value if they don't define their own. A :class:`RouterParam` is always defined as a class attribute and it is processed by the :class:`Router` metaclass and stored in a dictionary available as ``parameter`` class attribute. .. attribute:: value The value associated with this :class:`RouterParam`. THis is the value stored in the :class:`Router.parameters` dictionary at key given by the class attribute specified in the class definition. ''' def __init__(self, value=None): self.value = value class RouterType(type): ''':class:`Router` metaclass.''' def __new__(cls, name, bases, attrs): rule_methods = get_roule_methods(attrs.items()) defaults = {} for key, value in list(attrs.items()): if isinstance(value, RouterParam): defaults[key] = attrs.pop(key).value no_rule = set(attrs) - set((x[0] for x in rule_methods)) base_rules = [] for base in reversed(bases): if hasattr(base, 'defaults'): params = base.defaults.copy() params.update(defaults) defaults = params if hasattr(base, 'rule_methods'): items = base.rule_methods.items() else: g = ((key, getattr(base, key)) for key in dir(base)) items = get_roule_methods(g) rules = [pair for pair in items if pair[0] not in no_rule] base_rules = base_rules + rules if base_rules: all = base_rules + rule_methods rule_methods = {} for namerule, rule in all: if namerule in rule_methods: rule = rule.override(rule_methods[namerule]) rule_methods[namerule] = rule rule_methods = sorted(rule_methods.items(), key=lambda x: x[1].order) attrs['rule_methods'] = OrderedDict(rule_methods) attrs['defaults'] = defaults return super(RouterType, cls).__new__(cls, name, bases, attrs) class Router(metaclass=RouterType): '''A :ref:`WSGI middleware <wsgi-middleware>` to handle client requests on multiple :ref:`routes <apps-wsgi-route>`. The user must implement the HTTP methods required by the application. For example if the route needs to serve a ``GET`` request, the ``get(self, request)`` method must be implemented. :param rule: String used for creating the :attr:`route` of this :class:`Router`. :param routes: Optional :class:`Router` instances which are added to the children :attr:`routes` of this router. :param parameters: Optional parameters for this router. .. attribute:: rule_methods A class attribute built during class creation. It is an ordered dictionary mapping method names with a five-elements tuple containing information about a child route (See the :class:`.route` decorator). .. attribute:: routes List of children :class:`Router` of this :class:`Router`. .. attribute:: parent The parent :class:`Router` of this :class:`Router`. .. attribute:: response_content_types A list/tuple of possible content types of a response to a client request. The client request must accept at least one of the response content types, otherwise an HTTP ``415`` exception occurs. .. attribute:: response_wrapper Optional function which wraps all handlers of this :class:`.Router`. The function must accept two parameters, the original handler and the :class:`.WsgiRequest`:: def response_wrapper(handler, request): ... return handler(request) ''' _creation_count = 0 _parent = None name = None response_content_types = RouterParam(None) response_wrapper = RouterParam(None) def __init__(self, rule, *routes, **parameters): Router._creation_count += 1 self._creation_count = Router._creation_count if not isinstance(rule, Route): rule = Route(rule) self._route = rule parameters.setdefault('name', rule.name or self.name or '') self._set_params(parameters) self.routes = [] # add routes specified via the initialiser first for router in routes: self.add_child(router) for name, rule_method in self.rule_methods.items(): rule, method, params, _, _ = rule_method rparameters = params.copy() handler = getattr(self, name) self.add_child(self.make_router(rule, method=method, handler=handler, **rparameters)) @property def route(self): '''The relative :class:`.Route` served by this :class:`Router`. ''' parent = self._parent if parent and parent._route.is_leaf: return parent.route + self._route else: return self._route @property def full_route(self): '''The full :attr:`route` for this :class:`.Router`. It includes the :attr:`parent` portion of the route if a parent router is available. ''' if self._parent: return self._parent.full_route + self._route else: return self._route @property def root(self): '''The root :class:`Router` for this :class:`Router`.''' if self.parent: return self.parent.root else: return self @property def parent(self): return self._parent @property def creation_count(self): '''Integer for sorting :class:`Router` by creation. Auto-generated during initialisation.''' return self._creation_count @property def rule(self): '''The full ``rule`` string for this :class:`Router`. It includes the :attr:`parent` portion of the rule if a :attr:`parent` router is available. ''' return self.full_route.rule def path(self, **urlargs): '''The full path of this :class:`Router`. It includes the :attr:`parent` portion of url if a parent router is available. ''' return self.full_route.url(**urlargs) def getparam(self, name, default=None, parents=False): '''A parameter in this :class:`.Router` ''' value = getattr(self, name, None) if value is None: if parents and self._parent: return self._parent.getparam(name, default, parents) else: return default else: return value def __getattr__(self, name): '''Get the value of the ``name`` attribute. If the ``name`` is not available, retrieve it from the :attr:`parent` :class:`Router` if it exists. ''' available = False value = None if name in self.defaults: available = True value = self.defaults[name] if self._parent and value is None: try: return _get_default(self._parent, name) except AttributeError: pass if available: return value raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name)) def content_type(self, request): '''Evaluate the content type for the response to a client ``request``. The method uses the :attr:`response_content_types` parameter of accepted content types and the content types accepted by the client ``request`` and figures out the best match. ''' response_content_types = self.response_content_types request_content_types = request.content_types if request_content_types: ct = request_content_types.best_match(response_content_types) if ct and '*' in ct: ct = None if not ct and response_content_types: raise HttpException(status=415, msg=request_content_types) return ct def __repr__(self): return self.full_route.__repr__() def __call__(self, environ, start_response=None): path = environ.get('PATH_INFO') or '/' path = path[1:] router_args = self.resolve(path) if router_args: router, args = router_args return router.response(environ, args) def resolve(self, path, urlargs=None): '''Resolve a path and return a ``(handler, urlargs)`` tuple or ``None`` if the path could not be resolved. ''' match = self.route.match(path) if match is None: if not self.route.is_leaf: # no match return elif '__remaining__' in match: path = match.pop('__remaining__') urlargs = update_args(urlargs, match) else: return self, update_args(urlargs, match) # for handler in self.routes: view_args = handler.resolve(path, urlargs) if view_args is None: continue return view_args def response(self, environ, args): '''Once the :meth:`resolve` method has matched the correct :class:`Router` for serving the request, this matched router invokes this method to produce the WSGI response. ''' request = wsgi_request(environ, self, args) request.response.content_type = self.content_type(request) method = request.method.lower() callable = getattr(self, method, None) if callable is None: raise HttpException(status=405) response_wrapper = self.response_wrapper if response_wrapper: return response_wrapper(callable, request) return callable(request) def add_child(self, router): '''Add a new :class:`Router` to the :attr:`routes` list. ''' assert isinstance(router, Router), 'Not a valid Router' assert router is not self, 'cannot add self to children' for r in self.routes: if r == router: return r elif r._route == router._route: raise ValueError('Cannot add route %s. Already avalable' % r._route) # # Remove from previous parent if router.parent: router.parent.remove_child(router) router._parent = self self.routes.append(router) return router def remove_child(self, router): '''remove a :class:`Router` from the :attr:`routes` list.''' if router in self.routes: self.routes.remove(router) router._parent = None def get_route(self, name): '''Get a child :class:`Router` by its :attr:`name`. This method search child routes recursively. ''' for route in self.routes: if route.name == name: return route for child in self.routes: route = child.get_route(name) if route: return route def link(self, *args, **urlargs): '''Return an anchor :class:`Html` element with the `href` attribute set to the url of this :class:`Router`.''' if len(args) > 1: raise ValueError url = self.route.url(**urlargs) if len(args) == 1: text = args[0] else: text = url return Html('a', text, href=url) def has_parent(self, router): '''Check if ``router`` is ``self`` or a parent or ``self`` ''' parent = self while parent and parent is not router: parent = parent._parent return parent is not None def make_router(self, rule, method=None, handler=None, cls=None, name=None, **params): '''Create a new :class:`.Router` from a ``rule`` and parameters. This method is used during initialisation when building child Routers from the :attr:`rule_methods`. ''' cls = cls or Router router = cls(rule, name=name, **params) for r in self.routes: if r._route == router._route: if isinstance(r, cls): router = r router._set_params(params) break if method and handler: if isinstance(method, tuple): for m in method: setattr(router, m, handler) else: setattr(router, method, handler) return router # INTERNALS def _set_params(self, parameters): for name, value in parameters.items(): if name not in self.defaults: name = slugify(name, separator='_') setattr(self, name, value) class MediaMixin(object): def serve_file(self, request, fullpath, status_code=None): # Respect the If-Modified-Since header. statobj = os.stat(fullpath) content_type, encoding = mimetypes.guess_type(fullpath) response = request.response if content_type: response.content_type = content_type if encoding: response.encoding = encoding if not (status_code or self.was_modified_since( request.environ.get('HTTP_IF_MODIFIED_SINCE'), statobj[stat.ST_MTIME], statobj[stat.ST_SIZE])): response.status_code = 304 else: response.content = open(fullpath, 'rb').read() if status_code: response.status_code = status_code else: response.headers["Last-Modified"] = http_date( statobj[stat.ST_MTIME]) return response def was_modified_since(self, header=None, mtime=0, size=0): '''Check if an item was modified since the user last downloaded it :param header: the value of the ``If-Modified-Since`` header. If this is ``None``, simply return ``True`` :param mtime: the modification time of the item in question. :param size: the size of the item. ''' try: if header is None: raise ValueError matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header, re.IGNORECASE) header_mtime = mktime_tz(parsedate_tz(matches.group(1))) header_len = matches.group(3) if header_len and int(header_len) != size: raise ValueError() if mtime > header_mtime: raise ValueError() except (AttributeError, ValueError, OverflowError): return True return False def directory_index(self, request, fullpath): names = [Html('a', '../', href='../', cn='folder')] files = [] for f in sorted(os.listdir(fullpath)): if not f.startswith('.'): if os.path.isdir(os.path.join(fullpath, f)): names.append(Html('a', f, href=f+'/', cn='folder')) else: files.append(Html('a', f, href=f)) names.extend(files) return self.static_index(request, names) def static_index(self, request, links): doc = request.html_document doc.title = 'Index of %s' % request.path title = Html('h2', doc.title) list = Html('ul', *[Html('li', a) for a in links]) doc.body.append(Html('div', title, list)) return doc.http_response(request) class MediaRouter(Router, MediaMixin): '''A :class:`Router` for serving static media files from a given directory. :param rute: The top-level url for this router. For example ``/media`` will serve the ``/media/<path:path>`` :class:`Route`. :param path: Check the :attr:`path` attribute. :param show_indexes: Check the :attr:`show_indexes` attribute. .. attribute:: path The file-system path of the media files to serve. .. attribute:: show_indexes If ``True``, the router will serve media file directories as well as media files. .. attribute:: default_file The default file to serve when a directory is requested. ''' cache_control = CacheControl(maxage=86400) def __init__(self, rule, path, show_indexes=False, default_suffix=None, default_file='index.html', raise_404=True, **params): super().__init__('%s/<path:path>' % rule, **params) self._default_suffix = default_suffix self._default_file = default_file self._show_indexes = show_indexes self._file_path = path self._raise_404 = raise_404 def filesystem_path(self, request): path = request.urlargs['path'] bits = [bit for bit in path.split('/') if bit] return os.path.join(self._file_path, *bits) def get(self, request): fullpath = self.filesystem_path(request) if os.path.isdir(fullpath) and self._default_file: file = os.path.join(fullpath, self._default_file) if os.path.isfile(file): if not request.path.endswith('/'): return request.redirect('%s/' % request.path) fullpath = file # if os.path.isdir(fullpath): if self._show_indexes: return self.directory_index(request, fullpath) else: raise Http404 # filename = os.path.basename(fullpath) if '.' not in filename and self._default_suffix: fullpath = '%s.%s' % (fullpath, self._default_suffix) # if os.path.isfile(fullpath): return self.serve_file(request, fullpath) elif self._raise_404: raise Http404 class FileRouter(Router, MediaMixin): '''A Router for a single file ''' response_content_types = RouterParam(('application/octet-stream', 'text/css', 'application/javascript', 'text/html')) cache_control = CacheControl(maxage=86400) def __init__(self, route, file_path, status_code=None, raise_404=True): super().__init__(route) self._status_code = status_code self._file_path = file_path self._raise_404 = raise_404 def filesystem_path(self, request): return self._file_path def get(self, request): fullpath = self.filesystem_path(request) if os.path.isfile(fullpath): return self.serve_file(request, fullpath, status_code=self._status_code) elif self._raise_404: raise Http404
    bsd-3-clause
    creative-quant/voltdb
    tools/git-remove-branches-script.py
    10
    6165
    #!/usr/bin/python # Print out scripts for removing either # - branches already merged to trunk # - branches not merged, but with no current checkins # This script DOES NOT do the removals - you need to run the # console output. #TODO: Make it handle incorrect password the 1st time, then abort #from datetime import date, datetime, timedelta import getpass from optparse import OptionParser import re from subprocess import Popen import subprocess import sys import time import jiratools # set exclusions if there are any branches that should not be listed exclusions = ['master'] jira_url = 'https://issues.voltdb.com/' gitshowmap = \ { "unixtime":"%ct", "datetime":"%ci", "humantime":"%cr", "email":"%ce", } DELIMITER='^' def run_cmd(cmd): proc = Popen(cmd.split(' '),stdout=subprocess.PIPE,stderr=subprocess.PIPE) #print cmd (out, err) = proc.communicate(input=None) return (proc.returncode, out, err) def get_branch_list(merged): branches = [] git_cmd = 'git branch -r %s' % ('--merged' if merged else '--no-merged') print ('#\n# git command: %s\n#' % git_cmd) (returncode, stdout, stderr) = run_cmd (git_cmd) #only want branches at origin and don't want HEAD listed branches = [b.strip() for b in stdout.splitlines()if b.strip().find('origin/') == 0 and b.find('HEAD') < 0] #Filter others from list origin_exclusions = ['origin/' + b for b in exclusions] return list(set(branches) - set(origin_exclusions)) def make_delete_branches_script(branch_infos, dry_run): other_args = '' if dry_run: other_args = ' --dry-run' for bi in branch_infos: b = bi['name'] cmd = 'git push origin --delete %s%s' % \ (b, other_args) comment = make_comment(bi) print print comment print cmd def make_comment(bi): comment = '#%-20s last checkin %s %s by %s' % \ (bi['name'],bi['datetime'],bi['humantime'],bi['email']) if options.use_jira: ticket_summary = get_jira_info(bi['name']) if ticket_summary: comment += ('\n' + ticket_summary) return comment def get_jira_info(b): comment = None rg = re.compile('(eng)-?(\d+)', re.IGNORECASE) m = rg.search(b) if m: issue = m.group(1) + '-' + m.group(2) #print "##Getting %s" % issue ticket = jiratools.get_jira_issue(jira_url, user, password, issue, 'summary,assignee,status,resolution') if ticket: assignee = 'Unassigned' if ticket['fields']['assignee']: assignee = ticket['fields']['assignee']['name'] summary = ticket['fields']['summary'] #issue_url = jira_url + 'browse/' + issue_key status_resolution = ticket['fields']['status']['name'] if status_resolution in ('Closed','Resolved'): status_resolution += '/' + ticket['fields']['resolution']['name'] comment = "#%s %s %s: %s" % (issue, status_resolution.upper(), assignee, summary) return comment def make_archive_branches_script(branch_infos, dry_run): other_args = '' if dry_run: other_args = ' --dry-run' for bi in branch_infos: comment = make_comment(bi) tagname = "archive/" + bi['name'] print print comment print 'git tag -m "archiving branch %s" %s origin/%s' % \ (bi['name'], tagname, bi['name']) print 'git push origin %s' % (tagname) print 'git push origin --delete %s %s' % (other_args, bi['name']) if __name__ == "__main__": parser = OptionParser() parser.add_option('--no-jira', dest='use_jira', action = 'store_false', help = 'Don\'t look up jira ticket', default = 'True') parser.add_option('-u', '--username', dest = 'username', action = 'store', help = 'username to use for Jira lookups', default = getpass.getuser()) parser.add_option('-p', '--password', dest = 'password', action = 'store', help = 'password to use for Jira lookups') parser.add_option('--no-merged', dest = 'merged', action = 'store_false', help = "find branches that are not merged to master", default = True) parser.add_option('--older', dest = 'olderthan', action = 'store', help = "age of unmerged branches to list", type="int", default = 60); (options,args) = parser.parse_args() if options.use_jira: user = options.username password = options.password or getpass.getpass('Enter your Jira password: ') #Get the branch list branch_names = get_branch_list(options.merged) format_string = DELIMITER.join([gitshowmap[key] for key in sorted(gitshowmap)]) #Iterate over it and get a bunch of commit information using git log branch_infos = [] for b in branch_names: branch_info = {} branch_info['name'] = b.split('/')[1] #Get the git log info and pack it into a branch_info dictionary cmd = 'git log -1 --format=%s %s' % (format_string, b) (ret,stdout,stderr) = run_cmd(cmd) if not ret: values = stdout.rstrip().split(DELIMITER) for k,v in zip(sorted(gitshowmap),values): try: branch_info[k] = float(v) except ValueError: branch_info[k] = v branch_infos.append(branch_info) else: sys.stderr.write( "ERROR: Can't get git information for %s\n" % b) sys.stderr.write( "\tcmd = %s\n" % cmd) sys.stderr.write( "\tstderr=%s\n" % stderr) now = time.time() sorted_branch_infos = sorted(branch_infos, reverse = True, key=lambda bi:bi['unixtime']) old_branch_infos = [bi for bi in sorted_branch_infos if (now - bi['unixtime']) > options.olderthan * 60* 60* 24] if options.merged: make_delete_branches_script(old_branch_infos, dry_run=False) else: make_archive_branches_script(old_branch_infos, dry_run=False)
    agpl-3.0
    evro/CouchPotatoServer
    libs/guessit/transfo/guess_date.py
    150
    1217
    #!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <[email protected]> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit.transfo import SingleNodeGuesser from guessit.date import search_date import logging log = logging.getLogger(__name__) def guess_date(string): date, span = search_date(string) if date: return { 'date': date }, span else: return None, None def process(mtree): SingleNodeGuesser(guess_date, 1.0, log).process(mtree)
    gpl-3.0
    tmenjo/cinder-2015.1.1
    cinder/volume/drivers/netapp/dataontap/fc_cmode.py
    4
    3060
    # Copyright (c) - 2014, Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp Data ONTAP (C-mode) FibreChannel storage systems. """ from oslo_log import log as logging from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap import block_cmode from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class NetAppCmodeFibreChannelDriver(driver.FibreChannelDriver): """NetApp C-mode FibreChannel volume driver.""" DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct' def __init__(self, *args, **kwargs): super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs) self.library = block_cmode.NetAppBlockStorageCmodeLibrary( self.DRIVER_NAME, 'FC', **kwargs) def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def create_volume(self, volume): self.library.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): self.library.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): self.library.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.library.delete_volume(volume) def create_snapshot(self, snapshot): self.library.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.library.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): return self.library.get_volume_stats(refresh) def extend_volume(self, volume, new_size): self.library.extend_volume(volume, new_size) def ensure_export(self, context, volume): return self.library.ensure_export(context, volume) def create_export(self, context, volume): return self.library.create_export(context, volume) def remove_export(self, context, volume): self.library.remove_export(context, volume) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): return self.library.initialize_connection_fc(volume, connector) @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): return self.library.terminate_connection_fc(volume, connector, **kwargs) def get_pool(self, volume): return self.library.get_pool(volume)
    apache-2.0
    jbteixeir/Openflow-DC-Framework
    pox/host_tracker/host_tracker.py
    1
    12071
    # Copyright 2011 Dorgival Guedes # # This file is part of POX. # Some of the arp/openflow-related code was borrowed from dumb_l3_switch. # # POX is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # POX is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with POX. If not, see <http://www.gnu.org/licenses/>. """ Keep track of hosts in the network, where they are and how they are configured (at least MAC/IP addresses) For the time being, it keeps tables with the information; later, it should transfer that information to Topology and handle just the actual discovery/update of host information. Timer configuration can be changed when needed (e.g., for debugging) using the launch facility (check timeoutSec dict and PingCtrl.pingLim). """ from pox.core import core import pox log = core.getLogger() #import logging #log.setLevel(logging.WARN) from pox.lib.packet.ethernet import ethernet from pox.lib.packet.ipv4 import ipv4 from pox.lib.packet.arp import arp from pox.lib.recoco.recoco import Timer from pox.lib.revent.revent import EventMixin from ext.Structures.ercs_host import HostJoin, HostTimeout, HostMove import pox.openflow.libopenflow_01 as of import pox.openflow.discovery as discovery from pox.lib.revent.revent import * import time import string # Times (in seconds) to use for differente timouts: timeoutSec = dict( arpAware=60*2, # Quiet ARP-responding entries are pinged after this arpSilent=60*20, # This is for uiet entries not known to answer ARP arpReply=4, # Time to wait for an ARP reply before retrial timerInterval=5, # Seconds between timer routine activations entryMove=60 # Minimum expected time to move a physical entry ) # Good values for testing: # --arpAware=15 --arpSilent=45 --arpReply=1 --entryMove=4 # Another parameter that may be used: # --pingLim=2 class Alive (object): """ Holds liveliness information for MAC and IP entries """ def __init__ (self, livelinessInterval=timeoutSec['arpAware']): self.lastTimeSeen = time.time() self.interval=livelinessInterval def expired (self): return time.time() > self.lastTimeSeen + self.interval def refresh (self): self.lastTimeSeen = time.time() class PingCtrl (Alive): """ Holds information for handling ARP pings for hosts """ # Number of ARP ping attemps before deciding it failed pingLim=3 def __init__ (self): Alive.__init__(self, timeoutSec['arpReply']) self.pending = 0 def sent (self): self.refresh() self.pending += 1 def failed (self): return self.pending > PingCtrl.pingLim def received (self): # Clear any pending timeouts related to ARP pings self.pending = 0 class IpEntry (Alive): """ This entry keeps track of IP addresses seen from each MAC entry and will be kept in the macEntry object's ipAddrs dictionary. At least for now, there is no need to refer to the original macEntry as the code is organized. """ def __init__ (self, hasARP): if hasARP: Alive.__init__(self,timeoutSec['arpAware']) else: Alive.__init__(self,timeoutSec['arpSilent']) self.hasARP = hasARP self.pings = PingCtrl() def setHasARP (self): if not self.hasARP: self.hasARP = True self.interval = timeoutSec['arpAware'] class MacEntry (Alive): """ Not strictly an ARP entry. When it gets moved to Topology, may include other host info, like services, and it may replace dpid by a general switch object reference We use the port to determine which port to forward traffic out of. """ def __init__ (self, dpid, port, macaddr): Alive.__init__(self) self.dpid = dpid self.port = port self.macaddr = macaddr self.ipAddrs = {} def __str__(self): return string.join([str(self.dpid), str(self.port), str(self.macaddr)],' ') def __eq__ (self, other): if type(other) == type(None): return type(self) == type(None) elif type(other) == tuple: return (self.dpid,self.port,self.macaddr)==other else: return (self.dpid,self.port,self.macaddr) \ ==(other.dpid,other.port,other.macaddr) def __ne__ (self, other): return not self.__eq__(other) class host_tracker (EventMixin): _eventMixin_events = set([ HostJoin, HostTimeout, HostMove, ]) def __init__ (self): # The following tables should go to Topology later self.entryByMAC = {} self._t = Timer(timeoutSec['timerInterval'], self._check_timeouts, recurring=True) self.listenTo(core) log.info("host_tracker ready") # The following two functions should go to Topology also def getMacEntry(self, macaddr): try: result = self.entryByMAC[macaddr] except KeyError as e: result = None return result def sendPing(self, macEntry, ipAddr): r = arp() # Builds an "ETH/IP any-to-any ARP packet r.opcode = arp.REQUEST r.hwdst = macEntry.macaddr r.protodst = ipAddr # src is ETHER_ANY, IP_ANY e = ethernet(type=ethernet.ARP_TYPE, src=r.hwsrc, dst=r.hwdst) e.set_payload(r) log.debug("%i %i sending ARP REQ to %s %s", macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst)) msg = of.ofp_packet_out(data = e.pack(), action = of.ofp_action_output(port = macEntry.port)) if core.openflow.sendToDPID(macEntry.dpid, msg.pack()): ipEntry = macEntry.ipAddrs[ipAddr] ipEntry.pings.sent() else: # macEntry is stale, remove it. log.debug("%i %i ERROR sending ARP REQ to %s %s", macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst)) del macEntry.ipAddrs[ipAddr] return def getSrcIPandARP(self, packet): """ This auxiliary function returns the source IPv4 address for packets that have one (IPv4, ARPv4). Returns None otherwise. """ if isinstance(packet, ipv4): log.debug("IP %s => %s",str(packet.srcip),str(packet.dstip)) return ( packet.srcip, False ) elif isinstance(packet, arp): log.debug("ARP %s %s => %s", {arp.REQUEST:"request",arp.REPLY:"reply"}.get(packet.opcode, 'op:%i' % (packet.opcode,)), str(packet.protosrc), str(packet.protodst)) if packet.hwtype == arp.HW_TYPE_ETHERNET and \ packet.prototype == arp.PROTO_TYPE_IP and \ packet.protosrc != 0: return ( packet.protosrc, True ) return ( None, False ) def updateIPInfo(self, pckt_srcip, macEntry, hasARP): """ If there is IP info in the incoming packet, update the macEntry accordingly. In the past we assumed a 1:1 mapping between MAC and IP addresses, but removed that restriction later to accomodate cases like virtual interfaces (1:n) and distributed packet rewriting (n:1) """ if pckt_srcip in macEntry.ipAddrs: # that entry already has that IP ipEntry = macEntry.ipAddrs[pckt_srcip] ipEntry.refresh() log.debug("%s already has IP %s, refreshing", str(macEntry), str(pckt_srcip) ) else: # new mapping ipEntry = IpEntry(hasARP) macEntry.ipAddrs[pckt_srcip] = ipEntry log.info("Learned %s got IP %s", str(macEntry), str(pckt_srcip) ) if hasARP: ipEntry.pings.received() def _handle_GoingUpEvent (self, event): self.listenTo(core.openflow) log.debug("Up...") def _handle_PacketIn (self, event): """ Populate MAC and IP tables based on incoming packets. Handles only packets from ports identified as not switch-only. If a MAC was not seen before, insert it in the MAC table; otherwise, update table and enry. If packet has a source IP, update that info for the macEntry (may require removing the info from antoher entry previously with that IP address). It does not forward any packets, just extract info from them. """ dpid = event.connection.dpid inport = event.port packet = event.parse() if not packet.parsed: log.warning("%i %i ignoring unparsed packet", dpid, inport) return if packet.type == ethernet.LLDP_TYPE: # Ignore LLDP packets return # a = packet.find('arp') # if a: return dir(packet) # This should use Topology later if not core.openflow_discovery.is_edge_port(dpid, inport): # No host should be right behind a switch-only port log.debug("%i %i ignoring packetIn at switch-only port", dpid, inport) return log.debug("PacketIn: %i %i ETH %s => %s", dpid, inport, str(packet.src), str(packet.dst)) # Learn or update dpid/port/MAC info macEntry = self.getMacEntry(packet.src) if macEntry == None: # there is no known host by that MAC # should we raise a NewHostFound event (at the end)? macEntry = MacEntry(dpid,inport,packet.src) self.entryByMAC[packet.src] = macEntry log.info("Learned %s", str(macEntry)) #/begin FOR ERCS Purpose (srcip, hasARP) = self.getSrcIPandARP(packet.next) if srcip!=None: self.raiseEvent(HostJoin, packet.src, srcip, dpid, inport) #/end FOR ERCS Purpose elif macEntry != (dpid, inport, packet.src): # there is already an entry of host with that MAC, but host has moved # should we raise a HostMoved event (at the end)? log.info("Learned %s moved to %i %i", str(macEntry), dpid, inport) # if there has not been long since heard from it... if time.time() - macEntry.lastTimeSeen < timeoutSec['entryMove']: #log.warning("Possible duplicate: %s at time %i, now (%i %i), time %i", # str(macEntry), macEntry.lastTimeSeen(), # dpid, inport, time.time()) return # should we create a whole new entry, or keep the previous host info? # for now, we keep it: IP info, answers pings, etc. macEntry.dpid = dpid macEntry.inport = inport #/begin FOR ERCS Purpose #TODO: Should we check for duplicates? self.raiseEvent(HostMove, packet.src, dpid, inport) #/end FOR ERCS Purpose macEntry.refresh() (pckt_srcip, hasARP) = self.getSrcIPandARP(packet.next) if pckt_srcip != None: self.updateIPInfo(pckt_srcip,macEntry,hasARP) #/begin FOR ERCS Purpose self.raiseEvent(HostJoin, packet.src, pckt_srcip, dpid, inport) #/end FOR ERCS Purpose return def _check_timeouts(self): for macEntry in self.entryByMAC.values(): entryPinged = False for ip_addr, ipEntry in macEntry.ipAddrs.items(): if ipEntry.expired(): if ipEntry.pings.failed(): del macEntry.ipAddrs[ip_addr] log.info("Entry %s: IP address %s expired", str(macEntry), str(ip_addr) ) #/begin FOR ERCS Purpose self.raiseEvent(HostTimeout, macEntry, ip_addr) #/end FOR ERCS Purpose else: self.sendPing(macEntry,ip_addr) ipEntry.pings.sent() entryPinged = True if macEntry.expired() and not entryPinged: log.info("Entry %s expired", str(macEntry)) #/begin FOR ERCS Purpose self.raiseEvent(HostTimeout, macEntry, ip_addr) #/end FOR ERCS Purpose # sanity check: there should be no IP addresses left if len(macEntry.ipAddrs) > 0: for ip in macEntry.ipAddrs.keys(): log.warning("Entry %s expired but still had IP address %s", str(macEntry), str(ip_addr) ) del macEntry.ipAddrs[ip_addr] del self.entryByMAC[macEntry.macaddr] def launch(): core.registerNew(host_tracker)
    gpl-3.0
    cfe-lab/MiCall
    micall/utils/primer_tracker.py
    1
    2264
    import typing from pathlib import Path from Bio import SeqIO from micall.utils.alignment_wrapper import align_nucs class PrimerTracker: def __init__(self, conseq: str, seed_name: str): self.conseq = conseq self.seed_name = seed_name self.ignored_positions: typing.Optional[typing.Set[int]] = None def is_ignored(self, pos: int): """ Check if a position should be ignored because it's in a primer. :param pos: the one-based nucleotide position within the full seed sequence :return: True if that position should be ignored. """ self.check_positions() return pos in self.ignored_positions def check_positions(self): if self.ignored_positions is not None: return self.ignored_positions = set() if not self.seed_name.startswith('HCV-'): return cleaned_conseq = self.conseq.replace('-', 'x') data_path = Path(__file__).parent.parent / 'data' left_primers_path = data_path / f'primers_hcv_left.fasta' right_primers_path = data_path / f'primers_hcv_right_end.fasta' for primers_path in (left_primers_path, right_primers_path): with primers_path.open() as f: for primer in SeqIO.parse(f, 'fasta'): primer_name = primer.name if not primer_name.startswith('HCV'): continue if 'dA20' in primer_name or 'TIM' in primer_name: continue primer_seq = str(primer.seq).replace('X', '') aligned_primer, aligned_conseq, score = align_nucs( primer_seq, cleaned_conseq) primer_start = aligned_primer.lstrip('-') start = len(aligned_primer) - len(primer_start) primer_end = aligned_primer.rstrip('-') padded_end = len(primer_end) conseq_match = aligned_conseq[start:padded_end] unpadded_match = conseq_match.replace('-', '') end = start + len(unpadded_match) self.ignored_positions.update(range(start+1, end+1))
    agpl-3.0
    jpirates1/Django-python-pro
    venv/lib/python2.7/site-packages/pip/_vendor/html5lib/trie/py.py
    1323
    1775
    from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import text_type from bisect import bisect_left from ._base import Trie as ABCTrie class Trie(ABCTrie): def __init__(self, data): if not all(isinstance(x, text_type) for x in data.keys()): raise TypeError("All keys must be strings") self._data = data self._keys = sorted(data.keys()) self._cachestr = "" self._cachepoints = (0, len(data)) def __contains__(self, key): return key in self._data def __len__(self): return len(self._data) def __iter__(self): return iter(self._data) def __getitem__(self, key): return self._data[key] def keys(self, prefix=None): if prefix is None or prefix == "" or not self._keys: return set(self._keys) if prefix.startswith(self._cachestr): lo, hi = self._cachepoints start = i = bisect_left(self._keys, prefix, lo, hi) else: start = i = bisect_left(self._keys, prefix) keys = set() if start == len(self._keys): return keys while self._keys[i].startswith(prefix): keys.add(self._keys[i]) i += 1 self._cachestr = prefix self._cachepoints = (start, i) return keys def has_keys_with_prefix(self, prefix): if prefix in self._data: return True if prefix.startswith(self._cachestr): lo, hi = self._cachepoints i = bisect_left(self._keys, prefix, lo, hi) else: i = bisect_left(self._keys, prefix) if i == len(self._keys): return False return self._keys[i].startswith(prefix)
    apache-2.0
    MalloyPower/parsing-python
    front-end/testsuite-python-lib/Python-2.7.2/Lib/test/test_difflib.py
    86
    10297
    import difflib from test.test_support import run_unittest, findfile import unittest import doctest import sys class TestWithAscii(unittest.TestCase): def test_one_insert(self): sm = difflib.SequenceMatcher(None, 'b' * 100, 'a' + 'b' * 100) self.assertAlmostEqual(sm.ratio(), 0.995, places=3) self.assertEqual(list(sm.get_opcodes()), [ ('insert', 0, 0, 0, 1), ('equal', 0, 100, 1, 101)]) sm = difflib.SequenceMatcher(None, 'b' * 100, 'b' * 50 + 'a' + 'b' * 50) self.assertAlmostEqual(sm.ratio(), 0.995, places=3) self.assertEqual(list(sm.get_opcodes()), [ ('equal', 0, 50, 0, 50), ('insert', 50, 50, 50, 51), ('equal', 50, 100, 51, 101)]) def test_one_delete(self): sm = difflib.SequenceMatcher(None, 'a' * 40 + 'c' + 'b' * 40, 'a' * 40 + 'b' * 40) self.assertAlmostEqual(sm.ratio(), 0.994, places=3) self.assertEqual(list(sm.get_opcodes()), [ ('equal', 0, 40, 0, 40), ('delete', 40, 41, 40, 40), ('equal', 41, 81, 40, 80)]) class TestAutojunk(unittest.TestCase): """Tests for the autojunk parameter added in 2.7""" def test_one_insert_homogenous_sequence(self): # By default autojunk=True and the heuristic kicks in for a sequence # of length 200+ seq1 = 'b' * 200 seq2 = 'a' + 'b' * 200 sm = difflib.SequenceMatcher(None, seq1, seq2) self.assertAlmostEqual(sm.ratio(), 0, places=3) # Now turn the heuristic off sm = difflib.SequenceMatcher(None, seq1, seq2, autojunk=False) self.assertAlmostEqual(sm.ratio(), 0.9975, places=3) class TestSFbugs(unittest.TestCase): def test_ratio_for_null_seqn(self): # Check clearing of SF bug 763023 s = difflib.SequenceMatcher(None, [], []) self.assertEqual(s.ratio(), 1) self.assertEqual(s.quick_ratio(), 1) self.assertEqual(s.real_quick_ratio(), 1) def test_comparing_empty_lists(self): # Check fix for bug #979794 group_gen = difflib.SequenceMatcher(None, [], []).get_grouped_opcodes() self.assertRaises(StopIteration, group_gen.next) diff_gen = difflib.unified_diff([], []) self.assertRaises(StopIteration, diff_gen.next) def test_added_tab_hint(self): # Check fix for bug #1488943 diff = list(difflib.Differ().compare(["\tI am a buggy"],["\t\tI am a bug"])) self.assertEqual("- \tI am a buggy", diff[0]) self.assertEqual("? --\n", diff[1]) self.assertEqual("+ \t\tI am a bug", diff[2]) self.assertEqual("? +\n", diff[3]) patch914575_from1 = """ 1. Beautiful is beTTer than ugly. 2. Explicit is better than implicit. 3. Simple is better than complex. 4. Complex is better than complicated. """ patch914575_to1 = """ 1. Beautiful is better than ugly. 3. Simple is better than complex. 4. Complicated is better than complex. 5. Flat is better than nested. """ patch914575_from2 = """ \t\tLine 1: preceeded by from:[tt] to:[ssss] \t\tLine 2: preceeded by from:[sstt] to:[sssst] \t \tLine 3: preceeded by from:[sstst] to:[ssssss] Line 4: \thas from:[sst] to:[sss] after : Line 5: has from:[t] to:[ss] at end\t """ patch914575_to2 = """ Line 1: preceeded by from:[tt] to:[ssss] \tLine 2: preceeded by from:[sstt] to:[sssst] Line 3: preceeded by from:[sstst] to:[ssssss] Line 4: has from:[sst] to:[sss] after : Line 5: has from:[t] to:[ss] at end """ patch914575_from3 = """line 0 1234567890123456789012345689012345 line 1 line 2 line 3 line 4 changed line 5 changed line 6 changed line 7 line 8 subtracted line 9 1234567890123456789012345689012345 short line just fits in!! just fits in two lines yup!! the end""" patch914575_to3 = """line 0 1234567890123456789012345689012345 line 1 line 2 added line 3 line 4 chanGEd line 5a chanGed line 6a changEd line 7 line 8 line 9 1234567890 another long line that needs to be wrapped just fitS in!! just fits in two lineS yup!! the end""" class TestSFpatches(unittest.TestCase): def test_html_diff(self): # Check SF patch 914575 for generating HTML differences f1a = ((patch914575_from1 + '123\n'*10)*3) t1a = (patch914575_to1 + '123\n'*10)*3 f1b = '456\n'*10 + f1a t1b = '456\n'*10 + t1a f1a = f1a.splitlines() t1a = t1a.splitlines() f1b = f1b.splitlines() t1b = t1b.splitlines() f2 = patch914575_from2.splitlines() t2 = patch914575_to2.splitlines() f3 = patch914575_from3 t3 = patch914575_to3 i = difflib.HtmlDiff() j = difflib.HtmlDiff(tabsize=2) k = difflib.HtmlDiff(wrapcolumn=14) full = i.make_file(f1a,t1a,'from','to',context=False,numlines=5) tables = '\n'.join( [ '<h2>Context (first diff within numlines=5(default))</h2>', i.make_table(f1a,t1a,'from','to',context=True), '<h2>Context (first diff after numlines=5(default))</h2>', i.make_table(f1b,t1b,'from','to',context=True), '<h2>Context (numlines=6)</h2>', i.make_table(f1a,t1a,'from','to',context=True,numlines=6), '<h2>Context (numlines=0)</h2>', i.make_table(f1a,t1a,'from','to',context=True,numlines=0), '<h2>Same Context</h2>', i.make_table(f1a,f1a,'from','to',context=True), '<h2>Same Full</h2>', i.make_table(f1a,f1a,'from','to',context=False), '<h2>Empty Context</h2>', i.make_table([],[],'from','to',context=True), '<h2>Empty Full</h2>', i.make_table([],[],'from','to',context=False), '<h2>tabsize=2</h2>', j.make_table(f2,t2), '<h2>tabsize=default</h2>', i.make_table(f2,t2), '<h2>Context (wrapcolumn=14,numlines=0)</h2>', k.make_table(f3.splitlines(),t3.splitlines(),context=True,numlines=0), '<h2>wrapcolumn=14,splitlines()</h2>', k.make_table(f3.splitlines(),t3.splitlines()), '<h2>wrapcolumn=14,splitlines(True)</h2>', k.make_table(f3.splitlines(True),t3.splitlines(True)), ]) actual = full.replace('</body>','\n%s\n</body>' % tables) # temporarily uncomment next two lines to baseline this test #with open('test_difflib_expect.html','w') as fp: # fp.write(actual) with open(findfile('test_difflib_expect.html')) as fp: self.assertEqual(actual, fp.read()) def test_recursion_limit(self): # Check if the problem described in patch #1413711 exists. limit = sys.getrecursionlimit() old = [(i%2 and "K:%d" or "V:A:%d") % i for i in range(limit*2)] new = [(i%2 and "K:%d" or "V:B:%d") % i for i in range(limit*2)] difflib.SequenceMatcher(None, old, new).get_opcodes() class TestOutputFormat(unittest.TestCase): def test_tab_delimiter(self): args = ['one', 'two', 'Original', 'Current', '2005-01-26 23:30:50', '2010-04-02 10:20:52'] ud = difflib.unified_diff(*args, lineterm='') self.assertEqual(list(ud)[0:2], [ "--- Original\t2005-01-26 23:30:50", "+++ Current\t2010-04-02 10:20:52"]) cd = difflib.context_diff(*args, lineterm='') self.assertEqual(list(cd)[0:2], [ "*** Original\t2005-01-26 23:30:50", "--- Current\t2010-04-02 10:20:52"]) def test_no_trailing_tab_on_empty_filedate(self): args = ['one', 'two', 'Original', 'Current'] ud = difflib.unified_diff(*args, lineterm='') self.assertEqual(list(ud)[0:2], ["--- Original", "+++ Current"]) cd = difflib.context_diff(*args, lineterm='') self.assertEqual(list(cd)[0:2], ["*** Original", "--- Current"]) def test_range_format_unified(self): # Per the diff spec at http://www.unix.org/single_unix_specification/ spec = '''\ Each <range> field shall be of the form: %1d", <beginning line number> if the range contains exactly one line, and: "%1d,%1d", <beginning line number>, <number of lines> otherwise. If a range is empty, its beginning line number shall be the number of the line just before the range, or 0 if the empty range starts the file. ''' fmt = difflib._format_range_unified self.assertEqual(fmt(3,3), '3,0') self.assertEqual(fmt(3,4), '4') self.assertEqual(fmt(3,5), '4,2') self.assertEqual(fmt(3,6), '4,3') self.assertEqual(fmt(0,0), '0,0') def test_range_format_context(self): # Per the diff spec at http://www.unix.org/single_unix_specification/ spec = '''\ The range of lines in file1 shall be written in the following format if the range contains two or more lines: "*** %d,%d ****\n", <beginning line number>, <ending line number> and the following format otherwise: "*** %d ****\n", <ending line number> The ending line number of an empty range shall be the number of the preceding line, or 0 if the range is at the start of the file. Next, the range of lines in file2 shall be written in the following format if the range contains two or more lines: "--- %d,%d ----\n", <beginning line number>, <ending line number> and the following format otherwise: "--- %d ----\n", <ending line number> ''' fmt = difflib._format_range_context self.assertEqual(fmt(3,3), '3') self.assertEqual(fmt(3,4), '4') self.assertEqual(fmt(3,5), '4,5') self.assertEqual(fmt(3,6), '4,6') self.assertEqual(fmt(0,0), '0') def test_main(): difflib.HtmlDiff._default_prefix = 0 Doctests = doctest.DocTestSuite(difflib) run_unittest( TestWithAscii, TestAutojunk, TestSFpatches, TestSFbugs, TestOutputFormat, Doctests) if __name__ == '__main__': test_main()
    mit
    sergiohgz/incubator-airflow
    tests/contrib/operators/test_discord_webhook_operator.py
    15
    2403
    # -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import unittest from airflow import DAG, configuration from airflow.contrib.operators.discord_webhook_operator import DiscordWebhookOperator from airflow.utils import timezone DEFAULT_DATE = timezone.datetime(2018, 1, 1) class TestDiscordWebhookOperator(unittest.TestCase): _config = { 'http_conn_id': 'discord-webhook-default', 'webhook_endpoint': 'webhooks/11111/some-discord-token_111', 'message': 'your message here', 'username': 'Airflow Webhook', 'avatar_url': 'https://static-cdn.avatars.com/my-avatar-path', 'tts': False, 'proxy': 'https://proxy.proxy.com:8888' } def setUp(self): configuration.load_test_config() args = { 'owner': 'airflow', 'start_date': DEFAULT_DATE } self.dag = DAG('test_dag_id', default_args=args) def test_execute(self): operator = DiscordWebhookOperator( task_id='discord_webhook_task', dag=self.dag, **self._config ) self.assertEqual(self._config['http_conn_id'], operator.http_conn_id) self.assertEqual(self._config['webhook_endpoint'], operator.webhook_endpoint) self.assertEqual(self._config['message'], operator.message) self.assertEqual(self._config['username'], operator.username) self.assertEqual(self._config['avatar_url'], operator.avatar_url) self.assertEqual(self._config['tts'], operator.tts) self.assertEqual(self._config['proxy'], operator.proxy) if __name__ == '__main__': unittest.main()
    apache-2.0
    Tesla-Redux-Devices/android_kernel_mediatek_sprout
    arch/ia64/scripts/unwcheck.py
    13143
    1714
    #!/usr/bin/python # # Usage: unwcheck.py FILE # # This script checks the unwind info of each function in file FILE # and verifies that the sum of the region-lengths matches the total # length of the function. # # Based on a shell/awk script originally written by Harish Patil, # which was converted to Perl by Matthew Chapman, which was converted # to Python by David Mosberger. # import os import re import sys if len(sys.argv) != 2: print "Usage: %s FILE" % sys.argv[0] sys.exit(2) readelf = os.getenv("READELF", "readelf") start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]") rlen_pattern = re.compile(".*rlen=([0-9]+)") def check_func (func, slots, rlen_sum): if slots != rlen_sum: global num_errors num_errors += 1 if not func: func = "[%#x-%#x]" % (start, end) print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum) return num_funcs = 0 num_errors = 0 func = False slots = 0 rlen_sum = 0 for line in os.popen("%s -u %s" % (readelf, sys.argv[1])): m = start_pattern.match(line) if m: check_func(func, slots, rlen_sum) func = m.group(1) start = long(m.group(2), 16) end = long(m.group(3), 16) slots = 3 * (end - start) / 16 rlen_sum = 0L num_funcs += 1 else: m = rlen_pattern.match(line) if m: rlen_sum += long(m.group(1)) check_func(func, slots, rlen_sum) if num_errors == 0: print "No errors detected in %u functions." % num_funcs else: if num_errors > 1: err="errors" else: err="error" print "%u %s detected in %u functions." % (num_errors, err, num_funcs) sys.exit(1)
    gpl-2.0
    rollenrolm/godot
    doc/tools/doc_merge.py
    22
    5046
    #!/usr/bin/python # -*- coding: utf-8 -*- import sys import xml.etree.ElementTree as ET tree = ET.parse(sys.argv[1]) old_doc=tree.getroot() tree = ET.parse(sys.argv[2]) new_doc=tree.getroot() f = file(sys.argv[3],"wb") tab=0 old_classes={} def write_string(_f, text,newline=True): for t in range(tab): _f.write("\t") _f.write(text) if (newline): _f.write("\n") def escape(ret): ret=ret.replace("&","&amp;"); ret=ret.replace("<","&gt;"); ret=ret.replace(">","&lt;"); ret=ret.replace("'","&apos;"); ret=ret.replace("\"","&quot;"); return ret def inc_tab(): global tab tab+=1 def dec_tab(): global tab tab-=1 write_string(f,'<?xml version="1.0" encoding="UTF-8" ?>') write_string(f,'<doc version="'+new_doc.attrib["version"]+'">') def get_tag(node,name): tag="" if (name in node.attrib): tag=' '+name+'="'+escape(node.attrib[name])+'" ' return tag def find_method_descr(old_class,name): methods = old_class.find("methods") if(methods!=None and len(list(methods))>0): for m in list(methods): if (m.attrib["name"]==name): description=m.find("description") if (description!=None and description.text.strip()!=""): return description.text return None def find_signal_descr(old_class,name): signals = old_class.find("signals") if(signals!=None and len(list(signals))>0): for m in list(signals): if (m.attrib["name"]==name): description=m.find("description") if (description!=None and description.text.strip()!=""): return description.text return None def find_constant_descr(old_class,name): if (old_class==None): return None constants = old_class.find("constants") if(constants!=None and len(list(constants))>0): for m in list(constants): if (m.attrib["name"]==name): if (m.text.strip()!=""): return m.text return None def write_class(c): class_name = c.attrib["name"] print("Parsing Class: "+class_name) if (class_name in old_classes): old_class=old_classes[class_name] else: old_class=None category=get_tag(c,"category") inherits=get_tag(c,"inherits") write_string(f,'<class name="'+class_name+'" '+category+inherits+'>') inc_tab() write_string(f,"<brief_description>") if (old_class!=None): old_brief_descr=old_class.find("brief_description") if (old_brief_descr!=None): write_string(f,escape(old_brief_descr.text.strip())) write_string(f,"</brief_description>") write_string(f,"<description>") if (old_class!=None): old_descr=old_class.find("description") if (old_descr!=None): write_string(f,escape(old_descr.text.strip())) write_string(f,"</description>") methods = c.find("methods") if(methods!=None and len(list(methods))>0): write_string(f,"<methods>") inc_tab() for m in list(methods): qualifiers=get_tag(m,"qualifiers") write_string(f,'<method name="'+escape(m.attrib["name"])+'" ' +qualifiers+'>') inc_tab() for a in list(m): if (a.tag=="return"): typ=get_tag(a,"type") write_string(f,'<return'+typ+'>'); write_string(f,'</return>'); elif (a.tag=="argument"): default=get_tag(a,"default") write_string(f,'<argument index="'+a.attrib["index"]+'" name="'+escape(a.attrib["name"])+'" type="'+a.attrib["type"]+'"' +default+'>'); write_string(f,'</argument>'); write_string(f,'<description>'); if (old_class!=None): old_method_descr=find_method_descr(old_class,m.attrib["name"]) if (old_method_descr): write_string(f,escape(escape(old_method_descr.strip()))) write_string(f,'</description>'); dec_tab() write_string(f,"</method>") dec_tab() write_string(f,"</methods>") signals = c.find("signals") if(signals!=None and len(list(signals))>0): write_string(f,"<signals>") inc_tab() for m in list(signals): write_string(f,'<signal name="'+escape(m.attrib["name"])+'">') inc_tab() for a in list(m): if (a.tag=="argument"): write_string(f,'<argument index="'+a.attrib["index"]+'" name="'+escape(a.attrib["name"])+'" type="'+a.attrib["type"]+'">'); write_string(f,'</argument>'); write_string(f,'<description>'); if (old_class!=None): old_signal_descr=find_signal_descr(old_class,m.attrib["name"]) if (old_signal_descr): write_string(f,escape(old_signal_descr.strip())) write_string(f,'</description>'); dec_tab() write_string(f,"</signal>") dec_tab() write_string(f,"</signals>") constants = c.find("constants") if(constants!=None and len(list(constants))>0): write_string(f,"<constants>") inc_tab() for m in list(constants): write_string(f,'<constant name="'+escape(m.attrib["name"])+'" value="'+m.attrib["value"]+'">') old_constant_descr=find_constant_descr(old_class,m.attrib["name"]) if (old_constant_descr): write_string(f,escape(old_constant_descr.strip())) write_string(f,"</constant>") dec_tab() write_string(f,"</constants>") dec_tab() write_string(f,"</class>") for c in list(old_doc): old_classes[c.attrib["name"]]=c for c in list(new_doc): write_class(c) write_string(f,'</doc>\n')
    mit
    bobellis/ghost_blog
    node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/_openedgebuiltins.py
    370
    40661
    # -*- coding: utf-8 -*- """ pygments.lexers._openedgebuiltins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Builtin list for the OpenEdgeLexer. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ OPENEDGEKEYWORDS = [ 'ABSOLUTE', 'ABS', 'ABSO', 'ABSOL', 'ABSOLU', 'ABSOLUT', 'ACCELERATOR', 'ACCUM', 'ACCUMULATE', 'ACCUM', 'ACCUMU', 'ACCUMUL', 'ACCUMULA', 'ACCUMULAT', 'ACTIVE-FORM', 'ACTIVE-WINDOW', 'ADD', 'ADD-BUFFER', 'ADD-CALC-COLUMN', 'ADD-COLUMNS-FROM', 'ADD-EVENTS-PROCEDURE', 'ADD-FIELDS-FROM', 'ADD-FIRST', 'ADD-INDEX-FIELD', 'ADD-LAST', 'ADD-LIKE-COLUMN', 'ADD-LIKE-FIELD', 'ADD-LIKE-INDEX', 'ADD-NEW-FIELD', 'ADD-NEW-INDEX', 'ADD-SCHEMA-LOCATION', 'ADD-SUPER-PROCEDURE', 'ADM-DATA', 'ADVISE', 'ALERT-BOX', 'ALIAS', 'ALL', 'ALLOW-COLUMN-SEARCHING', 'ALLOW-REPLICATION', 'ALTER', 'ALWAYS-ON-TOP', 'AMBIGUOUS', 'AMBIG', 'AMBIGU', 'AMBIGUO', 'AMBIGUOU', 'ANALYZE', 'ANALYZ', 'AND', 'ANSI-ONLY', 'ANY', 'ANYWHERE', 'APPEND', 'APPL-ALERT-BOXES', 'APPL-ALERT', 'APPL-ALERT-', 'APPL-ALERT-B', 'APPL-ALERT-BO', 'APPL-ALERT-BOX', 'APPL-ALERT-BOXE', 'APPL-CONTEXT-ID', 'APPLICATION', 'APPLY', 'APPSERVER-INFO', 'APPSERVER-PASSWORD', 'APPSERVER-USERID', 'ARRAY-MESSAGE', 'AS', 'ASC', 'ASCENDING', 'ASCE', 'ASCEN', 'ASCEND', 'ASCENDI', 'ASCENDIN', 'ASK-OVERWRITE', 'ASSEMBLY', 'ASSIGN', 'ASYNCHRONOUS', 'ASYNC-REQUEST-COUNT', 'ASYNC-REQUEST-HANDLE', 'AT', 'ATTACHED-PAIRLIST', 'ATTR-SPACE', 'ATTR', 'ATTRI', 'ATTRIB', 'ATTRIBU', 'ATTRIBUT', 'AUDIT-CONTROL', 'AUDIT-ENABLED', 'AUDIT-EVENT-CONTEXT', 'AUDIT-POLICY', 'AUTHENTICATION-FAILED', 'AUTHORIZATION', 'AUTO-COMPLETION', 'AUTO-COMP', 'AUTO-COMPL', 'AUTO-COMPLE', 'AUTO-COMPLET', 'AUTO-COMPLETI', 'AUTO-COMPLETIO', 'AUTO-ENDKEY', 'AUTO-END-KEY', 'AUTO-GO', 'AUTO-INDENT', 'AUTO-IND', 'AUTO-INDE', 'AUTO-INDEN', 'AUTOMATIC', 'AUTO-RESIZE', 'AUTO-RETURN', 'AUTO-RET', 'AUTO-RETU', 'AUTO-RETUR', 'AUTO-SYNCHRONIZE', 'AUTO-ZAP', 'AUTO-Z', 'AUTO-ZA', 'AVAILABLE', 'AVAIL', 'AVAILA', 'AVAILAB', 'AVAILABL', 'AVAILABLE-FORMATS', 'AVERAGE', 'AVE', 'AVER', 'AVERA', 'AVERAG', 'AVG', 'BACKGROUND', 'BACK', 'BACKG', 'BACKGR', 'BACKGRO', 'BACKGROU', 'BACKGROUN', 'BACKWARDS', 'BACKWARD', 'BASE64-DECODE', 'BASE64-ENCODE', 'BASE-ADE', 'BASE-KEY', 'BATCH-MODE', 'BATCH', 'BATCH-', 'BATCH-M', 'BATCH-MO', 'BATCH-MOD', 'BATCH-SIZE', 'BEFORE-HIDE', 'BEFORE-H', 'BEFORE-HI', 'BEFORE-HID', 'BEGIN-EVENT-GROUP', 'BEGINS', 'BELL', 'BETWEEN', 'BGCOLOR', 'BGC', 'BGCO', 'BGCOL', 'BGCOLO', 'BIG-ENDIAN', 'BINARY', 'BIND', 'BIND-WHERE', 'BLANK', 'BLOCK-ITERATION-DISPLAY', 'BORDER-BOTTOM-CHARS', 'BORDER-B', 'BORDER-BO', 'BORDER-BOT', 'BORDER-BOTT', 'BORDER-BOTTO', 'BORDER-BOTTOM-PIXELS', 'BORDER-BOTTOM-P', 'BORDER-BOTTOM-PI', 'BORDER-BOTTOM-PIX', 'BORDER-BOTTOM-PIXE', 'BORDER-BOTTOM-PIXEL', 'BORDER-LEFT-CHARS', 'BORDER-L', 'BORDER-LE', 'BORDER-LEF', 'BORDER-LEFT', 'BORDER-LEFT-', 'BORDER-LEFT-C', 'BORDER-LEFT-CH', 'BORDER-LEFT-CHA', 'BORDER-LEFT-CHAR', 'BORDER-LEFT-PIXELS', 'BORDER-LEFT-P', 'BORDER-LEFT-PI', 'BORDER-LEFT-PIX', 'BORDER-LEFT-PIXE', 'BORDER-LEFT-PIXEL', 'BORDER-RIGHT-CHARS', 'BORDER-R', 'BORDER-RI', 'BORDER-RIG', 'BORDER-RIGH', 'BORDER-RIGHT', 'BORDER-RIGHT-', 'BORDER-RIGHT-C', 'BORDER-RIGHT-CH', 'BORDER-RIGHT-CHA', 'BORDER-RIGHT-CHAR', 'BORDER-RIGHT-PIXELS', 'BORDER-RIGHT-P', 'BORDER-RIGHT-PI', 'BORDER-RIGHT-PIX', 'BORDER-RIGHT-PIXE', 'BORDER-RIGHT-PIXEL', 'BORDER-TOP-CHARS', 'BORDER-T', 'BORDER-TO', 'BORDER-TOP', 'BORDER-TOP-', 'BORDER-TOP-C', 'BORDER-TOP-CH', 'BORDER-TOP-CHA', 'BORDER-TOP-CHAR', 'BORDER-TOP-PIXELS', 'BORDER-TOP-P', 'BORDER-TOP-PI', 'BORDER-TOP-PIX', 'BORDER-TOP-PIXE', 'BORDER-TOP-PIXEL', 'BOX', 'BOX-SELECTABLE', 'BOX-SELECT', 'BOX-SELECTA', 'BOX-SELECTAB', 'BOX-SELECTABL', 'BREAK', 'BROWSE', 'BUFFER', 'BUFFER-CHARS', 'BUFFER-COMPARE', 'BUFFER-COPY', 'BUFFER-CREATE', 'BUFFER-DELETE', 'BUFFER-FIELD', 'BUFFER-HANDLE', 'BUFFER-LINES', 'BUFFER-NAME', 'BUFFER-RELEASE', 'BUFFER-VALUE', 'BUTTON', 'BUTTONS', 'BUTTON', 'BY', 'BY-POINTER', 'BY-VARIANT-POINTER', 'CACHE', 'CACHE-SIZE', 'CALL', 'CALL-NAME', 'CALL-TYPE', 'CANCEL-BREAK', 'CANCEL-BUTTON', 'CAN-CREATE', 'CAN-DELETE', 'CAN-DO', 'CAN-FIND', 'CAN-QUERY', 'CAN-READ', 'CAN-SET', 'CAN-WRITE', 'CAPS', 'CAREFUL-PAINT', 'CASE', 'CASE-SENSITIVE', 'CASE-SEN', 'CASE-SENS', 'CASE-SENSI', 'CASE-SENSIT', 'CASE-SENSITI', 'CASE-SENSITIV', 'CAST', 'CATCH', 'CDECL', 'CENTERED', 'CENTER', 'CENTERE', 'CHAINED', 'CHARACTER_LENGTH', 'CHARSET', 'CHECK', 'CHECKED', 'CHOOSE', 'CHR', 'CLASS', 'CLASS-TYPE', 'CLEAR', 'CLEAR-APPL-CONTEXT', 'CLEAR-LOG', 'CLEAR-SELECTION', 'CLEAR-SELECT', 'CLEAR-SELECTI', 'CLEAR-SELECTIO', 'CLEAR-SORT-ARROWS', 'CLEAR-SORT-ARROW', 'CLIENT-CONNECTION-ID', 'CLIENT-PRINCIPAL', 'CLIENT-TTY', 'CLIENT-TYPE', 'CLIENT-WORKSTATION', 'CLIPBOARD', 'CLOSE', 'CLOSE-LOG', 'CODE', 'CODEBASE-LOCATOR', 'CODEPAGE', 'CODEPAGE-CONVERT', 'COLLATE', 'COL-OF', 'COLON', 'COLON-ALIGNED', 'COLON-ALIGN', 'COLON-ALIGNE', 'COLOR', 'COLOR-TABLE', 'COLUMN', 'COL', 'COLU', 'COLUM', 'COLUMN-BGCOLOR', 'COLUMN-DCOLOR', 'COLUMN-FGCOLOR', 'COLUMN-FONT', 'COLUMN-LABEL', 'COLUMN-LAB', 'COLUMN-LABE', 'COLUMN-MOVABLE', 'COLUMN-OF', 'COLUMN-PFCOLOR', 'COLUMN-READ-ONLY', 'COLUMN-RESIZABLE', 'COLUMNS', 'COLUMN-SCROLLING', 'COMBO-BOX', 'COMMAND', 'COMPARES', 'COMPILE', 'COMPILER', 'COMPLETE', 'COM-SELF', 'CONFIG-NAME', 'CONNECT', 'CONNECTED', 'CONSTRUCTOR', 'CONTAINS', 'CONTENTS', 'CONTEXT', 'CONTEXT-HELP', 'CONTEXT-HELP-FILE', 'CONTEXT-HELP-ID', 'CONTEXT-POPUP', 'CONTROL', 'CONTROL-BOX', 'CONTROL-FRAME', 'CONVERT', 'CONVERT-3D-COLORS', 'CONVERT-TO-OFFSET', 'CONVERT-TO-OFFS', 'CONVERT-TO-OFFSE', 'COPY-DATASET', 'COPY-LOB', 'COPY-SAX-ATTRIBUTES', 'COPY-TEMP-TABLE', 'COUNT', 'COUNT-OF', 'CPCASE', 'CPCOLL', 'CPINTERNAL', 'CPLOG', 'CPPRINT', 'CPRCODEIN', 'CPRCODEOUT', 'CPSTREAM', 'CPTERM', 'CRC-VALUE', 'CREATE', 'CREATE-LIKE', 'CREATE-LIKE-SEQUENTIAL', 'CREATE-NODE-NAMESPACE', 'CREATE-RESULT-LIST-ENTRY', 'CREATE-TEST-FILE', 'CURRENT', 'CURRENT_DATE', 'CURRENT_DATE', 'CURRENT-CHANGED', 'CURRENT-COLUMN', 'CURRENT-ENVIRONMENT', 'CURRENT-ENV', 'CURRENT-ENVI', 'CURRENT-ENVIR', 'CURRENT-ENVIRO', 'CURRENT-ENVIRON', 'CURRENT-ENVIRONM', 'CURRENT-ENVIRONME', 'CURRENT-ENVIRONMEN', 'CURRENT-ITERATION', 'CURRENT-LANGUAGE', 'CURRENT-LANG', 'CURRENT-LANGU', 'CURRENT-LANGUA', 'CURRENT-LANGUAG', 'CURRENT-QUERY', 'CURRENT-RESULT-ROW', 'CURRENT-ROW-MODIFIED', 'CURRENT-VALUE', 'CURRENT-WINDOW', 'CURSOR', 'CURS', 'CURSO', 'CURSOR-CHAR', 'CURSOR-LINE', 'CURSOR-OFFSET', 'DATABASE', 'DATA-BIND', 'DATA-ENTRY-RETURN', 'DATA-ENTRY-RET', 'DATA-ENTRY-RETU', 'DATA-ENTRY-RETUR', 'DATA-RELATION', 'DATA-REL', 'DATA-RELA', 'DATA-RELAT', 'DATA-RELATI', 'DATA-RELATIO', 'DATASERVERS', 'DATASET', 'DATASET-HANDLE', 'DATA-SOURCE', 'DATA-SOURCE-COMPLETE-MAP', 'DATA-SOURCE-MODIFIED', 'DATA-SOURCE-ROWID', 'DATA-TYPE', 'DATA-T', 'DATA-TY', 'DATA-TYP', 'DATE-FORMAT', 'DATE-F', 'DATE-FO', 'DATE-FOR', 'DATE-FORM', 'DATE-FORMA', 'DAY', 'DBCODEPAGE', 'DBCOLLATION', 'DBNAME', 'DBPARAM', 'DB-REFERENCES', 'DBRESTRICTIONS', 'DBREST', 'DBRESTR', 'DBRESTRI', 'DBRESTRIC', 'DBRESTRICT', 'DBRESTRICTI', 'DBRESTRICTIO', 'DBRESTRICTION', 'DBTASKID', 'DBTYPE', 'DBVERSION', 'DBVERS', 'DBVERSI', 'DBVERSIO', 'DCOLOR', 'DDE', 'DDE-ERROR', 'DDE-ID', 'DDE-I', 'DDE-ITEM', 'DDE-NAME', 'DDE-TOPIC', 'DEBLANK', 'DEBUG', 'DEBU', 'DEBUG-ALERT', 'DEBUGGER', 'DEBUG-LIST', 'DECIMALS', 'DECLARE', 'DECLARE-NAMESPACE', 'DECRYPT', 'DEFAULT', 'DEFAULT-BUFFER-HANDLE', 'DEFAULT-BUTTON', 'DEFAUT-B', 'DEFAUT-BU', 'DEFAUT-BUT', 'DEFAUT-BUTT', 'DEFAUT-BUTTO', 'DEFAULT-COMMIT', 'DEFAULT-EXTENSION', 'DEFAULT-EX', 'DEFAULT-EXT', 'DEFAULT-EXTE', 'DEFAULT-EXTEN', 'DEFAULT-EXTENS', 'DEFAULT-EXTENSI', 'DEFAULT-EXTENSIO', 'DEFAULT-NOXLATE', 'DEFAULT-NOXL', 'DEFAULT-NOXLA', 'DEFAULT-NOXLAT', 'DEFAULT-VALUE', 'DEFAULT-WINDOW', 'DEFINED', 'DEFINE-USER-EVENT-MANAGER', 'DELETE', 'DEL', 'DELE', 'DELET', 'DELETE-CHARACTER', 'DELETE-CHAR', 'DELETE-CHARA', 'DELETE-CHARAC', 'DELETE-CHARACT', 'DELETE-CHARACTE', 'DELETE-CURRENT-ROW', 'DELETE-LINE', 'DELETE-RESULT-LIST-ENTRY', 'DELETE-SELECTED-ROW', 'DELETE-SELECTED-ROWS', 'DELIMITER', 'DESC', 'DESCENDING', 'DESC', 'DESCE', 'DESCEN', 'DESCEND', 'DESCENDI', 'DESCENDIN', 'DESELECT-FOCUSED-ROW', 'DESELECTION', 'DESELECT-ROWS', 'DESELECT-SELECTED-ROW', 'DESTRUCTOR', 'DIALOG-BOX', 'DICTIONARY', 'DICT', 'DICTI', 'DICTIO', 'DICTION', 'DICTIONA', 'DICTIONAR', 'DIR', 'DISABLE', 'DISABLE-AUTO-ZAP', 'DISABLED', 'DISABLE-DUMP-TRIGGERS', 'DISABLE-LOAD-TRIGGERS', 'DISCONNECT', 'DISCON', 'DISCONN', 'DISCONNE', 'DISCONNEC', 'DISP', 'DISPLAY', 'DISP', 'DISPL', 'DISPLA', 'DISPLAY-MESSAGE', 'DISPLAY-TYPE', 'DISPLAY-T', 'DISPLAY-TY', 'DISPLAY-TYP', 'DISTINCT', 'DO', 'DOMAIN-DESCRIPTION', 'DOMAIN-NAME', 'DOMAIN-TYPE', 'DOS', 'DOUBLE', 'DOWN', 'DRAG-ENABLED', 'DROP', 'DROP-DOWN', 'DROP-DOWN-LIST', 'DROP-FILE-NOTIFY', 'DROP-TARGET', 'DUMP', 'DYNAMIC', 'DYNAMIC-FUNCTION', 'EACH', 'ECHO', 'EDGE-CHARS', 'EDGE', 'EDGE-', 'EDGE-C', 'EDGE-CH', 'EDGE-CHA', 'EDGE-CHAR', 'EDGE-PIXELS', 'EDGE-P', 'EDGE-PI', 'EDGE-PIX', 'EDGE-PIXE', 'EDGE-PIXEL', 'EDIT-CAN-PASTE', 'EDIT-CAN-UNDO', 'EDIT-CLEAR', 'EDIT-COPY', 'EDIT-CUT', 'EDITING', 'EDITOR', 'EDIT-PASTE', 'EDIT-UNDO', 'ELSE', 'EMPTY', 'EMPTY-TEMP-TABLE', 'ENABLE', 'ENABLED-FIELDS', 'ENCODE', 'ENCRYPT', 'ENCRYPT-AUDIT-MAC-KEY', 'ENCRYPTION-SALT', 'END', 'END-DOCUMENT', 'END-ELEMENT', 'END-EVENT-GROUP', 'END-FILE-DROP', 'ENDKEY', 'END-KEY', 'END-MOVE', 'END-RESIZE', 'END-ROW-RESIZE', 'END-USER-PROMPT', 'ENTERED', 'ENTRY', 'EQ', 'ERROR', 'ERROR-COLUMN', 'ERROR-COL', 'ERROR-COLU', 'ERROR-COLUM', 'ERROR-ROW', 'ERROR-STACK-TRACE', 'ERROR-STATUS', 'ERROR-STAT', 'ERROR-STATU', 'ESCAPE', 'ETIME', 'EVENT-GROUP-ID', 'EVENT-PROCEDURE', 'EVENT-PROCEDURE-CONTEXT', 'EVENTS', 'EVENT', 'EVENT-TYPE', 'EVENT-T', 'EVENT-TY', 'EVENT-TYP', 'EXCEPT', 'EXCLUSIVE-ID', 'EXCLUSIVE-LOCK', 'EXCLUSIVE', 'EXCLUSIVE-', 'EXCLUSIVE-L', 'EXCLUSIVE-LO', 'EXCLUSIVE-LOC', 'EXCLUSIVE-WEB-USER', 'EXECUTE', 'EXISTS', 'EXP', 'EXPAND', 'EXPANDABLE', 'EXPLICIT', 'EXPORT', 'EXPORT-PRINCIPAL', 'EXTENDED', 'EXTENT', 'EXTERNAL', 'FALSE', 'FETCH', 'FETCH-SELECTED-ROW', 'FGCOLOR', 'FGC', 'FGCO', 'FGCOL', 'FGCOLO', 'FIELD', 'FIELDS', 'FIELD', 'FILE', 'FILE-CREATE-DATE', 'FILE-CREATE-TIME', 'FILE-INFORMATION', 'FILE-INFO', 'FILE-INFOR', 'FILE-INFORM', 'FILE-INFORMA', 'FILE-INFORMAT', 'FILE-INFORMATI', 'FILE-INFORMATIO', 'FILE-MOD-DATE', 'FILE-MOD-TIME', 'FILENAME', 'FILE-NAME', 'FILE-OFFSET', 'FILE-OFF', 'FILE-OFFS', 'FILE-OFFSE', 'FILE-SIZE', 'FILE-TYPE', 'FILL', 'FILLED', 'FILL-IN', 'FILTERS', 'FINAL', 'FINALLY', 'FIND', 'FIND-BY-ROWID', 'FIND-CASE-SENSITIVE', 'FIND-CURRENT', 'FINDER', 'FIND-FIRST', 'FIND-GLOBAL', 'FIND-LAST', 'FIND-NEXT-OCCURRENCE', 'FIND-PREV-OCCURRENCE', 'FIND-SELECT', 'FIND-UNIQUE', 'FIND-WRAP-AROUND', 'FIRST', 'FIRST-ASYNCH-REQUEST', 'FIRST-CHILD', 'FIRST-COLUMN', 'FIRST-FORM', 'FIRST-OBJECT', 'FIRST-OF', 'FIRST-PROCEDURE', 'FIRST-PROC', 'FIRST-PROCE', 'FIRST-PROCED', 'FIRST-PROCEDU', 'FIRST-PROCEDUR', 'FIRST-SERVER', 'FIRST-TAB-ITEM', 'FIRST-TAB-I', 'FIRST-TAB-IT', 'FIRST-TAB-ITE', 'FIT-LAST-COLUMN', 'FIXED-ONLY', 'FLAT-BUTTON', 'FLOAT', 'FOCUS', 'FOCUSED-ROW', 'FOCUSED-ROW-SELECTED', 'FONT', 'FONT-TABLE', 'FOR', 'FORCE-FILE', 'FOREGROUND', 'FORE', 'FOREG', 'FOREGR', 'FOREGRO', 'FOREGROU', 'FOREGROUN', 'FORM', 'FORMAT', 'FORM', 'FORMA', 'FORMATTED', 'FORMATTE', 'FORM-LONG-INPUT', 'FORWARD', 'FORWARDS', 'FORWARD', 'FRAGMENT', 'FRAGMEN', 'FRAME', 'FRAM', 'FRAME-COL', 'FRAME-DB', 'FRAME-DOWN', 'FRAME-FIELD', 'FRAME-FILE', 'FRAME-INDEX', 'FRAME-INDE', 'FRAME-LINE', 'FRAME-NAME', 'FRAME-ROW', 'FRAME-SPACING', 'FRAME-SPA', 'FRAME-SPAC', 'FRAME-SPACI', 'FRAME-SPACIN', 'FRAME-VALUE', 'FRAME-VAL', 'FRAME-VALU', 'FRAME-X', 'FRAME-Y', 'FREQUENCY', 'FROM', 'FROM-CHARS', 'FROM-C', 'FROM-CH', 'FROM-CHA', 'FROM-CHAR', 'FROM-CURRENT', 'FROM-CUR', 'FROM-CURR', 'FROM-CURRE', 'FROM-CURREN', 'FROM-PIXELS', 'FROM-P', 'FROM-PI', 'FROM-PIX', 'FROM-PIXE', 'FROM-PIXEL', 'FULL-HEIGHT-CHARS', 'FULL-HEIGHT', 'FULL-HEIGHT-', 'FULL-HEIGHT-C', 'FULL-HEIGHT-CH', 'FULL-HEIGHT-CHA', 'FULL-HEIGHT-CHAR', 'FULL-HEIGHT-PIXELS', 'FULL-HEIGHT-P', 'FULL-HEIGHT-PI', 'FULL-HEIGHT-PIX', 'FULL-HEIGHT-PIXE', 'FULL-HEIGHT-PIXEL', 'FULL-PATHNAME', 'FULL-PATHN', 'FULL-PATHNA', 'FULL-PATHNAM', 'FULL-WIDTH-CHARS', 'FULL-WIDTH', 'FULL-WIDTH-', 'FULL-WIDTH-C', 'FULL-WIDTH-CH', 'FULL-WIDTH-CHA', 'FULL-WIDTH-CHAR', 'FULL-WIDTH-PIXELS', 'FULL-WIDTH-P', 'FULL-WIDTH-PI', 'FULL-WIDTH-PIX', 'FULL-WIDTH-PIXE', 'FULL-WIDTH-PIXEL', 'FUNCTION', 'FUNCTION-CALL-TYPE', 'GATEWAYS', 'GATEWAY', 'GE', 'GENERATE-MD5', 'GENERATE-PBE-KEY', 'GENERATE-PBE-SALT', 'GENERATE-RANDOM-KEY', 'GENERATE-UUID', 'GET', 'GET-ATTR-CALL-TYPE', 'GET-ATTRIBUTE-NODE', 'GET-BINARY-DATA', 'GET-BLUE-VALUE', 'GET-BLUE', 'GET-BLUE-', 'GET-BLUE-V', 'GET-BLUE-VA', 'GET-BLUE-VAL', 'GET-BLUE-VALU', 'GET-BROWSE-COLUMN', 'GET-BUFFER-HANDLEGETBYTE', 'GET-BYTE', 'GET-CALLBACK-PROC-CONTEXT', 'GET-CALLBACK-PROC-NAME', 'GET-CGI-LIST', 'GET-CGI-LONG-VALUE', 'GET-CGI-VALUE', 'GET-CODEPAGES', 'GET-COLLATIONS', 'GET-CONFIG-VALUE', 'GET-CURRENT', 'GET-DOUBLE', 'GET-DROPPED-FILE', 'GET-DYNAMIC', 'GET-ERROR-COLUMN', 'GET-ERROR-ROW', 'GET-FILE', 'GET-FILE-NAME', 'GET-FILE-OFFSET', 'GET-FILE-OFFSE', 'GET-FIRST', 'GET-FLOAT', 'GET-GREEN-VALUE', 'GET-GREEN', 'GET-GREEN-', 'GET-GREEN-V', 'GET-GREEN-VA', 'GET-GREEN-VAL', 'GET-GREEN-VALU', 'GET-INDEX-BY-NAMESPACE-NAME', 'GET-INDEX-BY-QNAME', 'GET-INT64', 'GET-ITERATION', 'GET-KEY-VALUE', 'GET-KEY-VAL', 'GET-KEY-VALU', 'GET-LAST', 'GET-LOCALNAME-BY-INDEX', 'GET-LONG', 'GET-MESSAGE', 'GET-NEXT', 'GET-NUMBER', 'GET-POINTER-VALUE', 'GET-PREV', 'GET-PRINTERS', 'GET-PROPERTY', 'GET-QNAME-BY-INDEX', 'GET-RED-VALUE', 'GET-RED', 'GET-RED-', 'GET-RED-V', 'GET-RED-VA', 'GET-RED-VAL', 'GET-RED-VALU', 'GET-REPOSITIONED-ROW', 'GET-RGB-VALUE', 'GET-SELECTED-WIDGET', 'GET-SELECTED', 'GET-SELECTED-', 'GET-SELECTED-W', 'GET-SELECTED-WI', 'GET-SELECTED-WID', 'GET-SELECTED-WIDG', 'GET-SELECTED-WIDGE', 'GET-SHORT', 'GET-SIGNATURE', 'GET-SIZE', 'GET-STRING', 'GET-TAB-ITEM', 'GET-TEXT-HEIGHT-CHARS', 'GET-TEXT-HEIGHT', 'GET-TEXT-HEIGHT-', 'GET-TEXT-HEIGHT-C', 'GET-TEXT-HEIGHT-CH', 'GET-TEXT-HEIGHT-CHA', 'GET-TEXT-HEIGHT-CHAR', 'GET-TEXT-HEIGHT-PIXELS', 'GET-TEXT-HEIGHT-P', 'GET-TEXT-HEIGHT-PI', 'GET-TEXT-HEIGHT-PIX', 'GET-TEXT-HEIGHT-PIXE', 'GET-TEXT-HEIGHT-PIXEL', 'GET-TEXT-WIDTH-CHARS', 'GET-TEXT-WIDTH', 'GET-TEXT-WIDTH-', 'GET-TEXT-WIDTH-C', 'GET-TEXT-WIDTH-CH', 'GET-TEXT-WIDTH-CHA', 'GET-TEXT-WIDTH-CHAR', 'GET-TEXT-WIDTH-PIXELS', 'GET-TEXT-WIDTH-P', 'GET-TEXT-WIDTH-PI', 'GET-TEXT-WIDTH-PIX', 'GET-TEXT-WIDTH-PIXE', 'GET-TEXT-WIDTH-PIXEL', 'GET-TYPE-BY-INDEX', 'GET-TYPE-BY-NAMESPACE-NAME', 'GET-TYPE-BY-QNAME', 'GET-UNSIGNED-LONG', 'GET-UNSIGNED-SHORT', 'GET-URI-BY-INDEX', 'GET-VALUE-BY-INDEX', 'GET-VALUE-BY-NAMESPACE-NAME', 'GET-VALUE-BY-QNAME', 'GET-WAIT-STATE', 'GLOBAL', 'GO-ON', 'GO-PENDING', 'GO-PEND', 'GO-PENDI', 'GO-PENDIN', 'GRANT', 'GRAPHIC-EDGE', 'GRAPHIC-E', 'GRAPHIC-ED', 'GRAPHIC-EDG', 'GRID-FACTOR-HORIZONTAL', 'GRID-FACTOR-H', 'GRID-FACTOR-HO', 'GRID-FACTOR-HOR', 'GRID-FACTOR-HORI', 'GRID-FACTOR-HORIZ', 'GRID-FACTOR-HORIZO', 'GRID-FACTOR-HORIZON', 'GRID-FACTOR-HORIZONT', 'GRID-FACTOR-HORIZONTA', 'GRID-FACTOR-VERTICAL', 'GRID-FACTOR-V', 'GRID-FACTOR-VE', 'GRID-FACTOR-VER', 'GRID-FACTOR-VERT', 'GRID-FACTOR-VERT', 'GRID-FACTOR-VERTI', 'GRID-FACTOR-VERTIC', 'GRID-FACTOR-VERTICA', 'GRID-SNAP', 'GRID-UNIT-HEIGHT-CHARS', 'GRID-UNIT-HEIGHT', 'GRID-UNIT-HEIGHT-', 'GRID-UNIT-HEIGHT-C', 'GRID-UNIT-HEIGHT-CH', 'GRID-UNIT-HEIGHT-CHA', 'GRID-UNIT-HEIGHT-PIXELS', 'GRID-UNIT-HEIGHT-P', 'GRID-UNIT-HEIGHT-PI', 'GRID-UNIT-HEIGHT-PIX', 'GRID-UNIT-HEIGHT-PIXE', 'GRID-UNIT-HEIGHT-PIXEL', 'GRID-UNIT-WIDTH-CHARS', 'GRID-UNIT-WIDTH', 'GRID-UNIT-WIDTH-', 'GRID-UNIT-WIDTH-C', 'GRID-UNIT-WIDTH-CH', 'GRID-UNIT-WIDTH-CHA', 'GRID-UNIT-WIDTH-CHAR', 'GRID-UNIT-WIDTH-PIXELS', 'GRID-UNIT-WIDTH-P', 'GRID-UNIT-WIDTH-PI', 'GRID-UNIT-WIDTH-PIX', 'GRID-UNIT-WIDTH-PIXE', 'GRID-UNIT-WIDTH-PIXEL', 'GRID-VISIBLE', 'GROUP', 'GT', 'GUID', 'HANDLER', 'HAS-RECORDS', 'HAVING', 'HEADER', 'HEIGHT-CHARS', 'HEIGHT', 'HEIGHT-', 'HEIGHT-C', 'HEIGHT-CH', 'HEIGHT-CHA', 'HEIGHT-CHAR', 'HEIGHT-PIXELS', 'HEIGHT-P', 'HEIGHT-PI', 'HEIGHT-PIX', 'HEIGHT-PIXE', 'HEIGHT-PIXEL', 'HELP', 'HEX-DECODE', 'HEX-ENCODE', 'HIDDEN', 'HIDE', 'HORIZONTAL', 'HORI', 'HORIZ', 'HORIZO', 'HORIZON', 'HORIZONT', 'HORIZONTA', 'HOST-BYTE-ORDER', 'HTML-CHARSET', 'HTML-END-OF-LINE', 'HTML-END-OF-PAGE', 'HTML-FRAME-BEGIN', 'HTML-FRAME-END', 'HTML-HEADER-BEGIN', 'HTML-HEADER-END', 'HTML-TITLE-BEGIN', 'HTML-TITLE-END', 'HWND', 'ICON', 'IF', 'IMAGE', 'IMAGE-DOWN', 'IMAGE-INSENSITIVE', 'IMAGE-SIZE', 'IMAGE-SIZE-CHARS', 'IMAGE-SIZE-C', 'IMAGE-SIZE-CH', 'IMAGE-SIZE-CHA', 'IMAGE-SIZE-CHAR', 'IMAGE-SIZE-PIXELS', 'IMAGE-SIZE-P', 'IMAGE-SIZE-PI', 'IMAGE-SIZE-PIX', 'IMAGE-SIZE-PIXE', 'IMAGE-SIZE-PIXEL', 'IMAGE-UP', 'IMMEDIATE-DISPLAY', 'IMPLEMENTS', 'IMPORT', 'IMPORT-PRINCIPAL', 'IN', 'INCREMENT-EXCLUSIVE-ID', 'INDEX', 'INDEXED-REPOSITION', 'INDEX-HINT', 'INDEX-INFORMATION', 'INDICATOR', 'INFORMATION', 'INFO', 'INFOR', 'INFORM', 'INFORMA', 'INFORMAT', 'INFORMATI', 'INFORMATIO', 'IN-HANDLE', 'INHERIT-BGCOLOR', 'INHERIT-BGC', 'INHERIT-BGCO', 'INHERIT-BGCOL', 'INHERIT-BGCOLO', 'INHERIT-FGCOLOR', 'INHERIT-FGC', 'INHERIT-FGCO', 'INHERIT-FGCOL', 'INHERIT-FGCOLO', 'INHERITS', 'INITIAL', 'INIT', 'INITI', 'INITIA', 'INITIAL-DIR', 'INITIAL-FILTER', 'INITIALIZE-DOCUMENT-TYPE', 'INITIATE', 'INNER-CHARS', 'INNER-LINES', 'INPUT', 'INPUT-OUTPUT', 'INPUT-O', 'INPUT-OU', 'INPUT-OUT', 'INPUT-OUTP', 'INPUT-OUTPU', 'INPUT-VALUE', 'INSERT', 'INSERT-ATTRIBUTE', 'INSERT-BACKTAB', 'INSERT-B', 'INSERT-BA', 'INSERT-BAC', 'INSERT-BACK', 'INSERT-BACKT', 'INSERT-BACKTA', 'INSERT-FILE', 'INSERT-ROW', 'INSERT-STRING', 'INSERT-TAB', 'INSERT-T', 'INSERT-TA', 'INTERFACE', 'INTERNAL-ENTRIES', 'INTO', 'INVOKE', 'IS', 'IS-ATTR-SPACE', 'IS-ATTR', 'IS-ATTR-', 'IS-ATTR-S', 'IS-ATTR-SP', 'IS-ATTR-SPA', 'IS-ATTR-SPAC', 'IS-CLASS', 'IS-CLAS', 'IS-LEAD-BYTE', 'IS-ATTR', 'IS-OPEN', 'IS-PARAMETER-SET', 'IS-ROW-SELECTED', 'IS-SELECTED', 'ITEM', 'ITEMS-PER-ROW', 'JOIN', 'JOIN-BY-SQLDB', 'KBLABEL', 'KEEP-CONNECTION-OPEN', 'KEEP-FRAME-Z-ORDER', 'KEEP-FRAME-Z', 'KEEP-FRAME-Z-', 'KEEP-FRAME-Z-O', 'KEEP-FRAME-Z-OR', 'KEEP-FRAME-Z-ORD', 'KEEP-FRAME-Z-ORDE', 'KEEP-MESSAGES', 'KEEP-SECURITY-CACHE', 'KEEP-TAB-ORDER', 'KEY', 'KEYCODE', 'KEY-CODE', 'KEYFUNCTION', 'KEYFUNC', 'KEYFUNCT', 'KEYFUNCTI', 'KEYFUNCTIO', 'KEY-FUNCTION', 'KEY-FUNC', 'KEY-FUNCT', 'KEY-FUNCTI', 'KEY-FUNCTIO', 'KEYLABEL', 'KEY-LABEL', 'KEYS', 'KEYWORD', 'KEYWORD-ALL', 'LABEL', 'LABEL-BGCOLOR', 'LABEL-BGC', 'LABEL-BGCO', 'LABEL-BGCOL', 'LABEL-BGCOLO', 'LABEL-DCOLOR', 'LABEL-DC', 'LABEL-DCO', 'LABEL-DCOL', 'LABEL-DCOLO', 'LABEL-FGCOLOR', 'LABEL-FGC', 'LABEL-FGCO', 'LABEL-FGCOL', 'LABEL-FGCOLO', 'LABEL-FONT', 'LABEL-PFCOLOR', 'LABEL-PFC', 'LABEL-PFCO', 'LABEL-PFCOL', 'LABEL-PFCOLO', 'LABELS', 'LANDSCAPE', 'LANGUAGES', 'LANGUAGE', 'LARGE', 'LARGE-TO-SMALL', 'LAST', 'LAST-ASYNCH-REQUEST', 'LAST-BATCH', 'LAST-CHILD', 'LAST-EVENT', 'LAST-EVEN', 'LAST-FORM', 'LASTKEY', 'LAST-KEY', 'LAST-OBJECT', 'LAST-OF', 'LAST-PROCEDURE', 'LAST-PROCE', 'LAST-PROCED', 'LAST-PROCEDU', 'LAST-PROCEDUR', 'LAST-SERVER', 'LAST-TAB-ITEM', 'LAST-TAB-I', 'LAST-TAB-IT', 'LAST-TAB-ITE', 'LC', 'LDBNAME', 'LE', 'LEAVE', 'LEFT-ALIGNED', 'LEFT-ALIGN', 'LEFT-ALIGNE', 'LEFT-TRIM', 'LENGTH', 'LIBRARY', 'LIKE', 'LIKE-SEQUENTIAL', 'LINE', 'LINE-COUNTER', 'LINE-COUNT', 'LINE-COUNTE', 'LIST-EVENTS', 'LISTING', 'LISTI', 'LISTIN', 'LIST-ITEM-PAIRS', 'LIST-ITEMS', 'LIST-PROPERTY-NAMES', 'LIST-QUERY-ATTRS', 'LIST-SET-ATTRS', 'LIST-WIDGETS', 'LITERAL-QUESTION', 'LITTLE-ENDIAN', 'LOAD', 'LOAD-DOMAINS', 'LOAD-ICON', 'LOAD-IMAGE', 'LOAD-IMAGE-DOWN', 'LOAD-IMAGE-INSENSITIVE', 'LOAD-IMAGE-UP', 'LOAD-MOUSE-POINTER', 'LOAD-MOUSE-P', 'LOAD-MOUSE-PO', 'LOAD-MOUSE-POI', 'LOAD-MOUSE-POIN', 'LOAD-MOUSE-POINT', 'LOAD-MOUSE-POINTE', 'LOAD-PICTURE', 'LOAD-SMALL-ICON', 'LOCAL-NAME', 'LOCATOR-COLUMN-NUMBER', 'LOCATOR-LINE-NUMBER', 'LOCATOR-PUBLIC-ID', 'LOCATOR-SYSTEM-ID', 'LOCATOR-TYPE', 'LOCKED', 'LOCK-REGISTRATION', 'LOG', 'LOG-AUDIT-EVENT', 'LOGIN-EXPIRATION-TIMESTAMP', 'LOGIN-HOST', 'LOGIN-STATE', 'LOG-MANAGER', 'LOGOUT', 'LOOKAHEAD', 'LOOKUP', 'LT', 'MACHINE-CLASS', 'MANDATORY', 'MANUAL-HIGHLIGHT', 'MAP', 'MARGIN-EXTRA', 'MARGIN-HEIGHT-CHARS', 'MARGIN-HEIGHT', 'MARGIN-HEIGHT-', 'MARGIN-HEIGHT-C', 'MARGIN-HEIGHT-CH', 'MARGIN-HEIGHT-CHA', 'MARGIN-HEIGHT-CHAR', 'MARGIN-HEIGHT-PIXELS', 'MARGIN-HEIGHT-P', 'MARGIN-HEIGHT-PI', 'MARGIN-HEIGHT-PIX', 'MARGIN-HEIGHT-PIXE', 'MARGIN-HEIGHT-PIXEL', 'MARGIN-WIDTH-CHARS', 'MARGIN-WIDTH', 'MARGIN-WIDTH-', 'MARGIN-WIDTH-C', 'MARGIN-WIDTH-CH', 'MARGIN-WIDTH-CHA', 'MARGIN-WIDTH-CHAR', 'MARGIN-WIDTH-PIXELS', 'MARGIN-WIDTH-P', 'MARGIN-WIDTH-PI', 'MARGIN-WIDTH-PIX', 'MARGIN-WIDTH-PIXE', 'MARGIN-WIDTH-PIXEL', 'MARK-NEW', 'MARK-ROW-STATE', 'MATCHES', 'MAX', 'MAX-BUTTON', 'MAX-CHARS', 'MAX-DATA-GUESS', 'MAX-HEIGHT', 'MAX-HEIGHT-CHARS', 'MAX-HEIGHT-C', 'MAX-HEIGHT-CH', 'MAX-HEIGHT-CHA', 'MAX-HEIGHT-CHAR', 'MAX-HEIGHT-PIXELS', 'MAX-HEIGHT-P', 'MAX-HEIGHT-PI', 'MAX-HEIGHT-PIX', 'MAX-HEIGHT-PIXE', 'MAX-HEIGHT-PIXEL', 'MAXIMIZE', 'MAXIMUM', 'MAX', 'MAXI', 'MAXIM', 'MAXIMU', 'MAXIMUM-LEVEL', 'MAX-ROWS', 'MAX-SIZE', 'MAX-VALUE', 'MAX-VAL', 'MAX-VALU', 'MAX-WIDTH', 'MAX-WIDTH-CHARS', 'MAX-WIDTH', 'MAX-WIDTH-', 'MAX-WIDTH-C', 'MAX-WIDTH-CH', 'MAX-WIDTH-CHA', 'MAX-WIDTH-CHAR', 'MAX-WIDTH-PIXELS', 'MAX-WIDTH-P', 'MAX-WIDTH-PI', 'MAX-WIDTH-PIX', 'MAX-WIDTH-PIXE', 'MAX-WIDTH-PIXEL', 'MD5-DIGEST', 'MEMBER', 'MEMPTR-TO-NODE-VALUE', 'MENU', 'MENUBAR', 'MENU-BAR', 'MENU-ITEM', 'MENU-KEY', 'MENU-K', 'MENU-KE', 'MENU-MOUSE', 'MENU-M', 'MENU-MO', 'MENU-MOU', 'MENU-MOUS', 'MERGE-BY-FIELD', 'MESSAGE', 'MESSAGE-AREA', 'MESSAGE-AREA-FONT', 'MESSAGE-LINES', 'METHOD', 'MIN', 'MIN-BUTTON', 'MIN-COLUMN-WIDTH-CHARS', 'MIN-COLUMN-WIDTH-C', 'MIN-COLUMN-WIDTH-CH', 'MIN-COLUMN-WIDTH-CHA', 'MIN-COLUMN-WIDTH-CHAR', 'MIN-COLUMN-WIDTH-PIXELS', 'MIN-COLUMN-WIDTH-P', 'MIN-COLUMN-WIDTH-PI', 'MIN-COLUMN-WIDTH-PIX', 'MIN-COLUMN-WIDTH-PIXE', 'MIN-COLUMN-WIDTH-PIXEL', 'MIN-HEIGHT-CHARS', 'MIN-HEIGHT', 'MIN-HEIGHT-', 'MIN-HEIGHT-C', 'MIN-HEIGHT-CH', 'MIN-HEIGHT-CHA', 'MIN-HEIGHT-CHAR', 'MIN-HEIGHT-PIXELS', 'MIN-HEIGHT-P', 'MIN-HEIGHT-PI', 'MIN-HEIGHT-PIX', 'MIN-HEIGHT-PIXE', 'MIN-HEIGHT-PIXEL', 'MINIMUM', 'MIN', 'MINI', 'MINIM', 'MINIMU', 'MIN-SIZE', 'MIN-VALUE', 'MIN-VAL', 'MIN-VALU', 'MIN-WIDTH-CHARS', 'MIN-WIDTH', 'MIN-WIDTH-', 'MIN-WIDTH-C', 'MIN-WIDTH-CH', 'MIN-WIDTH-CHA', 'MIN-WIDTH-CHAR', 'MIN-WIDTH-PIXELS', 'MIN-WIDTH-P', 'MIN-WIDTH-PI', 'MIN-WIDTH-PIX', 'MIN-WIDTH-PIXE', 'MIN-WIDTH-PIXEL', 'MODIFIED', 'MODULO', 'MOD', 'MODU', 'MODUL', 'MONTH', 'MOUSE', 'MOUSE-POINTER', 'MOUSE-P', 'MOUSE-PO', 'MOUSE-POI', 'MOUSE-POIN', 'MOUSE-POINT', 'MOUSE-POINTE', 'MOVABLE', 'MOVE-AFTER-TAB-ITEM', 'MOVE-AFTER', 'MOVE-AFTER-', 'MOVE-AFTER-T', 'MOVE-AFTER-TA', 'MOVE-AFTER-TAB', 'MOVE-AFTER-TAB-', 'MOVE-AFTER-TAB-I', 'MOVE-AFTER-TAB-IT', 'MOVE-AFTER-TAB-ITE', 'MOVE-BEFORE-TAB-ITEM', 'MOVE-BEFOR', 'MOVE-BEFORE', 'MOVE-BEFORE-', 'MOVE-BEFORE-T', 'MOVE-BEFORE-TA', 'MOVE-BEFORE-TAB', 'MOVE-BEFORE-TAB-', 'MOVE-BEFORE-TAB-I', 'MOVE-BEFORE-TAB-IT', 'MOVE-BEFORE-TAB-ITE', 'MOVE-COLUMN', 'MOVE-COL', 'MOVE-COLU', 'MOVE-COLUM', 'MOVE-TO-BOTTOM', 'MOVE-TO-B', 'MOVE-TO-BO', 'MOVE-TO-BOT', 'MOVE-TO-BOTT', 'MOVE-TO-BOTTO', 'MOVE-TO-EOF', 'MOVE-TO-TOP', 'MOVE-TO-T', 'MOVE-TO-TO', 'MPE', 'MULTI-COMPILE', 'MULTIPLE', 'MULTIPLE-KEY', 'MULTITASKING-INTERVAL', 'MUST-EXIST', 'NAME', 'NAMESPACE-PREFIX', 'NAMESPACE-URI', 'NATIVE', 'NE', 'NEEDS-APPSERVER-PROMPT', 'NEEDS-PROMPT', 'NEW', 'NEW-INSTANCE', 'NEW-ROW', 'NEXT', 'NEXT-COLUMN', 'NEXT-PROMPT', 'NEXT-ROWID', 'NEXT-SIBLING', 'NEXT-TAB-ITEM', 'NEXT-TAB-I', 'NEXT-TAB-IT', 'NEXT-TAB-ITE', 'NEXT-VALUE', 'NO', 'NO-APPLY', 'NO-ARRAY-MESSAGE', 'NO-ASSIGN', 'NO-ATTR-LIST', 'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-L', 'NO-ATTR-LI', 'NO-ATTR-LIS', 'NO-ATTR-SPACE', 'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-S', 'NO-ATTR-SP', 'NO-ATTR-SPA', 'NO-ATTR-SPAC', 'NO-AUTO-VALIDATE', 'NO-BIND-WHERE', 'NO-BOX', 'NO-CONSOLE', 'NO-CONVERT', 'NO-CONVERT-3D-COLORS', 'NO-CURRENT-VALUE', 'NO-DEBUG', 'NODE-VALUE-TO-MEMPTR', 'NO-DRAG', 'NO-ECHO', 'NO-EMPTY-SPACE', 'NO-ERROR', 'NO-FILL', 'NO-F', 'NO-FI', 'NO-FIL', 'NO-FOCUS', 'NO-HELP', 'NO-HIDE', 'NO-INDEX-HINT', 'NO-INHERIT-BGCOLOR', 'NO-INHERIT-BGC', 'NO-INHERIT-BGCO', 'LABEL-BGCOL', 'LABEL-BGCOLO', 'NO-INHERIT-FGCOLOR', 'NO-INHERIT-FGC', 'NO-INHERIT-FGCO', 'NO-INHERIT-FGCOL', 'NO-INHERIT-FGCOLO', 'NO-JOIN-BY-SQLDB', 'NO-LABELS', 'NO-LABE', 'NO-LOBS', 'NO-LOCK', 'NO-LOOKAHEAD', 'NO-MAP', 'NO-MESSAGE', 'NO-MES', 'NO-MESS', 'NO-MESSA', 'NO-MESSAG', 'NONAMESPACE-SCHEMA-LOCATION', 'NONE', 'NO-PAUSE', 'NO-PREFETCH', 'NO-PREFE', 'NO-PREFET', 'NO-PREFETC', 'NORMALIZE', 'NO-ROW-MARKERS', 'NO-SCROLLBAR-VERTICAL', 'NO-SEPARATE-CONNECTION', 'NO-SEPARATORS', 'NOT', 'NO-TAB-STOP', 'NOT-ACTIVE', 'NO-UNDERLINE', 'NO-UND', 'NO-UNDE', 'NO-UNDER', 'NO-UNDERL', 'NO-UNDERLI', 'NO-UNDERLIN', 'NO-UNDO', 'NO-VALIDATE', 'NO-VAL', 'NO-VALI', 'NO-VALID', 'NO-VALIDA', 'NO-VALIDAT', 'NOW', 'NO-WAIT', 'NO-WORD-WRAP', 'NULL', 'NUM-ALIASES', 'NUM-ALI', 'NUM-ALIA', 'NUM-ALIAS', 'NUM-ALIASE', 'NUM-BUFFERS', 'NUM-BUTTONS', 'NUM-BUT', 'NUM-BUTT', 'NUM-BUTTO', 'NUM-BUTTON', 'NUM-COLUMNS', 'NUM-COL', 'NUM-COLU', 'NUM-COLUM', 'NUM-COLUMN', 'NUM-COPIES', 'NUM-DBS', 'NUM-DROPPED-FILES', 'NUM-ENTRIES', 'NUMERIC', 'NUMERIC-FORMAT', 'NUMERIC-F', 'NUMERIC-FO', 'NUMERIC-FOR', 'NUMERIC-FORM', 'NUMERIC-FORMA', 'NUM-FIELDS', 'NUM-FORMATS', 'NUM-ITEMS', 'NUM-ITERATIONS', 'NUM-LINES', 'NUM-LOCKED-COLUMNS', 'NUM-LOCKED-COL', 'NUM-LOCKED-COLU', 'NUM-LOCKED-COLUM', 'NUM-LOCKED-COLUMN', 'NUM-MESSAGES', 'NUM-PARAMETERS', 'NUM-REFERENCES', 'NUM-REPLACED', 'NUM-RESULTS', 'NUM-SELECTED-ROWS', 'NUM-SELECTED-WIDGETS', 'NUM-SELECTED', 'NUM-SELECTED-', 'NUM-SELECTED-W', 'NUM-SELECTED-WI', 'NUM-SELECTED-WID', 'NUM-SELECTED-WIDG', 'NUM-SELECTED-WIDGE', 'NUM-SELECTED-WIDGET', 'NUM-TABS', 'NUM-TO-RETAIN', 'NUM-VISIBLE-COLUMNS', 'OCTET-LENGTH', 'OF', 'OFF', 'OK', 'OK-CANCEL', 'OLD', 'ON', 'ON-FRAME-BORDER', 'ON-FRAME', 'ON-FRAME-', 'ON-FRAME-B', 'ON-FRAME-BO', 'ON-FRAME-BOR', 'ON-FRAME-BORD', 'ON-FRAME-BORDE', 'OPEN', 'OPSYS', 'OPTION', 'OR', 'ORDERED-JOIN', 'ORDINAL', 'OS-APPEND', 'OS-COMMAND', 'OS-COPY', 'OS-CREATE-DIR', 'OS-DELETE', 'OS-DIR', 'OS-DRIVES', 'OS-DRIVE', 'OS-ERROR', 'OS-GETENV', 'OS-RENAME', 'OTHERWISE', 'OUTPUT', 'OVERLAY', 'OVERRIDE', 'OWNER', 'PAGE', 'PAGE-BOTTOM', 'PAGE-BOT', 'PAGE-BOTT', 'PAGE-BOTTO', 'PAGED', 'PAGE-NUMBER', 'PAGE-NUM', 'PAGE-NUMB', 'PAGE-NUMBE', 'PAGE-SIZE', 'PAGE-TOP', 'PAGE-WIDTH', 'PAGE-WID', 'PAGE-WIDT', 'PARAMETER', 'PARAM', 'PARAME', 'PARAMET', 'PARAMETE', 'PARENT', 'PARSE-STATUS', 'PARTIAL-KEY', 'PASCAL', 'PASSWORD-FIELD', 'PATHNAME', 'PAUSE', 'PBE-HASH-ALGORITHM', 'PBE-HASH-ALG', 'PBE-HASH-ALGO', 'PBE-HASH-ALGOR', 'PBE-HASH-ALGORI', 'PBE-HASH-ALGORIT', 'PBE-HASH-ALGORITH', 'PBE-KEY-ROUNDS', 'PDBNAME', 'PERSISTENT', 'PERSIST', 'PERSISTE', 'PERSISTEN', 'PERSISTENT-CACHE-DISABLED', 'PFCOLOR', 'PFC', 'PFCO', 'PFCOL', 'PFCOLO', 'PIXELS', 'PIXELS-PER-COLUMN', 'PIXELS-PER-COL', 'PIXELS-PER-COLU', 'PIXELS-PER-COLUM', 'PIXELS-PER-ROW', 'POPUP-MENU', 'POPUP-M', 'POPUP-ME', 'POPUP-MEN', 'POPUP-ONLY', 'POPUP-O', 'POPUP-ON', 'POPUP-ONL', 'PORTRAIT', 'POSITION', 'PRECISION', 'PREFER-DATASET', 'PREPARED', 'PREPARE-STRING', 'PREPROCESS', 'PREPROC', 'PREPROCE', 'PREPROCES', 'PRESELECT', 'PRESEL', 'PRESELE', 'PRESELEC', 'PREV', 'PREV-COLUMN', 'PREV-SIBLING', 'PREV-TAB-ITEM', 'PREV-TAB-I', 'PREV-TAB-IT', 'PREV-TAB-ITE', 'PRIMARY', 'PRINTER', 'PRINTER-CONTROL-HANDLE', 'PRINTER-HDC', 'PRINTER-NAME', 'PRINTER-PORT', 'PRINTER-SETUP', 'PRIVATE', 'PRIVATE-DATA', 'PRIVATE-D', 'PRIVATE-DA', 'PRIVATE-DAT', 'PRIVILEGES', 'PROCEDURE', 'PROCE', 'PROCED', 'PROCEDU', 'PROCEDUR', 'PROCEDURE-CALL-TYPE', 'PROCESS', 'PROC-HANDLE', 'PROC-HA', 'PROC-HAN', 'PROC-HAND', 'PROC-HANDL', 'PROC-STATUS', 'PROC-ST', 'PROC-STA', 'PROC-STAT', 'PROC-STATU', 'proc-text', 'proc-text-buffe', 'PROFILER', 'PROGRAM-NAME', 'PROGRESS', 'PROGRESS-SOURCE', 'PROGRESS-S', 'PROGRESS-SO', 'PROGRESS-SOU', 'PROGRESS-SOUR', 'PROGRESS-SOURC', 'PROMPT', 'PROMPT-FOR', 'PROMPT-F', 'PROMPT-FO', 'PROMSGS', 'PROPATH', 'PROPERTY', 'PROTECTED', 'PROVERSION', 'PROVERS', 'PROVERSI', 'PROVERSIO', 'PROXY', 'PROXY-PASSWORD', 'PROXY-USERID', 'PUBLIC', 'PUBLIC-ID', 'PUBLISH', 'PUBLISHED-EVENTS', 'PUT', 'PUTBYTE', 'PUT-BYTE', 'PUT-DOUBLE', 'PUT-FLOAT', 'PUT-INT64', 'PUT-KEY-VALUE', 'PUT-KEY-VAL', 'PUT-KEY-VALU', 'PUT-LONG', 'PUT-SHORT', 'PUT-STRING', 'PUT-UNSIGNED-LONG', 'QUERY', 'QUERY-CLOSE', 'QUERY-OFF-END', 'QUERY-OPEN', 'QUERY-PREPARE', 'QUERY-TUNING', 'QUESTION', 'QUIT', 'QUOTER', 'RADIO-BUTTONS', 'RADIO-SET', 'RANDOM', 'RAW-TRANSFER', 'RCODE-INFORMATION', 'RCODE-INFO', 'RCODE-INFOR', 'RCODE-INFORM', 'RCODE-INFORMA', 'RCODE-INFORMAT', 'RCODE-INFORMATI', 'RCODE-INFORMATIO', 'READ-AVAILABLE', 'READ-EXACT-NUM', 'READ-FILE', 'READKEY', 'READ-ONLY', 'READ-XML', 'READ-XMLSCHEMA', 'REAL', 'RECORD-LENGTH', 'RECTANGLE', 'RECT', 'RECTA', 'RECTAN', 'RECTANG', 'RECTANGL', 'RECURSIVE', 'REFERENCE-ONLY', 'REFRESH', 'REFRESHABLE', 'REFRESH-AUDIT-POLICY', 'REGISTER-DOMAIN', 'RELEASE', 'REMOTE', 'REMOVE-EVENTS-PROCEDURE', 'REMOVE-SUPER-PROCEDURE', 'REPEAT', 'REPLACE', 'REPLACE-SELECTION-TEXT', 'REPOSITION', 'REPOSITION-BACKWARD', 'REPOSITION-FORWARD', 'REPOSITION-MODE', 'REPOSITION-TO-ROW', 'REPOSITION-TO-ROWID', 'REQUEST', 'RESET', 'RESIZABLE', 'RESIZA', 'RESIZAB', 'RESIZABL', 'RESIZE', 'RESTART-ROW', 'RESTART-ROWID', 'RETAIN', 'RETAIN-SHAPE', 'RETRY', 'RETRY-CANCEL', 'RETURN', 'RETURN-INSERTED', 'RETURN-INS', 'RETURN-INSE', 'RETURN-INSER', 'RETURN-INSERT', 'RETURN-INSERTE', 'RETURNS', 'RETURN-TO-START-DIR', 'RETURN-TO-START-DI', 'RETURN-VALUE', 'RETURN-VAL', 'RETURN-VALU', 'RETURN-VALUE-DATA-TYPE', 'REVERSE-FROM', 'REVERT', 'REVOKE', 'RGB-VALUE', 'RIGHT-ALIGNED', 'RETURN-ALIGN', 'RETURN-ALIGNE', 'RIGHT-TRIM', 'R-INDEX', 'ROLES', 'ROUND', 'ROUTINE-LEVEL', 'ROW', 'ROW-HEIGHT-CHARS', 'HEIGHT', 'ROW-HEIGHT-PIXELS', 'HEIGHT-P', 'ROW-MARKERS', 'ROW-OF', 'ROW-RESIZABLE', 'RULE', 'RUN', 'RUN-PROCEDURE', 'SAVE', 'SAVE-AS', 'SAVE-FILE', 'SAX-COMPLETE', 'SAX-COMPLE', 'SAX-COMPLET', 'SAX-PARSE', 'SAX-PARSE-FIRST', 'SAX-PARSE-NEXT', 'SAX-PARSER-ERROR', 'SAX-RUNNING', 'SAX-UNINITIALIZED', 'SAX-WRITE-BEGIN', 'SAX-WRITE-COMPLETE', 'SAX-WRITE-CONTENT', 'SAX-WRITE-ELEMENT', 'SAX-WRITE-ERROR', 'SAX-WRITE-IDLE', 'SAX-WRITER', 'SAX-WRITE-TAG', 'SCHEMA', 'SCHEMA-LOCATION', 'SCHEMA-MARSHAL', 'SCHEMA-PATH', 'SCREEN', 'SCREEN-IO', 'SCREEN-LINES', 'SCREEN-VALUE', 'SCREEN-VAL', 'SCREEN-VALU', 'SCROLL', 'SCROLLABLE', 'SCROLLBAR-HORIZONTAL', 'SCROLLBAR-H', 'SCROLLBAR-HO', 'SCROLLBAR-HOR', 'SCROLLBAR-HORI', 'SCROLLBAR-HORIZ', 'SCROLLBAR-HORIZO', 'SCROLLBAR-HORIZON', 'SCROLLBAR-HORIZONT', 'SCROLLBAR-HORIZONTA', 'SCROLL-BARS', 'SCROLLBAR-VERTICAL', 'SCROLLBAR-V', 'SCROLLBAR-VE', 'SCROLLBAR-VER', 'SCROLLBAR-VERT', 'SCROLLBAR-VERTI', 'SCROLLBAR-VERTIC', 'SCROLLBAR-VERTICA', 'SCROLL-DELTA', 'SCROLLED-ROW-POSITION', 'SCROLLED-ROW-POS', 'SCROLLED-ROW-POSI', 'SCROLLED-ROW-POSIT', 'SCROLLED-ROW-POSITI', 'SCROLLED-ROW-POSITIO', 'SCROLLING', 'SCROLL-OFFSET', 'SCROLL-TO-CURRENT-ROW', 'SCROLL-TO-ITEM', 'SCROLL-TO-I', 'SCROLL-TO-IT', 'SCROLL-TO-ITE', 'SCROLL-TO-SELECTED-ROW', 'SDBNAME', 'SEAL', 'SEAL-TIMESTAMP', 'SEARCH', 'SEARCH-SELF', 'SEARCH-TARGET', 'SECTION', 'SECURITY-POLICY', 'SEEK', 'SELECT', 'SELECTABLE', 'SELECT-ALL', 'SELECTED', 'SELECT-FOCUSED-ROW', 'SELECTION', 'SELECTION-END', 'SELECTION-LIST', 'SELECTION-START', 'SELECTION-TEXT', 'SELECT-NEXT-ROW', 'SELECT-PREV-ROW', 'SELECT-ROW', 'SELF', 'SEND', 'send-sql-statement', 'send-sql', 'SENSITIVE', 'SEPARATE-CONNECTION', 'SEPARATOR-FGCOLOR', 'SEPARATORS', 'SERVER', 'SERVER-CONNECTION-BOUND', 'SERVER-CONNECTION-BOUND-REQUEST', 'SERVER-CONNECTION-CONTEXT', 'SERVER-CONNECTION-ID', 'SERVER-OPERATING-MODE', 'SESSION', 'SESSION-ID', 'SET', 'SET-APPL-CONTEXT', 'SET-ATTR-CALL-TYPE', 'SET-ATTRIBUTE-NODE', 'SET-BLUE-VALUE', 'SET-BLUE', 'SET-BLUE-', 'SET-BLUE-V', 'SET-BLUE-VA', 'SET-BLUE-VAL', 'SET-BLUE-VALU', 'SET-BREAK', 'SET-BUFFERS', 'SET-CALLBACK', 'SET-CLIENT', 'SET-COMMIT', 'SET-CONTENTS', 'SET-CURRENT-VALUE', 'SET-DB-CLIENT', 'SET-DYNAMIC', 'SET-EVENT-MANAGER-OPTION', 'SET-GREEN-VALUE', 'SET-GREEN', 'SET-GREEN-', 'SET-GREEN-V', 'SET-GREEN-VA', 'SET-GREEN-VAL', 'SET-GREEN-VALU', 'SET-INPUT-SOURCE', 'SET-OPTION', 'SET-OUTPUT-DESTINATION', 'SET-PARAMETER', 'SET-POINTER-VALUE', 'SET-PROPERTY', 'SET-RED-VALUE', 'SET-RED', 'SET-RED-', 'SET-RED-V', 'SET-RED-VA', 'SET-RED-VAL', 'SET-RED-VALU', 'SET-REPOSITIONED-ROW', 'SET-RGB-VALUE', 'SET-ROLLBACK', 'SET-SELECTION', 'SET-SIZE', 'SET-SORT-ARROW', 'SETUSERID', 'SETUSER', 'SETUSERI', 'SET-WAIT-STATE', 'SHA1-DIGEST', 'SHARED', 'SHARE-LOCK', 'SHARE', 'SHARE-', 'SHARE-L', 'SHARE-LO', 'SHARE-LOC', 'SHOW-IN-TASKBAR', 'SHOW-STATS', 'SHOW-STAT', 'SIDE-LABEL-HANDLE', 'SIDE-LABEL-H', 'SIDE-LABEL-HA', 'SIDE-LABEL-HAN', 'SIDE-LABEL-HAND', 'SIDE-LABEL-HANDL', 'SIDE-LABELS', 'SIDE-LAB', 'SIDE-LABE', 'SIDE-LABEL', 'SILENT', 'SIMPLE', 'SINGLE', 'SIZE', 'SIZE-CHARS', 'SIZE-C', 'SIZE-CH', 'SIZE-CHA', 'SIZE-CHAR', 'SIZE-PIXELS', 'SIZE-P', 'SIZE-PI', 'SIZE-PIX', 'SIZE-PIXE', 'SIZE-PIXEL', 'SKIP', 'SKIP-DELETED-RECORD', 'SLIDER', 'SMALL-ICON', 'SMALLINT', 'SMALL-TITLE', 'SOME', 'SORT', 'SORT-ASCENDING', 'SORT-NUMBER', 'SOURCE', 'SOURCE-PROCEDURE', 'SPACE', 'SQL', 'SQRT', 'SSL-SERVER-NAME', 'STANDALONE', 'START', 'START-DOCUMENT', 'START-ELEMENT', 'START-MOVE', 'START-RESIZE', 'START-ROW-RESIZE', 'STATE-DETAIL', 'STATIC', 'STATUS', 'STATUS-AREA', 'STATUS-AREA-FONT', 'STDCALL', 'STOP', 'STOP-PARSING', 'STOPPED', 'STOPPE', 'STORED-PROCEDURE', 'STORED-PROC', 'STORED-PROCE', 'STORED-PROCED', 'STORED-PROCEDU', 'STORED-PROCEDUR', 'STREAM', 'STREAM-HANDLE', 'STREAM-IO', 'STRETCH-TO-FIT', 'STRICT', 'STRING', 'STRING-VALUE', 'STRING-XREF', 'SUB-AVERAGE', 'SUB-AVE', 'SUB-AVER', 'SUB-AVERA', 'SUB-AVERAG', 'SUB-COUNT', 'SUB-MAXIMUM', 'SUM-MAX', 'SUM-MAXI', 'SUM-MAXIM', 'SUM-MAXIMU', 'SUB-MENU', 'SUBSUB-', 'MINIMUM', 'SUB-MIN', 'SUBSCRIBE', 'SUBSTITUTE', 'SUBST', 'SUBSTI', 'SUBSTIT', 'SUBSTITU', 'SUBSTITUT', 'SUBSTRING', 'SUBSTR', 'SUBSTRI', 'SUBSTRIN', 'SUB-TOTAL', 'SUBTYPE', 'SUM', 'SUPER', 'SUPER-PROCEDURES', 'SUPPRESS-NAMESPACE-PROCESSING', 'SUPPRESS-WARNINGS', 'SUPPRESS-W', 'SUPPRESS-WA', 'SUPPRESS-WAR', 'SUPPRESS-WARN', 'SUPPRESS-WARNI', 'SUPPRESS-WARNIN', 'SUPPRESS-WARNING', 'SYMMETRIC-ENCRYPTION-ALGORITHM', 'SYMMETRIC-ENCRYPTION-IV', 'SYMMETRIC-ENCRYPTION-KEY', 'SYMMETRIC-SUPPORT', 'SYSTEM-ALERT-BOXES', 'SYSTEM-ALERT', 'SYSTEM-ALERT-', 'SYSTEM-ALERT-B', 'SYSTEM-ALERT-BO', 'SYSTEM-ALERT-BOX', 'SYSTEM-ALERT-BOXE', 'SYSTEM-DIALOG', 'SYSTEM-HELP', 'SYSTEM-ID', 'TABLE', 'TABLE-HANDLE', 'TABLE-NUMBER', 'TAB-POSITION', 'TAB-STOP', 'TARGET', 'TARGET-PROCEDURE', 'TEMP-DIRECTORY', 'TEMP-DIR', 'TEMP-DIRE', 'TEMP-DIREC', 'TEMP-DIRECT', 'TEMP-DIRECTO', 'TEMP-DIRECTOR', 'TEMP-TABLE', 'TEMP-TABLE-PREPARE', 'TERM', 'TERMINAL', 'TERM', 'TERMI', 'TERMIN', 'TERMINA', 'TERMINATE', 'TEXT', 'TEXT-CURSOR', 'TEXT-SEG-GROW', 'TEXT-SELECTED', 'THEN', 'THIS-OBJECT', 'THIS-PROCEDURE', 'THREE-D', 'THROW', 'THROUGH', 'THRU', 'TIC-MARKS', 'TIME', 'TIME-SOURCE', 'TITLE', 'TITLE-BGCOLOR', 'TITLE-BGC', 'TITLE-BGCO', 'TITLE-BGCOL', 'TITLE-BGCOLO', 'TITLE-DCOLOR', 'TITLE-DC', 'TITLE-DCO', 'TITLE-DCOL', 'TITLE-DCOLO', 'TITLE-FGCOLOR', 'TITLE-FGC', 'TITLE-FGCO', 'TITLE-FGCOL', 'TITLE-FGCOLO', 'TITLE-FONT', 'TITLE-FO', 'TITLE-FON', 'TO', 'TODAY', 'TOGGLE-BOX', 'TOOLTIP', 'TOOLTIPS', 'TOPIC', 'TOP-NAV-QUERY', 'TOP-ONLY', 'TO-ROWID', 'TOTAL', 'TRAILING', 'TRANS', 'TRANSACTION', 'TRANSACTION-MODE', 'TRANS-INIT-PROCEDURE', 'TRANSPARENT', 'TRIGGER', 'TRIGGERS', 'TRIM', 'TRUE', 'TRUNCATE', 'TRUNC', 'TRUNCA', 'TRUNCAT', 'TYPE', 'TYPE-OF', 'UNBOX', 'UNBUFFERED', 'UNBUFF', 'UNBUFFE', 'UNBUFFER', 'UNBUFFERE', 'UNDERLINE', 'UNDERL', 'UNDERLI', 'UNDERLIN', 'UNDO', 'UNFORMATTED', 'UNFORM', 'UNFORMA', 'UNFORMAT', 'UNFORMATT', 'UNFORMATTE', 'UNION', 'UNIQUE', 'UNIQUE-ID', 'UNIQUE-MATCH', 'UNIX', 'UNLESS-HIDDEN', 'UNLOAD', 'UNSIGNED-LONG', 'UNSUBSCRIBE', 'UP', 'UPDATE', 'UPDATE-ATTRIBUTE', 'URL', 'URL-DECODE', 'URL-ENCODE', 'URL-PASSWORD', 'URL-USERID', 'USE', 'USE-DICT-EXPS', 'USE-FILENAME', 'USE-INDEX', 'USER', 'USE-REVVIDEO', 'USERID', 'USER-ID', 'USE-TEXT', 'USE-UNDERLINE', 'USE-WIDGET-POOL', 'USING', 'V6DISPLAY', 'V6FRAME', 'VALIDATE', 'VALIDATE-EXPRESSION', 'VALIDATE-MESSAGE', 'VALIDATE-SEAL', 'VALIDATION-ENABLED', 'VALID-EVENT', 'VALID-HANDLE', 'VALID-OBJECT', 'VALUE', 'VALUE-CHANGED', 'VALUES', 'VARIABLE', 'VAR', 'VARI', 'VARIA', 'VARIAB', 'VARIABL', 'VERBOSE', 'VERSION', 'VERTICAL', 'VERT', 'VERTI', 'VERTIC', 'VERTICA', 'VIEW', 'VIEW-AS', 'VIEW-FIRST-COLUMN-ON-REOPEN', 'VIRTUAL-HEIGHT-CHARS', 'VIRTUAL-HEIGHT', 'VIRTUAL-HEIGHT-', 'VIRTUAL-HEIGHT-C', 'VIRTUAL-HEIGHT-CH', 'VIRTUAL-HEIGHT-CHA', 'VIRTUAL-HEIGHT-CHAR', 'VIRTUAL-HEIGHT-PIXELS', 'VIRTUAL-HEIGHT-P', 'VIRTUAL-HEIGHT-PI', 'VIRTUAL-HEIGHT-PIX', 'VIRTUAL-HEIGHT-PIXE', 'VIRTUAL-HEIGHT-PIXEL', 'VIRTUAL-WIDTH-CHARS', 'VIRTUAL-WIDTH', 'VIRTUAL-WIDTH-', 'VIRTUAL-WIDTH-C', 'VIRTUAL-WIDTH-CH', 'VIRTUAL-WIDTH-CHA', 'VIRTUAL-WIDTH-CHAR', 'VIRTUAL-WIDTH-PIXELS', 'VIRTUAL-WIDTH-P', 'VIRTUAL-WIDTH-PI', 'VIRTUAL-WIDTH-PIX', 'VIRTUAL-WIDTH-PIXE', 'VIRTUAL-WIDTH-PIXEL', 'VISIBLE', 'VOID', 'WAIT', 'WAIT-FOR', 'WARNING', 'WEB-CONTEXT', 'WEEKDAY', 'WHEN', 'WHERE', 'WHILE', 'WIDGET', 'WIDGET-ENTER', 'WIDGET-E', 'WIDGET-EN', 'WIDGET-ENT', 'WIDGET-ENTE', 'WIDGET-ID', 'WIDGET-LEAVE', 'WIDGET-L', 'WIDGET-LE', 'WIDGET-LEA', 'WIDGET-LEAV', 'WIDGET-POOL', 'WIDTH', 'WIDTH-CHARS', 'WIDTH', 'WIDTH-', 'WIDTH-C', 'WIDTH-CH', 'WIDTH-CHA', 'WIDTH-CHAR', 'WIDTH-PIXELS', 'WIDTH-P', 'WIDTH-PI', 'WIDTH-PIX', 'WIDTH-PIXE', 'WIDTH-PIXEL', 'WINDOW', 'WINDOW-MAXIMIZED', 'WINDOW-MAXIM', 'WINDOW-MAXIMI', 'WINDOW-MAXIMIZ', 'WINDOW-MAXIMIZE', 'WINDOW-MINIMIZED', 'WINDOW-MINIM', 'WINDOW-MINIMI', 'WINDOW-MINIMIZ', 'WINDOW-MINIMIZE', 'WINDOW-NAME', 'WINDOW-NORMAL', 'WINDOW-STATE', 'WINDOW-STA', 'WINDOW-STAT', 'WINDOW-SYSTEM', 'WITH', 'WORD-INDEX', 'WORD-WRAP', 'WORK-AREA-HEIGHT-PIXELS', 'WORK-AREA-WIDTH-PIXELS', 'WORK-AREA-X', 'WORK-AREA-Y', 'WORKFILE', 'WORK-TABLE', 'WORK-TAB', 'WORK-TABL', 'WRITE', 'WRITE-CDATA', 'WRITE-CHARACTERS', 'WRITE-COMMENT', 'WRITE-DATA-ELEMENT', 'WRITE-EMPTY-ELEMENT', 'WRITE-ENTITY-REF', 'WRITE-EXTERNAL-DTD', 'WRITE-FRAGMENT', 'WRITE-MESSAGE', 'WRITE-PROCESSING-INSTRUCTION', 'WRITE-STATUS', 'WRITE-XML', 'WRITE-XMLSCHEMA', 'X', 'XCODE', 'XML-DATA-TYPE', 'XML-NODE-TYPE', 'XML-SCHEMA-PATH', 'XML-SUPPRESS-NAMESPACE-PROCESSING', 'X-OF', 'XREF', 'XREF-XML', 'Y', 'YEAR', 'YEAR-OFFSET', 'YES', 'YES-NO', 'YES-NO-CANCEL', 'Y-OF' ]
    mit
    AndreasWilliams/BotGravindo
    src/scons-local-2.0.1/SCons/Variables/__init__.py
    61
    11095
    """engine.SCons.Variables This file defines the Variables class that is used to add user-friendly customizable variables to an SCons build. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. __revision__ = "src/engine/SCons/Variables/__init__.py 5134 2010/08/16 23:02:40 bdeegan" import os.path import sys import SCons.Environment import SCons.Errors import SCons.Util import SCons.Warnings from BoolVariable import BoolVariable # okay from EnumVariable import EnumVariable # okay from ListVariable import ListVariable # naja from PackageVariable import PackageVariable # naja from PathVariable import PathVariable # okay class Variables(object): instance=None """ Holds all the options, updates the environment with the variables, and renders the help text. """ def __init__(self, files=[], args={}, is_global=1): """ files - [optional] List of option configuration files to load (backward compatibility) If a single string is passed it is automatically placed in a file list """ self.options = [] self.args = args if not SCons.Util.is_List(files): if files: files = [ files ] else: files = [] self.files = files self.unknown = {} # create the singleton instance if is_global: self=Variables.instance if not Variables.instance: Variables.instance=self def _do_add(self, key, help="", default=None, validator=None, converter=None): class Variable(object): pass option = Variable() # if we get a list or a tuple, we take the first element as the # option key and store the remaining in aliases. if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key): option.key = key[0] option.aliases = key[1:] else: option.key = key option.aliases = [ key ] option.help = help option.default = default option.validator = validator option.converter = converter self.options.append(option) # options might be added after the 'unknown' dict has been set up, # so we remove the key and all its aliases from that dict for alias in list(option.aliases) + [ option.key ]: if alias in self.unknown: del self.unknown[alias] def keys(self): """ Returns the keywords for the options """ return [o.key for o in self.options] def Add(self, key, help="", default=None, validator=None, converter=None, **kw): """ Add an option. key - the name of the variable, or a list or tuple of arguments help - optional help text for the options default - optional default value validator - optional function that is called to validate the option's value Called with (key, value, environment) converter - optional function that is called to convert the option's value before putting it in the environment. """ if SCons.Util.is_List(key) or isinstance(key, tuple): self._do_add(*key) return if not SCons.Util.is_String(key) or \ not SCons.Environment.is_valid_construction_var(key): raise SCons.Errors.UserError("Illegal Variables.Add() key `%s'" % str(key)) self._do_add(key, help, default, validator, converter) def AddVariables(self, *optlist): """ Add a list of options. Each list element is a tuple/list of arguments to be passed on to the underlying method for adding options. Example: opt.AddVariables( ('debug', '', 0), ('CC', 'The C compiler'), ('VALIDATE', 'An option for testing validation', 'notset', validator, None), ) """ for o in optlist: self._do_add(*o) def Update(self, env, args=None): """ Update an environment with the option variables. env - the environment to update. """ values = {} # first set the defaults: for option in self.options: if not option.default is None: values[option.key] = option.default # next set the value specified in the options file for filename in self.files: if os.path.exists(filename): dir = os.path.split(os.path.abspath(filename))[0] if dir: sys.path.insert(0, dir) try: values['__name__'] = filename exec open(filename, 'rU').read() in {}, values finally: if dir: del sys.path[0] del values['__name__'] # set the values specified on the command line if args is None: args = self.args for arg, value in args.items(): added = False for option in self.options: if arg in list(option.aliases) + [ option.key ]: values[option.key] = value added = True if not added: self.unknown[arg] = value # put the variables in the environment: # (don't copy over variables that are not declared as options) for option in self.options: try: env[option.key] = values[option.key] except KeyError: pass # Call the convert functions: for option in self.options: if option.converter and option.key in values: value = env.subst('${%s}'%option.key) try: try: env[option.key] = option.converter(value) except TypeError: env[option.key] = option.converter(value, env) except ValueError, x: raise SCons.Errors.UserError('Error converting option: %s\n%s'%(option.key, x)) # Finally validate the values: for option in self.options: if option.validator and option.key in values: option.validator(option.key, env.subst('${%s}'%option.key), env) def UnknownVariables(self): """ Returns any options in the specified arguments lists that were not known, declared options in this object. """ return self.unknown def Save(self, filename, env): """ Saves all the options in the given file. This file can then be used to load the options next run. This can be used to create an option cache file. filename - Name of the file to save into env - the environment get the option values from """ # Create the file and write out the header try: fh = open(filename, 'w') try: # Make an assignment in the file for each option # within the environment that was assigned a value # other than the default. for option in self.options: try: value = env[option.key] try: prepare = value.prepare_to_store except AttributeError: try: eval(repr(value)) except KeyboardInterrupt: raise except: # Convert stuff that has a repr() that # cannot be evaluated into a string value = SCons.Util.to_String(value) else: value = prepare() defaultVal = env.subst(SCons.Util.to_String(option.default)) if option.converter: defaultVal = option.converter(defaultVal) if str(env.subst('${%s}' % option.key)) != str(defaultVal): fh.write('%s = %s\n' % (option.key, repr(value))) except KeyError: pass finally: fh.close() except IOError, x: raise SCons.Errors.UserError('Error writing options to file: %s\n%s' % (filename, x)) def GenerateHelpText(self, env, sort=None): """ Generate the help text for the options. env - an environment that is used to get the current values of the options. """ if sort: options = sorted(self.options, key=lambda x: x.key) else: options = self.options def format(opt, self=self, env=env): if opt.key in env: actual = env.subst('${%s}' % opt.key) else: actual = None return self.FormatVariableHelpText(env, opt.key, opt.help, opt.default, actual, opt.aliases) lines = [_f for _f in map(format, options) if _f] return ''.join(lines) format = '\n%s: %s\n default: %s\n actual: %s\n' format_ = '\n%s: %s\n default: %s\n actual: %s\n aliases: %s\n' def FormatVariableHelpText(self, env, key, help, default, actual, aliases=[]): # Don't display the key name itself as an alias. aliases = [a for a in aliases if a != key] if len(aliases)==0: return self.format % (key, help, default, actual) else: return self.format_ % (key, help, default, actual, aliases) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
    gpl-2.0
    amenonsen/ansible
    test/units/modules/network/f5/test_bigip_monitor_external.py
    22
    3318
    # -*- coding: utf-8 -*- # # Copyright: (c) 2017, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest import sys if sys.version_info < (2, 7): pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7") from ansible.module_utils.basic import AnsibleModule try: from library.modules.bigip_monitor_external import ModuleParameters from library.modules.bigip_monitor_external import ModuleManager from library.modules.bigip_monitor_external import ArgumentSpec # In Ansible 2.8, Ansible changed import paths. from test.units.compat import unittest from test.units.compat.mock import Mock from test.units.modules.utils import set_module_args except ImportError: from ansible.modules.network.f5.bigip_monitor_external import ModuleParameters from ansible.modules.network.f5.bigip_monitor_external import ModuleManager from ansible.modules.network.f5.bigip_monitor_external import ArgumentSpec # Ansible 2.8 imports from units.compat import unittest from units.compat.mock import Mock from units.modules.utils import set_module_args fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except Exception: pass fixture_data[path] = data return data class TestParameters(unittest.TestCase): def test_module_parameters(self): args = dict( name='foo', parent='parent', ip='10.10.10.10', port=80, interval=20, timeout=30, partition='Common' ) p = ModuleParameters(params=args) assert p.name == 'foo' assert p.parent == '/Common/parent' assert p.ip == '10.10.10.10' assert p.type == 'external' assert p.port == 80 assert p.destination == '10.10.10.10:80' assert p.interval == 20 assert p.timeout == 30 class TestManager(unittest.TestCase): def setUp(self): self.spec = ArgumentSpec() def test_create_monitor(self, *args): set_module_args(dict( name='foo', parent='parent', ip='10.10.10.10', port=80, interval=20, timeout=30, partition='Common', provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(side_effect=[False, True]) mm.create_on_device = Mock(return_value=True) results = mm.exec_module() assert results['changed'] is True assert results['parent'] == '/Common/parent'
    gpl-3.0
    jplusui/jplusui.github.com
    apps/node/node_modules/npm/node_modules/node-gyp/gyp/test/subdirectory/gyptest-SYMROOT-all.py
    399
    1269
    #!/usr/bin/env python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies building a target and a subsidiary dependent target from a .gyp file in a subdirectory, without specifying an explicit output build directory, and using the generated solution or project file at the top of the tree as the entry point. The configuration sets the Xcode SYMROOT variable and uses --depth= to make Xcode behave like the other build tools--that is, put all built targets in a single output build directory at the top of the tree. """ import TestGyp test = TestGyp.TestGyp() test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src') test.relocate('src', 'relocate/src') # Suppress the test infrastructure's setting SYMROOT on the command line. test.build('prog1.gyp', test.ALL, SYMROOT=None, chdir='relocate/src') test.run_built_executable('prog1', stdout="Hello from prog1.c\n", chdir='relocate/src') test.run_built_executable('prog2', stdout="Hello from prog2.c\n", chdir='relocate/src') test.pass_test()
    bsd-3-clause
    CiviWiki/OpenCiviWiki
    project/api/migrations/0005_auto_20170109_1813.py
    1
    1092
    # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("api", "0004_auto_20161230_0412"), ] operations = [ migrations.AddField( model_name="civi", name="related_civis", field=models.ManyToManyField( related_name="_related_civis_+", to="api.Civi" ), ), migrations.AlterField( model_name="civi", name="c_type", field=models.CharField( default=b"problem", max_length=31, choices=[ (b"problem", b"Problem"), (b"cause", b"Cause"), (b"solution", b"Solution"), (b"response", b"Response"), ], ), ), migrations.AddField( model_name="civi", name="links", field=models.ManyToManyField(related_name="link", to="api.Civi"), ), ]
    agpl-3.0
    jonghough/SimpleWebsocket
    socket/websocket.py
    1
    11128
    # -- coding: utf-8 -- import socket import os import base64 import hashlib import struct from array import array from backports.ssl_match_hostname import match_hostname, CertificateError ''' Simple python implementation of the Websocket protocol (RFC6455) for websocket clients. The goal is to keep the source as compact and readable as possible while conforming to the protocol. author: Jonathan Hough ''' class WebsocketClient ( object ): ''' Websocket client class. ''' #Globally unique identifier, see RFC6454 GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" #Closing frame status code #see RFC 6455 Section 4.1 (Status codes) NORMAL_CLOSURE = 1000 GOING_AWAY = 1001 PROTOCOL_ERROR = 1002 UNSUPPORTED_DATA = 1003 RESERVED = 1004 NO_STATUS_RECEIVED = 1005 ABNORMAl_CLOSURE = 1006 INVALID_DATA = 1007 POLICY_VIOLATION = 1008 MESSAGE_TOO_BIG = 1009 MANDATORY_EXT = 1010 INTERNAL_SERVER_ERR = 1011 TLS_HANDSHAKE = 1015 #Websocket op-codes #see RFC 6455 Section 11.8 (Opcodes) CONTINUATION_FRAME = 0x0 TEXT_FRAME = 0x1 BINARY_FRAME = 0x2 CLOSE_FRAME = 0x8 PING_FRAME = 0x9 PONG_FRAME = 0xA #Frame bits fin_bits = 0x80 #final bit res1_bits = 0x40 #reserved bit 1 res2_bits = 0x20 #reserved bit 2 res3_bits = 0x10 #reserved bit 3 opcode = 0xF #opcode len_mask = 0x80 #payload length #frame payload length bytes MAX_DATA_NO_EXTENSION = 126 DATA_2_BYTE_EXTENSION = 1 << 16 DATA_8_BYTE_EXTENSION = 1 << 63 @staticmethod def expected_value(val): '''Returns expected base 64 encoded Sha1 hash value of val concatenated with GUID. This should be the same as the header field Sec-Websocket-Accept, returned from server.''' sha1 = base64.b64encode(hashlib.sha1(val + WebsocketClient.GUID).digest()) return sha1 @staticmethod def make_frame(data, opcode): '''Creates text frame to send to websocket server. see RFC6455 Section 5.2. For Python struct.pack formats see: https://docs.python.org/2/library/struct.html#struct.pack ''' #Assumes reserved bits are 0 #final bit and opcode (first byte) frame = chr(1 << 7 | opcode) #mask bit, payload data length, mask key, paylod data (second + bytes) mask_bit = 1 << 7 datalen = len(data) if datalen < WebsocketClient.MAX_DATA_NO_EXTENSION: frame += chr( mask_bit | datalen ) elif datalen < WebsocketClient.DATA_2_BYTE_EXTENSION: frame += struct.pack('!B', mask_bit | 0x7e) +struct.pack("!H", datalen) else: frame += struct.pack('!B', mask_bit | 0x7f) + struct.pack("!Q", datalen) print str(frame) key = os.urandom(4) frame = frame + key + WebsocketClient.mask(key, data) return frame @staticmethod def mask(key, data): ''' Masks the data with the given key using the masking method defined in RFC 6455 Section 5.3 ''' masked = [] keybytes = array("B", key) databytes = array("B", data) for i in range(len(databytes)): databytes[i] ^= keybytes[i % 4] return databytes.tostring() def create_header(socketkey, test, hosturi, port, **kwargs): ''' Creates the initial websocket creation header. test parameter is used for testing, (with echo.websocket.org). ''' if test is True: header = "GET /echo HTTP/1.1\r\n"\ +"Upgrade: websocket\r\n"\ +"Connection: Upgrade\r\n"\ +"Host: echo.websocket.org\r\n"\ +"Origin: null\r\n"\ +"Sec-WebSocket-Key: "+socketkey+"\r\n"\ +"Sec-WebSocket-Protocol: chat, superchat\r\n"\ +"Sec-WebSocket-Version: 13\r\n\r\n" return header else: resource = "/" origin = "null" if kwargs is not None: for key, value in kwargs.iteritems(): if key is "resource": resource = value elif key is "host": host = value elif key is origin: origin = value header = "GET "+resource+" HTTP/1.1\r\n"\ +"Upgrade: websocket\r\n"\ +"Connection: Upgrade\r\n"\ +"Host: "+hosturi+str(port)+" \r\n"\ +"Origin: "+origin+" \r\n"\ +"Sec-WebSocket-Key: "+socketkey+"\r\n"\ +"Sec-WebSocket-Version: 13\r\n\r\n" return header def create_header_key(): '''16 bytes. 16 random bytes, base 64 encoded.''' rand = os.urandom(16) encoded = base64.b64encode(rand) return encoded def expected_value(val): '''Returns expected base 64 encoded Sha1 hash value of val concatenated with GUID. This should be the same as the header field Sec-Websocket-Accept, returned from server.''' sha1 = base64.b64encode(hashlib.sha1(val+WebsocketClient.GUID).digest()) return sha1 def keys_match(headers, key): ''' Checks whether the key returned by the websocket server in opening handshake is the same as the expected value. see RFC 6455 Section 4.2 ''' kvp = {} for h in headers: split = h.split(':') if len(split) == 1: split.append(" ") for item in split : item.strip() item.lstrip() kvp[split[0]] = split[1] returnedkey = kvp['Sec-WebSocket-Accept'] print returnedkey expect = expected_value(key) print expect if returnedkey.strip() == expect.strip(): return True else: return False def get_cert(sock, path): ca_certs_path = os.path.join(os.path.dirname(path), 'certfiles.crt') sslsock = ssl.wrap_socket(sock, ssl.PROTOCOL_SSLv3, ssl.CERT_REQUIRED, ca_certs_path) return sslsock class WebsocketController(object): ''' Controller for websocket functionality. Needs to be passed on_error, on_close, on_message functions. ''' def __init__(self, onerror, onclose, onmessage): ''' Creates instance of WebsocketController. ''' #callbacks self.on_error = onerror self.on_close = onclose self.on_message = onmessage #the socket! self.sock = None #opening and closing flags self.handshake = False #true if complete opening handshake self.closing = False #true if begin closing handshake self.response_buffer = [] self.cont_frame = "" self.fragment = False #flag for fragmented message expectation. self.is_closed = True #true prior to connection and after connection closed by either endpoint. self.protocol = "ws" self.uri = "" self.port = "80" #default value def process_frame(self, sock, buf): ''' Processes recieved frame. ''' frameholder = FrameHolder(buf) msg = frameholder.message if frameholder.valid_frame is False: pass else: if frameholder.finbit == 0 and self.fragment is False and frameholder.opcode != 0x0: #RFC6455 section 5.4 (fragmentation) self.fragment = True self.cont_frame = "" self.cont_frame += frameholder.message elif frameholder.opcode == 0x8 and self.closing is False: # closing frame. Remote endpoint closed connection. cls = WebsocketClient.make_frame('1000', 0x8) sock.sendall(cls) self.is_closed = True if self.on_close is not None: self.on_close() elif frameholder.opcode == 0xA: #ping frame, reply pong and vice versa: RFC 6455 5.5.2 and 5.5.3 frame = WebsocketClient.make_frame(frameholder.message, 0x9) sock.sendall(frame) elif frameholder.opcode == 0x9: frame = WebsocketClient.make_frame(frameholder.message, 0xA) sock.sendall(frame) elif frameholder.opcode == 0x1: #message self.response_buffer.append(msg) elif frameholder.opcode == 0x0: #continuation fragment if self.fragment is False: pass #TODO throw an error elif frameholder.finbit == 0: self.cont_frame += frameholder.message elif frameholder.finbit == 1: self.cont_frame += frameholder.message msg = self.cont_frame self.response_buffer.append(msg) self.fragment = False #reset, fragmented message finished. if self.fragment is False: return msg else: return None def begin_connection(self, uri, port = None): ''' Starts the websocket connection with initial handshake. If not port number is given the default value of 80 will be used.''' if uri is not None: prtl = uri.split('://') self.protocol = prtl[0] print "protocol is "+str(self.protocol) self.uri = ''.join(prtl[1:]) if port is not None: self.port = port key = create_header_key() self.is_closed = False; try: self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) except socket.error, e: print "Error "+str(e) try: host = socket.gethostbyname(self.uri) if self.protocol is "wss" and port is None: self.port = 443 addr = (self.uri, self.port) print self.protocol self.sock.connect(addr) except socket.gaierror, e: print "Error "+str(e) except socket.error, e: print "Error "+str(e) try: self.sock.sendall(create_header(key,False, self.uri, self.port)) except socket.error, e: print "Error "+str(e) while self.is_closed is False: try: buf = self.sock.recv(4096) if buf is not None and len(buf) > 0: if self.handshake is True: msg = self.process_frame(self.sock, buf) print "Returned message: "+msg #for handshake frame from server if self.handshake is False: headers = buf.split('\r\n') keymatch = keys_match(headers, key) if keymatch is True: self.handshake = True # handshake complete else: self.sock.close() self.is_closing = True self.is_closed = True if self.on_close is not None: self.on_close() #TODO throw error. Keys didn't match except socket.error, e: print "Error: "+str(e) def close(self, reason =''): '''Send closing frame. Connection close initiated by local endpoint.''' if self.closing is False: self.sock.sendall(WebsocketClient.make_frame(reason, 0x8)) self.closing is True def send_message(self, message): '''Sends message string to remote endpoint''' if self.closing is False and self.is_closed is False: self.sock.sendall(WebsocketClient.make_frame(unicode(message).encode("unicode_escape"), 0x1)) return True else: return False def error_in_frame(self, error): if on_frame_error is not None: on_frame_error(error) class FrameHolder(object): ''' Convenient holder class for received frames. Validates and gets info from raw frame bytes ''' def __init__(self, rawframe): self.valid_frame = True self.finbit = None self.opcode = None self.msg_length = None self.message = None frame = array('B', rawframe) first_byte = frame[0] self.finbit = first_byte >> 7 self.finbit = self.finbit & 0xFF #opcode is final 4 bits of first byte. self.opcode = frame[0] & 0xF length = frame[1] #first bit (masking bit) must be zero so don't bother to bit shift. self.msg_length = length & 0xFF #get length of payload self.message = 0 # get the payload length # extension if length == 126: self.message = frame[4:] # 8 byte extension elif length == 127: self.message = frame[10:] # standard else: self.message = frame[2:] #payload message. self.message = self.message.tostring().encode('unicode_escape').decode('unicode_escape') print "msg = "+self.message # below is retained for some tests. ''' def main(): wc = WebsocketController(None, None, None) wc.begin_connection() if __name__ == '__main__': main()'''
    bsd-2-clause
    googleinterns/connectivity-test
    src/derivation_declarations/generators/BGP_generators.py
    1
    2030
    # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any import proto.cloud_network_model_pb2 as entities import proto.derivation_rules_pb2 as derivation import proto.rules_pb2 as rules from src.utils.derivation_utils import findNetwork, listBgpPeers, REGION_LIST, clearNextHopsInRoute from src.utils.ip_utils import ipv4RangeToStr Destination = derivation.DestinationAndGeneration.Destination DestinationContext = derivation.DestinationAndGeneration.DestinationContext def BgpPeersGeneratorCommon(derived: rules.Route, context: DestinationContext, model: entities.Model) -> rules.Route: clearNextHopsInRoute(derived) derived.next_hop_tunnel = context.peer_info # derived.region currently contains the original route's region derived.from_local = derived.region == context.region derived.region = context.region derived.route_type = rules.Route.DYNAMIC derived.url = "dynamic-route-" + ipv4RangeToStr(derived.dest_range) return derived def BgpPeersGenerator(derived: rules.Route, context: DestinationContext, model: entities.Model) -> rules.Route: derived = BgpPeersGeneratorCommon(derived, context, model) derived.from_local = True return derived def OtherRegionsWhenGlobalRoutingGenerator(derived: rules.Route, context: DestinationContext, model: entities.Model) -> rules.Route: return BgpPeersGeneratorCommon(derived, context, model)
    apache-2.0
    bunnyitvn/webptn
    build/lib.linux-i686-2.7/django/contrib/gis/geos/point.py
    224
    4351
    from ctypes import c_uint from django.contrib.gis.geos.error import GEOSException from django.contrib.gis.geos.geometry import GEOSGeometry from django.contrib.gis.geos import prototypes as capi from django.utils import six from django.utils.six.moves import xrange class Point(GEOSGeometry): _minlength = 2 _maxlength = 3 def __init__(self, x, y=None, z=None, srid=None): """ The Point object may be initialized with either a tuple, or individual parameters. For Example: >>> p = Point((5, 23)) # 2D point, passed in as a tuple >>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters """ if isinstance(x, (tuple, list)): # Here a tuple or list was passed in under the `x` parameter. ndim = len(x) coords = x elif isinstance(x, six.integer_types + (float,)) and isinstance(y, six.integer_types + (float,)): # Here X, Y, and (optionally) Z were passed in individually, as parameters. if isinstance(z, six.integer_types + (float,)): ndim = 3 coords = [x, y, z] else: ndim = 2 coords = [x, y] else: raise TypeError('Invalid parameters given for Point initialization.') point = self._create_point(ndim, coords) # Initializing using the address returned from the GEOS # createPoint factory. super(Point, self).__init__(point, srid=srid) def _create_point(self, ndim, coords): """ Create a coordinate sequence, set X, Y, [Z], and create point """ if ndim < 2 or ndim > 3: raise TypeError('Invalid point dimension: %s' % str(ndim)) cs = capi.create_cs(c_uint(1), c_uint(ndim)) i = iter(coords) capi.cs_setx(cs, 0, next(i)) capi.cs_sety(cs, 0, next(i)) if ndim == 3: capi.cs_setz(cs, 0, next(i)) return capi.create_point(cs) def _set_list(self, length, items): ptr = self._create_point(length, items) if ptr: capi.destroy_geom(self.ptr) self._ptr = ptr self._set_cs() else: # can this happen? raise GEOSException('Geometry resulting from slice deletion was invalid.') def _set_single(self, index, value): self._cs.setOrdinate(index, 0, value) def __iter__(self): "Allows iteration over coordinates of this Point." for i in xrange(len(self)): yield self[i] def __len__(self): "Returns the number of dimensions for this Point (either 0, 2 or 3)." if self.empty: return 0 if self.hasz: return 3 else: return 2 def _get_single_external(self, index): if index == 0: return self.x elif index == 1: return self.y elif index == 2: return self.z _get_single_internal = _get_single_external def get_x(self): "Returns the X component of the Point." return self._cs.getOrdinate(0, 0) def set_x(self, value): "Sets the X component of the Point." self._cs.setOrdinate(0, 0, value) def get_y(self): "Returns the Y component of the Point." return self._cs.getOrdinate(1, 0) def set_y(self, value): "Sets the Y component of the Point." self._cs.setOrdinate(1, 0, value) def get_z(self): "Returns the Z component of the Point." if self.hasz: return self._cs.getOrdinate(2, 0) else: return None def set_z(self, value): "Sets the Z component of the Point." if self.hasz: self._cs.setOrdinate(2, 0, value) else: raise GEOSException('Cannot set Z on 2D Point.') # X, Y, Z properties x = property(get_x, set_x) y = property(get_y, set_y) z = property(get_z, set_z) ### Tuple setting and retrieval routines. ### def get_coords(self): "Returns a tuple of the point." return self._cs.tuple def set_coords(self, tup): "Sets the coordinates of the point with the given tuple." self._cs[0] = tup # The tuple and coords properties tuple = property(get_coords, set_coords) coords = tuple
    bsd-3-clause
    Jimaklas/grd2poly
    grd2poly.py
    1
    1764
    import win32com.client from input import GRD_FILE # # Generate modules of necessary typelibs (AutoCAD Civil 3D 2008) # comtypes.client.GetModule("C:\\Program Files\\Common Files\\Autodesk Shared\\acax17enu.tlb") # comtypes.client.GetModule("C:\\Program Files\\AutoCAD Civil 3D 2008\\AecXBase.tlb") # comtypes.client.GetModule("C:\\Program Files\\AutoCAD Civil 3D 2008\\AecXUIBase.tlb") # comtypes.client.GetModule("C:\\Program Files\\AutoCAD Civil 3D 2008\\Civil\\AeccXLand.tlb") # comtypes.client.GetModule("C:\\Program Files\\AutoCAD Civil 3D 2008\\Civil\\AeccXUiLand.tlb") # raise SystemExit # Get running instance of the AutoCAD application acadApp = win32com.client.Dispatch("AutoCAD.Application") aeccApp = acadApp.GetInterfaceObject("AeccXUiLand.AeccApplication.5.0") # Document object doc = aeccApp.ActiveDocument alignment, point_clicked = doc.Utility.GetEntity(None, None, Prompt="Select an alignment:") command = "pl " f = open(GRD_FILE, "r") line = f.readline() while 1: try: line = f.readline() section, station = line.strip().split() station = float(station) line = f.readline() while line[0] != "*": offset, h = line.strip().split() offset = float(offset) h = float(h) # draw the next polyline vertex print "Point at station %s (section %s) - offset %s" % (station, section, offset) x, y = alignment.PointLocation(station, offset) command = command + "%s,%s " % (x, y) line = f.readline() except ValueError: # raised when trying to read past EOF (why not IOError? - need to think on it) break doc.SendCommand(command + " ") f.close() # x, y = alignment.PointLocation(0.0, 10.0) # print x, y
    gpl-3.0
    Bakterija/mmplayer
    mmplayer/media_player/providers/audio_gstplayer_modified.py
    1
    1047
    from kivy.core.audio.audio_gstplayer import SoundGstplayer from kivy.logger import Logger from kivy.compat import PY2 from os.path import realpath from kivy.lib.gstplayer import GstPlayer, get_gst_version if PY2: from urllib import pathname2url else: from urllib.request import pathname2url def _on_gstplayer_message(mtype, message): if mtype == 'error': Logger.error('AudioGstplayer: {}'.format(message)) elif mtype == 'warning': Logger.warning('AudioGstplayer: {}'.format(message)) elif mtype == 'info': Logger.info('AudioGstplayer: {}'.format(message)) class SoundGstplayerModified(SoundGstplayer): '''This is a modified SoundGstplayer that works''' def load(self, uri): self.unload() if not uri: return if not '://' in uri: uri = 'file:' + pathname2url(realpath(uri)) self.player = GstPlayer(uri, None, self._on_gst_eos_sync, _on_gstplayer_message) self.player.load() return self
    mit
    PaulWay/spacewalk
    search-server/spacewalk-search/scripts/search.py
    19
    2581
    #!/usr/bin/python import xmlrpclib from optparse import OptionParser indexName = "package" usage = "usage: %prog [options] search term" desc = "%prog searches for package (default) or systems with the given \ search criteria" parser = OptionParser(usage=usage, description=desc) parser.add_option("--sessionid", dest="sessionid", type="int", help="PXT sessionid") parser.add_option("--package", action="store_true", dest="package", help="search packages", default=True) parser.add_option("--system", action="store_true", dest="system", help="search systems", default=False) parser.add_option("--indexName", dest="indexName", type="string", help="lucene index name to search ex: package server hwdevice snapshotTag errata") parser.add_option("--serverAddr", dest="serverAddr", type="string", default="localhost", help="Server to authenticate to, NOT WHERE SEARCH SERVER RUNS") parser.add_option("--username", dest="username", type="string", help="username") parser.add_option("--password", dest="password", type="string", help="password") parser.add_option("--debug", action="store_true", dest="debug", default=False, help="enable debug output") (options, terms) = parser.parse_args() if len(terms) < 1: parser.error("please supply a search term\n" + str(parser.print_help())) if not options.sessionid and (not options.username or not options.password): print parser.print_help() parser.exit() if options.package: indexName = "package" if options.system: indexName = "server" if options.indexName: indexName = options.indexName sessionid = None if options.sessionid: sessionid = options.sessionid print "Using passed in authentication info, sessionid = %s" % (sessionid) else: xmlrpcURL = "http://%s/rhn/rpc/api" % (options.serverAddr) print "Getting authentication information from: %s" % (xmlrpcURL) rhnclient = xmlrpclib.Server(xmlrpcURL) authSessionId = rhnclient.auth.login(options.username, options.password) sessionid = int(authSessionId.split('x')[0]) url = "http://localhost:2828/RPC2" print "Connecting to SearchServer: (%s)" % url client = xmlrpclib.Server(url, verbose=options.debug) term = " ".join(terms) print "searching for (%s) matching criteria: (%s)" % (indexName, str(term)) items = client.index.search(sessionid, indexName, term) print "We got (%d) items back." % len(items) print items #Remember to logout if the user didn't supply the sessionid if not options.sessionid: rhnclient.auth.logout(authSessionId)
    gpl-2.0
    MITHyperloopTeam/software_core
    software/simulation/sysid/brake_actuator_sysid.py
    1
    2012
    import numpy as np from scipy.optimize import minimize input_file = "/home/gizatt/mit-hyperloop-es/logs/20161014/velocity_steps_extract.csv" dataset_raw = np.loadtxt(input_file, skiprows=1, delimiter=",") # indexes into that j_recv_time = 0 j_utime = 1 j_dist = 2 j_vel = 3 j_pressure = 4 j_m = 5 j_po = 6 j_pv = 7 i_p_res = 0 # reservoir pressure i_A_cyl = 1 # cylinder area i_K_spring = 2 # spring constant i_B_cyl = 3 # cylinder movement constant i_C_mtr = 4 # motor pressure increase rate i_K_pv = 5 i_K_po = 6 x0 = np.array([ 80, # reservoir pressure 100, # cylinder area 1000, # spring constant 1, # cylinder movement constant 100, # motor pressure increase rate 100, 10000 ]) def brake_actuator_fitting_function(x): """Using x as brake system parameters, calculates error over brake testing dataset""" err = 0 for i in range(0, dataset_raw.shape[0] - 1): # predict next point from current one dt = (dataset_raw[i+1, j_utime] - dataset_raw[i, j_utime]) / 1000.0 / 1000.0 d_vel = x[i_A_cyl]*dataset_raw[i, j_pressure] - x[i_K_spring] pred_vel = dataset_raw[i, j_vel] + dt * d_vel d_pos = dt * d_vel + dataset_raw[i, j_vel] pred_pos = dataset_raw[i, j_dist] + dt * d_pos d_pcyl = -x[i_B_cyl]*dataset_raw[i, j_vel] + \ x[i_C_mtr]*dataset_raw[i, j_m] + \ -x[i_K_pv]*np.sqrt(dataset_raw[i, j_pv] * np.abs(dataset_raw[i, j_pressure] - x[i_p_res])) * np.sign(dataset_raw[i, j_pressure] - x[i_p_res]) + \ -x[i_K_po]*np.sqrt(dataset_raw[i, j_po] * np.abs(dataset_raw[i, j_pressure] - x[i_p_res])) * np.sign(dataset_raw[i, j_pressure] - x[i_p_res]) pred_pcyl = dataset_raw[i, j_pressure] + dt*d_pcyl err += (pred_vel - dataset_raw[i+1, j_vel])**2 + (pred_pos - dataset_raw[i+1, j_dist])**2 + (pred_pcyl - dataset_raw[i+1, j_pressure])**2 print x, ", err ", err return err res = minimize(brake_actuator_fitting_function, x0, method='SLSQP', options={'disp': True}) print "final: ", res
    lgpl-3.0
    devendermishrajio/nova_test_latest
    nova/cells/filters/__init__.py
    61
    2105
    # Copyright (c) 2012-2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cell scheduler filters """ from oslo_log import log as logging from nova import filters from nova import policy LOG = logging.getLogger(__name__) class BaseCellFilter(filters.BaseFilter): """Base class for cell filters.""" def authorized(self, ctxt): """Return whether or not the context is authorized for this filter based on policy. The policy action is "cells_scheduler_filter:<name>" where <name> is the name of the filter class. """ name = 'cells_scheduler_filter:' + self.__class__.__name__ target = {'project_id': ctxt.project_id, 'user_id': ctxt.user_id} return policy.enforce(ctxt, name, target, do_raise=False) def _filter_one(self, cell, filter_properties): return self.cell_passes(cell, filter_properties) def cell_passes(self, cell, filter_properties): """Return True if the CellState passes the filter, otherwise False. Override this in a subclass. """ raise NotImplementedError() class CellFilterHandler(filters.BaseFilterHandler): def __init__(self): super(CellFilterHandler, self).__init__(BaseCellFilter) def all_filters(): """Return a list of filter classes found in this directory. This method is used as the default for available scheduler filters and should return a list of all filter classes available. """ return CellFilterHandler().get_all_classes()
    apache-2.0
    ph4r05/plyprotobuf
    plyproto/parser.py
    1
    14479
    __author__ = "Dusan (Ph4r05) Klinec" __copyright__ = "Copyright (C) 2014 Dusan (ph4r05) Klinec" __license__ = "Apache License, Version 2.0" __version__ = "1.0" import ply.lex as lex import ply.yacc as yacc from .model import * class ProtobufLexer(object): keywords = ('double', 'float', 'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64', 'fixed32', 'fixed64', 'sfixed32', 'sfixed64', 'bool', 'string', 'bytes', 'message', 'required', 'optional', 'repeated', 'enum', 'extensions', 'max', 'extends', 'extend', 'to', 'package', 'service', 'rpc', 'returns', 'true', 'false', 'option', 'import') tokens = [ 'NAME', 'NUM', 'STRING_LITERAL', 'LINE_COMMENT', 'BLOCK_COMMENT', 'LBRACE', 'RBRACE', 'LBRACK', 'RBRACK', 'LPAR', 'RPAR', 'EQ', 'SEMI', 'DOT', 'STARTTOKEN' ] + [k.upper() for k in keywords] literals = '()+-*/=?:,.^|&~!=[]{};<>@%' t_NUM = r'[+-]?\d+' t_STRING_LITERAL = r'\"([^\\\n]|(\\.))*?\"' t_ignore_LINE_COMMENT = '//.*' def t_BLOCK_COMMENT(self, t): r'/\*(.|\n)*?\*/' t.lexer.lineno += t.value.count('\n') t_LBRACE = '{' t_RBRACE = '}' t_LBRACK = '\\[' t_RBRACK = '\\]' t_LPAR = '\\(' t_RPAR = '\\)' t_EQ = '=' t_SEMI = ';' t_DOT = '\\.' t_ignore = ' \t\f' t_STARTTOKEN = '\\+' def t_NAME(self, t): '[A-Za-z_$][A-Za-z0-9_$]*' if t.value in ProtobufLexer.keywords: #print "type: %s val %s t %s" % (t.type, t.value, t) t.type = t.value.upper() return t def t_newline(self, t): r'\n+' t.lexer.lineno += len(t.value) def t_newline2(self, t): r'(\r\n)+' t.lexer.lineno += len(t.value) / 2 def t_error(self, t): print("Illegal character '{}' ({}) in line {}".format(t.value[0], hex(ord(t.value[0])), t.lexer.lineno)) t.lexer.skip(1) class LexHelper: offset = 0 def get_max_linespan(self, p): defSpan=[1e60, -1] mSpan=[1e60, -1] for sp in range(0, len(p)): csp = p.linespan(sp) if csp[0] == 0 and csp[1] == 0: if hasattr(p[sp], "linespan"): csp = p[sp].linespan else: continue if csp == None or len(csp) != 2: continue if csp[0] == 0 and csp[1] == 0: continue if csp[0] < mSpan[0]: mSpan[0] = csp[0] if csp[1] > mSpan[1]: mSpan[1] = csp[1] if defSpan == mSpan: return (0,0) return tuple([mSpan[0]-self.offset, mSpan[1]-self.offset]) def get_max_lexspan(self, p): defSpan=[1e60, -1] mSpan=[1e60, -1] for sp in range(0, len(p)): csp = p.lexspan(sp) if csp[0] == 0 and csp[1] == 0: if hasattr(p[sp], "lexspan"): csp = p[sp].lexspan else: continue if csp == None or len(csp) != 2: continue if csp[0] == 0 and csp[1] == 0: continue if csp[0] < mSpan[0]: mSpan[0] = csp[0] if csp[1] > mSpan[1]: mSpan[1] = csp[1] if defSpan == mSpan: return (0,0) return tuple([mSpan[0]-self.offset, mSpan[1]-self.offset]) def set_parse_object(self, dst, p): dst.setLexData(linespan=self.get_max_linespan(p), lexspan=self.get_max_lexspan(p)) dst.setLexObj(p) class ProtobufParser(object): tokens = ProtobufLexer.tokens offset = 0 lh = LexHelper() def setOffset(self, of): self.offset = of self.lh.offset = of def p_empty(self, p): '''empty :''' pass def p_field_modifier(self,p): '''field_modifier : REQUIRED | OPTIONAL | REPEATED''' p[0] = LU.i(p,1) def p_primitive_type(self, p): '''primitive_type : DOUBLE | FLOAT | INT32 | INT64 | UINT32 | UINT64 | SINT32 | SINT64 | FIXED32 | FIXED64 | SFIXED32 | SFIXED64 | BOOL | STRING | BYTES''' p[0] = LU.i(p,1) def p_field_id(self, p): '''field_id : NUM''' p[0] = LU.i(p,1) def p_rvalue(self, p): '''rvalue : NUM | TRUE | FALSE''' p[0] = LU.i(p,1) def p_rvalue2(self, p): '''rvalue : NAME''' p[0] = Name(LU.i(p, 1)) self.lh.set_parse_object(p[0], p) p[0].deriveLex() def p_field_directive(self, p): '''field_directive : LBRACK NAME EQ rvalue RBRACK''' p[0] = FieldDirective(Name(LU.i(p, 2)), LU.i(p,4)) self.lh.set_parse_object(p[0], p) def p_field_directive_times(self, p): '''field_directive_times : field_directive_plus''' p[0] = p[1] def p_field_directive_times2(self, p): '''field_directive_times : empty''' p[0] = [] def p_field_directive_plus(self, p): '''field_directive_plus : field_directive | field_directive_plus field_directive''' if len(p) == 2: p[0] = [LU(p,1)] else: p[0] = p[1] + [LU(p,2)] def p_dotname(self, p): '''dotname : NAME | dotname DOT NAME''' if len(p) == 2: p[0] = [LU(p,1)] else: p[0] = p[1] + [LU(p,3)] # Hack for cases when there is a field named 'message' or 'max' def p_fieldName(self, p): '''field_name : NAME | MESSAGE | MAX''' p[0] = Name(LU.i(p,1)) self.lh.set_parse_object(p[0], p) p[0].deriveLex() def p_field_type(self, p): '''field_type : primitive_type''' p[0] = FieldType(LU.i(p,1)) self.lh.set_parse_object(p[0], p) def p_field_type2(self, p): '''field_type : dotname''' p[0] = DotName(LU.i(p, 1)) self.lh.set_parse_object(p[0], p) p[0].deriveLex() # Root of the field declaration. def p_field_definition(self, p): '''field_definition : field_modifier field_type field_name EQ field_id field_directive_times SEMI''' p[0] = FieldDefinition(LU.i(p,1), LU.i(p,2), LU.i(p, 3), LU.i(p,5), LU.i(p,6)) self.lh.set_parse_object(p[0], p) # Root of the enum field declaration. def p_enum_field(self, p): '''enum_field : field_name EQ NUM SEMI''' p[0] = EnumFieldDefinition(LU.i(p, 1), LU.i(p,3)) self.lh.set_parse_object(p[0], p) def p_enum_body_part(self, p): '''enum_body_part : enum_field | option_directive''' p[0] = p[1] def p_enum_body(self, p): '''enum_body : enum_body_part | enum_body enum_body_part''' if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] + [p[2]] def p_enum_body_opt(self, p): '''enum_body_opt : empty''' p[0] = [] def p_enum_body_opt2(self, p): '''enum_body_opt : enum_body''' p[0] = p[1] # Root of the enum declaration. # enum_definition ::= 'enum' ident '{' { ident '=' integer ';' }* '}' def p_enum_definition(self, p): '''enum_definition : ENUM NAME LBRACE enum_body_opt RBRACE''' p[0] = EnumDefinition(Name(LU.i(p, 2)), LU.i(p,4)) self.lh.set_parse_object(p[0], p) def p_extensions_to(self, p): '''extensions_to : MAX''' p[0] = ExtensionsMax() self.lh.set_parse_object(p[0], p) def p_extensions_to2(self, p): '''extensions_to : NUM''' p[0] = LU.i(p, 1) # extensions_definition ::= 'extensions' integer 'to' integer ';' def p_extensions_definition(self, p): '''extensions_definition : EXTENSIONS NUM TO extensions_to SEMI''' p[0] = ExtensionsDirective(LU.i(p,2), LU.i(p,4)) self.lh.set_parse_object(p[0], p) # message_extension ::= 'extend' ident '{' message_body '}' def p_message_extension(self, p): '''message_extension : EXTEND NAME LBRACE message_body RBRACE''' p[0] = MessageExtension(Name(LU.i(p, 2)), LU.i(p,4)) self.lh.set_parse_object(p[0], p) def p_message_body_part(self, p): '''message_body_part : field_definition | enum_definition | message_definition | extensions_definition | message_extension''' p[0] = p[1] # message_body ::= { field_definition | enum_definition | message_definition | extensions_definition | message_extension }* def p_message_body(self, p): '''message_body : empty''' p[0] = [] # message_body ::= { field_definition | enum_definition | message_definition | extensions_definition | message_extension }* def p_message_body2(self, p): '''message_body : message_body_part | message_body message_body_part''' if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] + [p[2]] # Root of the message declaration. # message_definition = MESSAGE_ - ident("messageId") + LBRACE + message_body("body") + RBRACE def p_message_definition(self, p): '''message_definition : MESSAGE NAME LBRACE message_body RBRACE''' p[0] = MessageDefinition(Name(LU.i(p, 2)), LU.i(p,4)) self.lh.set_parse_object(p[0], p) # method_definition ::= 'rpc' ident '(' [ ident ] ')' 'returns' '(' [ ident ] ')' ';' def p_method_definition(self, p): '''method_definition : RPC NAME LPAR NAME RPAR RETURNS LPAR NAME RPAR''' p[0] = MethodDefinition(Name(LU.i(p, 2)), Name(LU.i(p, 4)), Name(LU.i(p, 8))) self.lh.set_parse_object(p[0], p) def p_method_definition_opt(self, p): '''method_definition_opt : empty''' p[0] = [] def p_method_definition_opt2(self, p): '''method_definition_opt : method_definition | method_definition_opt method_definition''' if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] + [p[2]] # service_definition ::= 'service' ident '{' method_definition* '}' # service_definition = SERVICE_ - ident("serviceName") + LBRACE + ZeroOrMore(Group(method_definition)) + RBRACE def p_service_definition(self, p): '''service_definition : SERVICE NAME LBRACE method_definition_opt RBRACE''' p[0] = ServiceDefinition(Name(LU.i(p, 2)), LU.i(p,4)) self.lh.set_parse_object(p[0], p) # package_directive ::= 'package' ident [ '.' ident]* ';' def p_package_directive(self,p): '''package_directive : PACKAGE dotname SEMI''' p[0] = PackageStatement(Name(LU.i(p, 2))) self.lh.set_parse_object(p[0], p) # import_directive = IMPORT_ - quotedString("importFileSpec") + SEMI def p_import_directive(self, p): '''import_directive : IMPORT STRING_LITERAL SEMI''' p[0] = ImportStatement(Literal(LU.i(p,2))) self.lh.set_parse_object(p[0], p) def p_option_rvalue(self, p): '''option_rvalue : NUM | TRUE | FALSE''' p[0] = LU(p, 1) def p_option_rvalue2(self, p): '''option_rvalue : STRING_LITERAL''' p[0] = Literal(LU(p,1)) def p_option_rvalue3(self, p): '''option_rvalue : NAME''' p[0] = Name(LU.i(p,1)) # option_directive = OPTION_ - ident("optionName") + EQ + quotedString("optionValue") + SEMI def p_option_directive(self, p): '''option_directive : OPTION NAME EQ option_rvalue SEMI''' p[0] = OptionStatement(Name(LU.i(p, 2)), LU.i(p,4)) self.lh.set_parse_object(p[0], p) # topLevelStatement = Group(message_definition | message_extension | enum_definition | service_definition | import_directive | option_directive) def p_topLevel(self,p): '''topLevel : message_definition | message_extension | enum_definition | service_definition | import_directive | option_directive''' p[0] = p[1] def p_package_definition(self, p): '''package_definition : package_directive''' p[0] = p[1] def p_packages2(self, p): '''package_definition : empty''' p[0] = [] def p_statements2(self, p): '''statements : topLevel | statements topLevel''' if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] + [p[2]] def p_statements(self, p): '''statements : empty''' p[0] = [] # parser = Optional(package_directive) + ZeroOrMore(topLevelStatement) def p_protofile(self, p): '''protofile : package_definition statements''' p[0] = ProtoFile(LU.i(p,1), LU.i(p,2)) self.lh.set_parse_object(p[0], p) # Parsing starting point def p_goal(self, p): '''goal : STARTTOKEN protofile''' p[0] = p[2] def p_error(self, p): print('error: {}'.format(p)) class ProtobufAnalyzer(object): def __init__(self): self.lexer = lex.lex(module=ProtobufLexer(), optimize=1) self.parser = yacc.yacc(module=ProtobufParser(), start='goal', optimize=1) def tokenize_string(self, code): self.lexer.input(code) for token in self.lexer: print(token) def tokenize_file(self, _file): if type(_file) == str: _file = file(_file) content = '' for line in _file: content += line return self.tokenize_string(content) def parse_string(self, code, debug=0, lineno=1, prefix='+'): self.lexer.lineno = lineno self.parser.offset = len(prefix) return self.parser.parse(prefix + code, lexer=self.lexer, debug=debug) def parse_file(self, _file, debug=0): if type(_file) == str: _file = file(_file) content = '' for line in _file: content += line return self.parse_string(content, debug=debug)
    apache-2.0
    jaduimstra/nilmtk
    nilmtk/dataset_converters/redd/convert_redd.py
    6
    5462
    from __future__ import print_function, division import pandas as pd import numpy as np from copy import deepcopy from os.path import join, isdir, isfile from os import listdir import re from sys import stdout from nilmtk.utils import get_datastore from nilmtk.datastore import Key from nilmtk.timeframe import TimeFrame from nilmtk.measurement import LEVEL_NAMES from nilmtk.utils import get_module_directory, check_directory_exists from nilm_metadata import convert_yaml_to_hdf5, save_yaml_to_datastore """ TODO: * The bottleneck appears to be CPU. So could be sped up by using multiprocessing module to use multiple CPU cores to load REDD channels in parallel. """ def convert_redd(redd_path, output_filename, format='HDF'): """ Parameters ---------- redd_path : str The root path of the REDD low_freq dataset. output_filename : str The destination filename (including path and suffix). format : str format of output. Either 'HDF' or 'CSV'. Defaults to 'HDF' """ def _redd_measurement_mapping_func(house_id, chan_id): ac_type = 'apparent' if chan_id <= 2 else 'active' return [('power', ac_type)] # Open DataStore store = get_datastore(output_filename, format, mode='w') # Convert raw data to DataStore _convert(redd_path, store, _redd_measurement_mapping_func, 'US/Eastern') # Add metadata save_yaml_to_datastore(join(get_module_directory(), 'dataset_converters', 'redd', 'metadata'), store) store.close() print("Done converting REDD to HDF5!") def _convert(input_path, store, measurement_mapping_func, tz, sort_index=True): """ Parameters ---------- input_path : str The root path of the REDD low_freq dataset. store : DataStore The NILMTK DataStore object. measurement_mapping_func : function Must take these parameters: - house_id - chan_id Function should return a list of tuples e.g. [('power', 'active')] tz : str Timezone e.g. 'US/Eastern' sort_index : bool """ check_directory_exists(input_path) # Iterate though all houses and channels houses = _find_all_houses(input_path) for house_id in houses: print("Loading house", house_id, end="... ") stdout.flush() chans = _find_all_chans(input_path, house_id) for chan_id in chans: print(chan_id, end=" ") stdout.flush() key = Key(building=house_id, meter=chan_id) measurements = measurement_mapping_func(house_id, chan_id) csv_filename = _get_csv_filename(input_path, key) df = _load_csv(csv_filename, measurements, tz) if sort_index: df = df.sort_index() # raw REDD data isn't always sorted store.put(str(key), df) print() def _find_all_houses(input_path): """ Returns ------- list of integers (house instances) """ dir_names = [p for p in listdir(input_path) if isdir(join(input_path, p))] return _matching_ints(dir_names, '^house_(\d)$') def _find_all_chans(input_path, house_id): """ Returns ------- list of integers (channels) """ house_path = join(input_path, 'house_{:d}'.format(house_id)) filenames = [p for p in listdir(house_path) if isfile(join(house_path, p))] return _matching_ints(filenames, '^channel_(\d\d?).dat$') def _matching_ints(strings, regex): """Uses regular expression to select and then extract an integer from strings. Parameters ---------- strings : list of strings regex : string Regular Expression. Including one group. This group is used to extract the integer from each string. Returns ------- list of ints """ ints = [] p = re.compile(regex) for string in strings: m = p.match(string) if m: integer = int(m.group(1)) ints.append(integer) ints.sort() return ints def _get_csv_filename(input_path, key_obj): """ Parameters ---------- input_path : (str) the root path of the REDD low_freq dataset key_obj : (nilmtk.Key) the house and channel to load Returns ------- filename : str """ assert isinstance(input_path, str) assert isinstance(key_obj, Key) # Get path house_path = 'house_{:d}'.format(key_obj.building) path = join(input_path, house_path) assert isdir(path) # Get filename filename = 'channel_{:d}.dat'.format(key_obj.meter) filename = join(path, filename) assert isfile(filename) return filename def _load_csv(filename, columns, tz): """ Parameters ---------- filename : str columns : list of tuples (for hierarchical column index) tz : str e.g. 'US/Eastern' Returns ------- dataframe """ # Load data df = pd.read_csv(filename, sep=' ', names=columns, dtype={m:np.float32 for m in columns}) # Modify the column labels to reflect the power measurements recorded. df.columns.set_names(LEVEL_NAMES, inplace=True) # Convert the integer index column to timezone-aware datetime df.index = pd.to_datetime(df.index.values, unit='s', utc=True) df = df.tz_convert(tz) return df
    apache-2.0
    dcroc16/skunk_works
    google_appengine/lib/distutils/distutils/msvccompiler.py
    250
    23637
    """distutils.msvccompiler Contains MSVCCompiler, an implementation of the abstract CCompiler class for the Microsoft Visual Studio. """ # Written by Perry Stoll # hacked by Robin Becker and Thomas Heller to do a better job of # finding DevStudio (through the registry) __revision__ = "$Id$" import sys import os import string from distutils.errors import (DistutilsExecError, DistutilsPlatformError, CompileError, LibError, LinkError) from distutils.ccompiler import CCompiler, gen_lib_options from distutils import log _can_read_reg = 0 try: import _winreg _can_read_reg = 1 hkey_mod = _winreg RegOpenKeyEx = _winreg.OpenKeyEx RegEnumKey = _winreg.EnumKey RegEnumValue = _winreg.EnumValue RegError = _winreg.error except ImportError: try: import win32api import win32con _can_read_reg = 1 hkey_mod = win32con RegOpenKeyEx = win32api.RegOpenKeyEx RegEnumKey = win32api.RegEnumKey RegEnumValue = win32api.RegEnumValue RegError = win32api.error except ImportError: log.info("Warning: Can't read registry to find the " "necessary compiler setting\n" "Make sure that Python modules _winreg, " "win32api or win32con are installed.") pass if _can_read_reg: HKEYS = (hkey_mod.HKEY_USERS, hkey_mod.HKEY_CURRENT_USER, hkey_mod.HKEY_LOCAL_MACHINE, hkey_mod.HKEY_CLASSES_ROOT) def read_keys(base, key): """Return list of registry keys.""" try: handle = RegOpenKeyEx(base, key) except RegError: return None L = [] i = 0 while 1: try: k = RegEnumKey(handle, i) except RegError: break L.append(k) i = i + 1 return L def read_values(base, key): """Return dict of registry keys and values. All names are converted to lowercase. """ try: handle = RegOpenKeyEx(base, key) except RegError: return None d = {} i = 0 while 1: try: name, value, type = RegEnumValue(handle, i) except RegError: break name = name.lower() d[convert_mbcs(name)] = convert_mbcs(value) i = i + 1 return d def convert_mbcs(s): enc = getattr(s, "encode", None) if enc is not None: try: s = enc("mbcs") except UnicodeError: pass return s class MacroExpander: def __init__(self, version): self.macros = {} self.load_macros(version) def set_macro(self, macro, path, key): for base in HKEYS: d = read_values(base, path) if d: self.macros["$(%s)" % macro] = d[key] break def load_macros(self, version): vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir") self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir") net = r"Software\Microsoft\.NETFramework" self.set_macro("FrameworkDir", net, "installroot") try: if version > 7.0: self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1") else: self.set_macro("FrameworkSDKDir", net, "sdkinstallroot") except KeyError: raise DistutilsPlatformError, \ ("""Python was built with Visual Studio 2003; extensions must be built with a compiler than can generate compatible binaries. Visual Studio 2003 was not found on this system. If you have Cygwin installed, you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") p = r"Software\Microsoft\NET Framework Setup\Product" for base in HKEYS: try: h = RegOpenKeyEx(base, p) except RegError: continue key = RegEnumKey(h, 0) d = read_values(base, r"%s\%s" % (p, key)) self.macros["$(FrameworkVersion)"] = d["version"] def sub(self, s): for k, v in self.macros.items(): s = string.replace(s, k, v) return s def get_build_version(): """Return the version of MSVC that was used to build Python. For Python 2.3 and up, the version number is included in sys.version. For earlier versions, assume the compiler is MSVC 6. """ prefix = "MSC v." i = string.find(sys.version, prefix) if i == -1: return 6 i = i + len(prefix) s, rest = sys.version[i:].split(" ", 1) majorVersion = int(s[:-2]) - 6 minorVersion = int(s[2:3]) / 10.0 # I don't think paths are affected by minor version in version 6 if majorVersion == 6: minorVersion = 0 if majorVersion >= 6: return majorVersion + minorVersion # else we don't know what version of the compiler this is return None def get_build_architecture(): """Return the processor architecture. Possible results are "Intel", "Itanium", or "AMD64". """ prefix = " bit (" i = string.find(sys.version, prefix) if i == -1: return "Intel" j = string.find(sys.version, ")", i) return sys.version[i+len(prefix):j] def normalize_and_reduce_paths(paths): """Return a list of normalized paths with duplicates removed. The current order of paths is maintained. """ # Paths are normalized so things like: /a and /a/ aren't both preserved. reduced_paths = [] for p in paths: np = os.path.normpath(p) # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set. if np not in reduced_paths: reduced_paths.append(np) return reduced_paths class MSVCCompiler (CCompiler) : """Concrete class that implements an interface to Microsoft Visual C++, as defined by the CCompiler abstract class.""" compiler_type = 'msvc' # Just set this so CCompiler's constructor doesn't barf. We currently # don't use the 'set_executables()' bureaucracy provided by CCompiler, # as it really isn't necessary for this sort of single-compiler class. # Would be nice to have a consistent interface with UnixCCompiler, # though, so it's worth thinking about. executables = {} # Private class data (need to distinguish C from C++ source for compiler) _c_extensions = ['.c'] _cpp_extensions = ['.cc', '.cpp', '.cxx'] _rc_extensions = ['.rc'] _mc_extensions = ['.mc'] # Needed for the filename generation methods provided by the # base class, CCompiler. src_extensions = (_c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions) res_extension = '.res' obj_extension = '.obj' static_lib_extension = '.lib' shared_lib_extension = '.dll' static_lib_format = shared_lib_format = '%s%s' exe_extension = '.exe' def __init__ (self, verbose=0, dry_run=0, force=0): CCompiler.__init__ (self, verbose, dry_run, force) self.__version = get_build_version() self.__arch = get_build_architecture() if self.__arch == "Intel": # x86 if self.__version >= 7: self.__root = r"Software\Microsoft\VisualStudio" self.__macros = MacroExpander(self.__version) else: self.__root = r"Software\Microsoft\Devstudio" self.__product = "Visual Studio version %s" % self.__version else: # Win64. Assume this was built with the platform SDK self.__product = "Microsoft SDK compiler %s" % (self.__version + 6) self.initialized = False def initialize(self): self.__paths = [] if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"): # Assume that the SDK set up everything alright; don't try to be # smarter self.cc = "cl.exe" self.linker = "link.exe" self.lib = "lib.exe" self.rc = "rc.exe" self.mc = "mc.exe" else: self.__paths = self.get_msvc_paths("path") if len (self.__paths) == 0: raise DistutilsPlatformError, \ ("Python was built with %s, " "and extensions need to be built with the same " "version of the compiler, but it isn't installed." % self.__product) self.cc = self.find_exe("cl.exe") self.linker = self.find_exe("link.exe") self.lib = self.find_exe("lib.exe") self.rc = self.find_exe("rc.exe") # resource compiler self.mc = self.find_exe("mc.exe") # message compiler self.set_path_env_var('lib') self.set_path_env_var('include') # extend the MSVC path with the current path try: for p in string.split(os.environ['path'], ';'): self.__paths.append(p) except KeyError: pass self.__paths = normalize_and_reduce_paths(self.__paths) os.environ['path'] = string.join(self.__paths, ';') self.preprocess_options = None if self.__arch == "Intel": self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' , '/DNDEBUG'] self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX', '/Z7', '/D_DEBUG'] else: # Win64 self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' , '/DNDEBUG'] self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-', '/Z7', '/D_DEBUG'] self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] if self.__version >= 7: self.ldflags_shared_debug = [ '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG' ] else: self.ldflags_shared_debug = [ '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG' ] self.ldflags_static = [ '/nologo'] self.initialized = True # -- Worker methods ------------------------------------------------ def object_filenames (self, source_filenames, strip_dir=0, output_dir=''): # Copied from ccompiler.py, extended to return .res as 'object'-file # for .rc input file if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: (base, ext) = os.path.splitext (src_name) base = os.path.splitdrive(base)[1] # Chop off the drive base = base[os.path.isabs(base):] # If abs, chop off leading / if ext not in self.src_extensions: # Better to raise an exception instead of silently continuing # and later complain about sources and targets having # different lengths raise CompileError ("Don't know how to compile %s" % src_name) if strip_dir: base = os.path.basename (base) if ext in self._rc_extensions: obj_names.append (os.path.join (output_dir, base + self.res_extension)) elif ext in self._mc_extensions: obj_names.append (os.path.join (output_dir, base + self.res_extension)) else: obj_names.append (os.path.join (output_dir, base + self.obj_extension)) return obj_names # object_filenames () def compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): if not self.initialized: self.initialize() macros, objects, extra_postargs, pp_opts, build = \ self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs) compile_opts = extra_preargs or [] compile_opts.append ('/c') if debug: compile_opts.extend(self.compile_options_debug) else: compile_opts.extend(self.compile_options) for obj in objects: try: src, ext = build[obj] except KeyError: continue if debug: # pass the full pathname to MSVC in debug mode, # this allows the debugger to find the source file # without asking the user to browse for it src = os.path.abspath(src) if ext in self._c_extensions: input_opt = "/Tc" + src elif ext in self._cpp_extensions: input_opt = "/Tp" + src elif ext in self._rc_extensions: # compile .RC to .RES file input_opt = src output_opt = "/fo" + obj try: self.spawn ([self.rc] + pp_opts + [output_opt] + [input_opt]) except DistutilsExecError, msg: raise CompileError, msg continue elif ext in self._mc_extensions: # Compile .MC to .RC file to .RES file. # * '-h dir' specifies the directory for the # generated include file # * '-r dir' specifies the target directory of the # generated RC file and the binary message resource # it includes # # For now (since there are no options to change this), # we use the source-directory for the include file and # the build directory for the RC file and message # resources. This works at least for win32all. h_dir = os.path.dirname (src) rc_dir = os.path.dirname (obj) try: # first compile .MC to .RC and .H file self.spawn ([self.mc] + ['-h', h_dir, '-r', rc_dir] + [src]) base, _ = os.path.splitext (os.path.basename (src)) rc_file = os.path.join (rc_dir, base + '.rc') # then compile .RC to .RES file self.spawn ([self.rc] + ["/fo" + obj] + [rc_file]) except DistutilsExecError, msg: raise CompileError, msg continue else: # how to handle this file? raise CompileError ( "Don't know how to compile %s to %s" % \ (src, obj)) output_opt = "/Fo" + obj try: self.spawn ([self.cc] + compile_opts + pp_opts + [input_opt, output_opt] + extra_postargs) except DistutilsExecError, msg: raise CompileError, msg return objects # compile () def create_static_lib (self, objects, output_libname, output_dir=None, debug=0, target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args (objects, output_dir) output_filename = \ self.library_filename (output_libname, output_dir=output_dir) if self._need_link (objects, output_filename): lib_args = objects + ['/OUT:' + output_filename] if debug: pass # XXX what goes here? try: self.spawn ([self.lib] + lib_args) except DistutilsExecError, msg: raise LibError, msg else: log.debug("skipping %s (up-to-date)", output_filename) # create_static_lib () def link (self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args (objects, output_dir) (libraries, library_dirs, runtime_library_dirs) = \ self._fix_lib_args (libraries, library_dirs, runtime_library_dirs) if runtime_library_dirs: self.warn ("I don't know what to do with 'runtime_library_dirs': " + str (runtime_library_dirs)) lib_opts = gen_lib_options (self, library_dirs, runtime_library_dirs, libraries) if output_dir is not None: output_filename = os.path.join (output_dir, output_filename) if self._need_link (objects, output_filename): if target_desc == CCompiler.EXECUTABLE: if debug: ldflags = self.ldflags_shared_debug[1:] else: ldflags = self.ldflags_shared[1:] else: if debug: ldflags = self.ldflags_shared_debug else: ldflags = self.ldflags_shared export_opts = [] for sym in (export_symbols or []): export_opts.append("/EXPORT:" + sym) ld_args = (ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename]) # The MSVC linker generates .lib and .exp files, which cannot be # suppressed by any linker switches. The .lib files may even be # needed! Make sure they are generated in the temporary build # directory. Since they have different names for debug and release # builds, they can go into the same directory. if export_symbols is not None: (dll_name, dll_ext) = os.path.splitext( os.path.basename(output_filename)) implib_file = os.path.join( os.path.dirname(objects[0]), self.library_filename(dll_name)) ld_args.append ('/IMPLIB:' + implib_file) if extra_preargs: ld_args[:0] = extra_preargs if extra_postargs: ld_args.extend(extra_postargs) self.mkpath (os.path.dirname (output_filename)) try: self.spawn ([self.linker] + ld_args) except DistutilsExecError, msg: raise LinkError, msg else: log.debug("skipping %s (up-to-date)", output_filename) # link () # -- Miscellaneous methods ----------------------------------------- # These are all used by the 'gen_lib_options() function, in # ccompiler.py. def library_dir_option (self, dir): return "/LIBPATH:" + dir def runtime_library_dir_option (self, dir): raise DistutilsPlatformError, \ "don't know how to set runtime library search path for MSVC++" def library_option (self, lib): return self.library_filename (lib) def find_library_file (self, dirs, lib, debug=0): # Prefer a debugging library if found (and requested), but deal # with it if we don't have one. if debug: try_names = [lib + "_d", lib] else: try_names = [lib] for dir in dirs: for name in try_names: libfile = os.path.join(dir, self.library_filename (name)) if os.path.exists(libfile): return libfile else: # Oops, didn't find it in *any* of 'dirs' return None # find_library_file () # Helper methods for using the MSVC registry settings def find_exe(self, exe): """Return path to an MSVC executable program. Tries to find the program in several places: first, one of the MSVC program search paths from the registry; next, the directories in the PATH environment variable. If any of those work, return an absolute path that is known to exist. If none of them work, just return the original program name, 'exe'. """ for p in self.__paths: fn = os.path.join(os.path.abspath(p), exe) if os.path.isfile(fn): return fn # didn't find it; try existing path for p in string.split(os.environ['Path'],';'): fn = os.path.join(os.path.abspath(p),exe) if os.path.isfile(fn): return fn return exe def get_msvc_paths(self, path, platform='x86'): """Get a list of devstudio directories (include, lib or path). Return a list of strings. The list will be empty if unable to access the registry or appropriate registry keys not found. """ if not _can_read_reg: return [] path = path + " dirs" if self.__version >= 7: key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories" % (self.__root, self.__version)) else: key = (r"%s\6.0\Build System\Components\Platforms" r"\Win32 (%s)\Directories" % (self.__root, platform)) for base in HKEYS: d = read_values(base, key) if d: if self.__version >= 7: return string.split(self.__macros.sub(d[path]), ";") else: return string.split(d[path], ";") # MSVC 6 seems to create the registry entries we need only when # the GUI is run. if self.__version == 6: for base in HKEYS: if read_values(base, r"%s\6.0" % self.__root) is not None: self.warn("It seems you have Visual Studio 6 installed, " "but the expected registry settings are not present.\n" "You must at least run the Visual Studio GUI once " "so that these entries are created.") break return [] def set_path_env_var(self, name): """Set environment variable 'name' to an MSVC path type value. This is equivalent to a SET command prior to execution of spawned commands. """ if name == "lib": p = self.get_msvc_paths("library") else: p = self.get_msvc_paths(name) if p: os.environ[name] = string.join(p, ';') if get_build_version() >= 8.0: log.debug("Importing new compiler from distutils.msvc9compiler") OldMSVCCompiler = MSVCCompiler from distutils.msvc9compiler import MSVCCompiler # get_build_architecture not really relevant now we support cross-compile from distutils.msvc9compiler import MacroExpander
    mit
    vdmann/cse-360-image-hosting-website
    lib/python2.7/site-packages/requests/packages/chardet/big5prober.py
    2931
    1684
    ######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import Big5DistributionAnalysis from .mbcssm import Big5SMModel class Big5Prober(MultiByteCharSetProber): def __init__(self): MultiByteCharSetProber.__init__(self) self._mCodingSM = CodingStateMachine(Big5SMModel) self._mDistributionAnalyzer = Big5DistributionAnalysis() self.reset() def get_charset_name(self): return "Big5"
    mit
    chouseknecht/ansible
    lib/ansible/modules/network/aci/mso_schema_template_anp.py
    26
    5536
    #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Dag Wieers (@dagwieers) <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: mso_schema_template_anp short_description: Manage Application Network Profiles (ANPs) in schema templates description: - Manage ANPs in schema templates on Cisco ACI Multi-Site. author: - Dag Wieers (@dagwieers) version_added: '2.8' options: schema: description: - The name of the schema. type: str required: yes template: description: - The name of the template. type: str required: yes anp: description: - The name of the ANP to manage. type: str aliases: [ name ] display_name: description: - The name as displayed on the MSO web interface. type: str state: description: - Use C(present) or C(absent) for adding or removing. - Use C(query) for listing an object or multiple objects. type: str choices: [ absent, present, query ] default: present seealso: - module: mso_schema_template - module: mso_schema_template_anp_epg extends_documentation_fragment: mso ''' EXAMPLES = r''' - name: Add a new ANP mso_schema_template_anp: host: mso_host username: admin password: SomeSecretPassword schema: Schema 1 template: Template 1 anp: ANP 1 state: present delegate_to: localhost - name: Remove an ANP mso_schema_template_anp: host: mso_host username: admin password: SomeSecretPassword schema: Schema 1 template: Template 1 anp: ANP 1 state: absent delegate_to: localhost - name: Query a specific ANPs mso_schema_template_anp: host: mso_host username: admin password: SomeSecretPassword schema: Schema 1 template: Template 1 state: query delegate_to: localhost register: query_result - name: Query all ANPs mso_schema_template_anp: host: mso_host username: admin password: SomeSecretPassword schema: Schema 1 template: Template 1 state: query delegate_to: localhost register: query_result ''' RETURN = r''' ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, issubset def main(): argument_spec = mso_argument_spec() argument_spec.update( schema=dict(type='str', required=True), template=dict(type='str', required=True), anp=dict(type='str', aliases=['name']), # This parameter is not required for querying all objects display_name=dict(type='str'), state=dict(type='str', default='present', choices=['absent', 'present', 'query']), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'absent', ['anp']], ['state', 'present', ['anp']], ], ) schema = module.params['schema'] template = module.params['template'] anp = module.params['anp'] display_name = module.params['display_name'] state = module.params['state'] mso = MSOModule(module) # Get schema_id schema_obj = mso.get_obj('schemas', displayName=schema) if not schema_obj: mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema)) schema_path = 'schemas/{id}'.format(**schema_obj) # Get template templates = [t['name'] for t in schema_obj['templates']] if template not in templates: mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates))) template_idx = templates.index(template) # Get ANP anps = [a['name'] for a in schema_obj['templates'][template_idx]['anps']] if anp is not None and anp in anps: anp_idx = anps.index(anp) mso.existing = schema_obj['templates'][template_idx]['anps'][anp_idx] if state == 'query': if anp is None: mso.existing = schema_obj['templates'][template_idx]['anps'] elif not mso.existing: mso.fail_json(msg="ANP '{anp}' not found".format(anp=anp)) mso.exit_json() anps_path = '/templates/{0}/anps'.format(template) anp_path = '/templates/{0}/anps/{1}'.format(template, anp) ops = [] mso.previous = mso.existing if state == 'absent': if mso.existing: mso.sent = mso.existing = {} ops.append(dict(op='remove', path=anp_path)) elif state == 'present': if display_name is None and not mso.existing: display_name = anp epgs = [] if mso.existing: epgs = None payload = dict( name=anp, displayName=display_name, epgs=epgs, ) mso.sanitize(payload, collate=True) if mso.existing: if display_name is not None: ops.append(dict(op='replace', path=anp_path + '/displayName', value=display_name)) else: ops.append(dict(op='add', path=anps_path + '/-', value=mso.sent)) mso.existing = mso.proposed if not module.check_mode: mso.request(schema_path, method='PATCH', data=ops) mso.exit_json() if __name__ == "__main__": main()
    gpl-3.0
    tequa/ammisoft
    ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/prompt_toolkit/key_binding/bindings/named_commands.py
    6
    15922
    """ Key bindings which are also known by GNU readline by the given names. See: http://www.delorie.com/gnu/docs/readline/rlman_13.html """ from __future__ import unicode_literals from prompt_toolkit.enums import IncrementalSearchDirection, SEARCH_BUFFER from prompt_toolkit.selection import PasteMode from six.moves import range import six from .completion import generate_completions, display_completions_like_readline from prompt_toolkit.document import Document from prompt_toolkit.enums import EditingMode from prompt_toolkit.key_binding.input_processor import KeyPress from prompt_toolkit.keys import Keys __all__ = ( 'get_by_name', ) # Registry that maps the Readline command names to their handlers. _readline_commands = {} def register(name): """ Store handler in the `_readline_commands` dictionary. """ assert isinstance(name, six.text_type) def decorator(handler): assert callable(handler) _readline_commands[name] = handler return handler return decorator def get_by_name(name): """ Return the handler for the (Readline) command with the given name. """ try: return _readline_commands[name] except KeyError: raise KeyError('Unknown readline command: %r' % name) # # Commands for moving # See: http://www.delorie.com/gnu/docs/readline/rlman_14.html # @register('beginning-of-line') def beginning_of_line(event): " Move to the start of the current line. " buff = event.current_buffer buff.cursor_position += buff.document.get_start_of_line_position(after_whitespace=False) @register('end-of-line') def end_of_line(event): " Move to the end of the line. " buff = event.current_buffer buff.cursor_position += buff.document.get_end_of_line_position() @register('forward-char') def forward_char(event): " Move forward a character. " buff = event.current_buffer buff.cursor_position += buff.document.get_cursor_right_position(count=event.arg) @register('backward-char') def backward_char(event): " Move back a character. " buff = event.current_buffer buff.cursor_position += buff.document.get_cursor_left_position(count=event.arg) @register('forward-word') def forward_word(event): """ Move forward to the end of the next word. Words are composed of letters and digits. """ buff = event.current_buffer pos = buff.document.find_next_word_ending(count=event.arg) if pos: buff.cursor_position += pos @register('backward-word') def backward_word(event): """ Move back to the start of the current or previous word. Words are composed of letters and digits. """ buff = event.current_buffer pos = buff.document.find_previous_word_beginning(count=event.arg) if pos: buff.cursor_position += pos @register('clear-screen') def clear_screen(event): """ Clear the screen and redraw everything at the top of the screen. """ event.cli.renderer.clear() @register('redraw-current-line') def redraw_current_line(event): """ Refresh the current line. (Readline defines this command, but prompt-toolkit doesn't have it.) """ pass # # Commands for manipulating the history. # See: http://www.delorie.com/gnu/docs/readline/rlman_15.html # @register('accept-line') def accept_line(event): " Accept the line regardless of where the cursor is. " b = event.current_buffer b.accept_action.validate_and_handle(event.cli, b) @register('previous-history') def previous_history(event): " Move `back' through the history list, fetching the previous command. " event.current_buffer.history_backward(count=event.arg) @register('next-history') def next_history(event): " Move `forward' through the history list, fetching the next command. " event.current_buffer.history_forward(count=event.arg) @register('beginning-of-history') def beginning_of_history(event): " Move to the first line in the history. " event.current_buffer.go_to_history(0) @register('end-of-history') def end_of_history(event): """ Move to the end of the input history, i.e., the line currently being entered. """ event.current_buffer.history_forward(count=10**100) buff = event.current_buffer buff.go_to_history(len(buff._working_lines) - 1) @register('reverse-search-history') def reverse_search_history(event): """ Search backward starting at the current line and moving `up' through the history as necessary. This is an incremental search. """ event.cli.current_search_state.direction = IncrementalSearchDirection.BACKWARD event.cli.push_focus(SEARCH_BUFFER) # # Commands for changing text # @register('end-of-file') def end_of_file(event): """ Exit. """ event.cli.exit() @register('delete-char') def delete_char(event): " Delete character before the cursor. " deleted = event.current_buffer.delete(count=event.arg) if not deleted: event.cli.output.bell() @register('backward-delete-char') def backward_delete_char(event): " Delete the character behind the cursor. " if event.arg < 0: # When a negative argument has been given, this should delete in front # of the cursor. deleted = event.current_buffer.delete(count=-event.arg) else: deleted = event.current_buffer.delete_before_cursor(count=event.arg) if not deleted: event.cli.output.bell() @register('self-insert') def self_insert(event): " Insert yourself. " event.current_buffer.insert_text(event.data * event.arg) @register('transpose-chars') def transpose_chars(event): """ Emulate Emacs transpose-char behavior: at the beginning of the buffer, do nothing. At the end of a line or buffer, swap the characters before the cursor. Otherwise, move the cursor right, and then swap the characters before the cursor. """ b = event.current_buffer p = b.cursor_position if p == 0: return elif p == len(b.text) or b.text[p] == '\n': b.swap_characters_before_cursor() else: b.cursor_position += b.document.get_cursor_right_position() b.swap_characters_before_cursor() @register('uppercase-word') def uppercase_word(event): """ Uppercase the current (or following) word. """ buff = event.current_buffer for i in range(event.arg): pos = buff.document.find_next_word_ending() words = buff.document.text_after_cursor[:pos] buff.insert_text(words.upper(), overwrite=True) @register('downcase-word') def downcase_word(event): """ Lowercase the current (or following) word. """ buff = event.current_buffer for i in range(event.arg): # XXX: not DRY: see meta_c and meta_u!! pos = buff.document.find_next_word_ending() words = buff.document.text_after_cursor[:pos] buff.insert_text(words.lower(), overwrite=True) @register('capitalize-word') def capitalize_word(event): """ Capitalize the current (or following) word. """ buff = event.current_buffer for i in range(event.arg): pos = buff.document.find_next_word_ending() words = buff.document.text_after_cursor[:pos] buff.insert_text(words.title(), overwrite=True) @register('quoted-insert') def quoted_insert(event): """ Add the next character typed to the line verbatim. This is how to insert key sequences like C-q, for example. """ event.cli.quoted_insert = True # # Killing and yanking. # @register('kill-line') def kill_line(event): """ Kill the text from the cursor to the end of the line. If we are at the end of the line, this should remove the newline. (That way, it is possible to delete multiple lines by executing this command multiple times.) """ buff = event.current_buffer if event.arg < 0: deleted = buff.delete_before_cursor(count=-buff.document.get_start_of_line_position()) else: if buff.document.current_char == '\n': deleted = buff.delete(1) else: deleted = buff.delete(count=buff.document.get_end_of_line_position()) event.cli.clipboard.set_text(deleted) @register('kill-word') def kill_word(event): """ Kill from point to the end of the current word, or if between words, to the end of the next word. Word boundaries are the same as forward-word. """ buff = event.current_buffer pos = buff.document.find_next_word_ending(count=event.arg) if pos: deleted = buff.delete(count=pos) event.cli.clipboard.set_text(deleted) @register('unix-word-rubout') def unix_word_rubout(event, WORD=True): """ Kill the word behind point, using whitespace as a word boundary. Usually bound to ControlW. """ buff = event.current_buffer pos = buff.document.find_start_of_previous_word(count=event.arg, WORD=WORD) if pos is None: # Nothing found? delete until the start of the document. (The # input starts with whitespace and no words were found before the # cursor.) pos = - buff.cursor_position if pos: deleted = buff.delete_before_cursor(count=-pos) # If the previous key press was also Control-W, concatenate deleted # text. if event.is_repeat: deleted += event.cli.clipboard.get_data().text event.cli.clipboard.set_text(deleted) else: # Nothing to delete. Bell. event.cli.output.bell() @register('backward-kill-word') def backward_kill_word(event): """ Kills the word before point, using "not a letter nor a digit" as a word boundary. Usually bound to M-Del or M-Backspace. """ unix_word_rubout(event, WORD=False) @register('delete-horizontal-space') def delete_horizontal_space(event): " Delete all spaces and tabs around point. " buff = event.current_buffer text_before_cursor = buff.document.text_before_cursor text_after_cursor = buff.document.text_after_cursor delete_before = len(text_before_cursor) - len(text_before_cursor.rstrip('\t ')) delete_after = len(text_after_cursor) - len(text_after_cursor.lstrip('\t ')) buff.delete_before_cursor(count=delete_before) buff.delete(count=delete_after) @register('unix-line-discard') def unix_line_discard(event): """ Kill backward from the cursor to the beginning of the current line. """ buff = event.current_buffer if buff.document.cursor_position_col == 0 and buff.document.cursor_position > 0: buff.delete_before_cursor(count=1) else: deleted = buff.delete_before_cursor(count=-buff.document.get_start_of_line_position()) event.cli.clipboard.set_text(deleted) @register('yank') def yank(event): """ Paste before cursor. """ event.current_buffer.paste_clipboard_data( event.cli.clipboard.get_data(), count=event.arg, paste_mode=PasteMode.EMACS) @register('yank-nth-arg') def yank_nth_arg(event): """ Insert the first argument of the previous command. With an argument, insert the nth word from the previous command (start counting at 0). """ n = (event.arg if event.arg_present else None) event.current_buffer.yank_nth_arg(n) @register('yank-last-arg') def yank_last_arg(event): """ Like `yank_nth_arg`, but if no argument has been given, yank the last word of each line. """ n = (event.arg if event.arg_present else None) event.current_buffer.yank_last_arg(n) @register('yank-pop') def yank_pop(event): """ Rotate the kill ring, and yank the new top. Only works following yank or yank-pop. """ buff = event.current_buffer doc_before_paste = buff.document_before_paste clipboard = event.cli.clipboard if doc_before_paste is not None: buff.document = doc_before_paste clipboard.rotate() buff.paste_clipboard_data( clipboard.get_data(), paste_mode=PasteMode.EMACS) # # Completion. # @register('complete') def complete(event): " Attempt to perform completion. " display_completions_like_readline(event) @register('menu-complete') def menu_complete(event): """ Generate completions, or go to the next completion. (This is the default way of completing input in prompt_toolkit.) """ generate_completions(event) @register('menu-complete-backward') def menu_complete_backward(event): " Move backward through the list of possible completions. " event.current_buffer.complete_previous() # # Keyboard macros. # @register('start-kbd-macro') def start_kbd_macro(event): """ Begin saving the characters typed into the current keyboard macro. """ event.cli.input_processor.start_macro() @register('end-kbd-macro') def start_kbd_macro(event): """ Stop saving the characters typed into the current keyboard macro and save the definition. """ event.cli.input_processor.end_macro() @register('call-last-kbd-macro') def start_kbd_macro(event): """ Re-execute the last keyboard macro defined, by making the characters in the macro appear as if typed at the keyboard. """ event.cli.input_processor.call_macro() @register('print-last-kbd-macro') def print_last_kbd_macro(event): " Print the last keboard macro. " # TODO: Make the format suitable for the inputrc file. def print_macro(): for k in event.cli.input_processor.macro: print(k) event.cli.run_in_terminal(print_macro) # # Miscellaneous Commands. # @register('undo') def undo(event): " Incremental undo. " event.current_buffer.undo() @register('insert-comment') def insert_comment(event): """ Without numeric argument, comment all lines. With numeric argument, uncomment all lines. In any case accept the input. """ buff = event.current_buffer # Transform all lines. if event.arg != 1: def change(line): return line[1:] if line.startswith('#') else line else: def change(line): return '#' + line buff.document = Document( text='\n'.join(map(change, buff.text.splitlines())), cursor_position=0) # Accept input. buff.accept_action.validate_and_handle(event.cli, buff) @register('vi-editing-mode') def vi_editing_mode(event): " Switch to Vi editing mode. " event.cli.editing_mode = EditingMode.VI @register('emacs-editing-mode') def emacs_editing_mode(event): " Switch to Emacs editing mode. " event.cli.editing_mode = EditingMode.EMACS @register('prefix-meta') def prefix_meta(event): """ Metafy the next character typed. This is for keyboards without a meta key. Sometimes people also want to bind other keys to Meta, e.g. 'jj':: registry.add_key_binding('j', 'j', filter=ViInsertMode())(prefix_meta) """ event.cli.input_processor.feed(KeyPress(Keys.Escape)) @register('operate-and-get-next') def operate_and_get_next(event): """ Accept the current line for execution and fetch the next line relative to the current line from the history for editing. """ buff = event.current_buffer new_index = buff.working_index + 1 # Accept the current input. (This will also redraw the interface in the # 'done' state.) buff.accept_action.validate_and_handle(event.cli, buff) # Set the new index at the start of the next run. def set_working_index(): if new_index < len(buff._working_lines): buff.working_index = new_index event.cli.pre_run_callables.append(set_working_index) @register('edit-and-execute-command') def edit_and_execute(event): """ Invoke an editor on the current command line, and accept the result. """ buff = event.current_buffer buff.open_in_editor(event.cli) buff.accept_action.validate_and_handle(event.cli, buff)
    bsd-3-clause
    liyitest/rr
    openstack_dashboard/test/integration_tests/regions/baseregion.py
    40
    4462
    # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import types from openstack_dashboard.test.integration_tests import basewebobject class BaseRegion(basewebobject.BaseWebObject): """Base class for region module * there is necessity to override some basic methods for obtaining elements as in content of regions it is required to do relative searches * self.driver cannot be easily replaced with self.src_elem because that would result in functionality loss, self.driver is WebDriver and src_elem is WebElement its usage is different. * this does not mean that self.src_elem cannot be self.driver """ _default_src_locator = None # private methods def __init__(self, driver, conf, src_elem=None): super(BaseRegion, self).__init__(driver, conf) if src_elem is None and self._default_src_locator: # fake self.src_elem must be set up in # order self._get_element work self.src_elem = driver src_elem = self._get_element(*self._default_src_locator) self.src_elem = src_elem or driver # variable for storing names of dynamic properties and # associated 'getters' - meaning method that are supplying # regions or web elements self._dynamic_properties = {} def __getattr__(self, name): """It is not possible to create property bounded just to object and not class at runtime, therefore it is necessary to override __getattr__ and make fake 'properties' by storing them in the protected attribute _dynamic_attributes and returning result of the method associated with the specified attribute. This way the feeling of having regions accessed as 'properties' is created, which is one of the requirement of page object pattern. """ try: return self._dynamic_properties[name]() except KeyError: msg = "'{0}' object has no attribute '{1}'" raise AttributeError(msg.format(type(self).__name__, name)) # protected methods and classes class _DynamicProperty(object): """Serves as new property holder.""" def __init__(self, method, index=None): """In case object was created with index != None, it is assumed that the result of self.method should be tuple() and just certain index should be returned """ self.method = method self.index = index def __call__(self, *args, **kwargs): result = self.method() return result if self.index is None else result[self.index] def _init_dynamic_properties(self, new_attr_names, method): """Create new object's 'properties' at runtime.""" for index, new_attr_name in enumerate(new_attr_names): self._init_dynamic_property(new_attr_name, method, index) def _init_dynamic_property(self, new_attr_name, method, index=None): """Create new object's property at runtime. If index argument is supplied it is assumed that method returns tuple() and only element on ${index} position is returned. """ if (new_attr_name in dir(self) or new_attr_name in self._dynamic_properties): raise AttributeError("%s class has already attribute %s." "The new property could not be " "created." % (self.__class__.__name__, new_attr_name)) new_method = self.__class__._DynamicProperty(method, index) inst_method = types.MethodType(new_method, self) self._dynamic_properties[new_attr_name] = inst_method def _get_element(self, *locator): return self.src_elem.find_element(*locator) def _get_elements(self, *locator): return self.src_elem.find_elements(*locator)
    apache-2.0
    defance/edx-platform
    common/test/acceptance/pages/lms/matlab_problem.py
    179
    1024
    """ Matlab Problem Page. """ from bok_choy.page_object import PageObject class MatlabProblemPage(PageObject): """ View of matlab problem page. """ url = None def is_browser_on_page(self): return self.q(css='.ungraded-matlab-result').present @property def problem_name(self): """ Return the current problem name. """ return self.q(css='.problem-header').text[0] def set_response(self, response_str): """ Input a response to the prompt. """ input_css = "$('.CodeMirror')[0].CodeMirror.setValue('{}');".format(response_str) self.browser.execute_script(input_css) def click_run_code(self): """ Click the run code button. """ self.q(css='input.save').click() self.wait_for_ajax() def get_grader_msg(self, class_name): """ Returns the text value of given class. """ self.wait_for_ajax() return self.q(css=class_name).text
    agpl-3.0
    zapstar/gae-facebook
    main.py
    1
    3589
    #Import WebApp2 framework on Google App Engine import webapp2 #Import Sessions from WebApp2 Extras from webapp2_extras import sessions #Import Quote Function from URL Library from urllib import quote #Import Parse_QueryString from URL Parse from urlparse import parse_qs #Import URLFetch from Google App Engine API from google.appengine.api import urlfetch #Import JSON Loads from JSON from json import loads #Import the Session State Variable Generator #Random String of 23 characters, unguessable import state_variable #Import the BaseSessionHandler Class import session_module class MainHandler(session_module.BaseSessionHandler): #The APP_ID and the APP_SECRET variables contain information #required for Facebook Authentication APP_ID = '267910489968296' APP_SECRET = '02583f888c364e2d54fc081830c8f870' #If Offline then use this #my_url = 'http://localhost:8080/' #else if one Google App Engine use this as my_url my_url = 'http://facebook-gae-python.appspot.com/' def get(self): #Check whether 'code' is in the GET variables of the URL #if not then execute the below code to set the state variable in #the session if self.request.get('code') == '': #If not generate a state variable session_state = state_variable.SessionStateVariable() #Set the state variable in the session self.session['state'] = session_state.generateState() #The Dialog URL for Facebook Login dialog_url = 'http://www.facebook.com/dialog/oauth?client_id=' + \ self.APP_ID + '&redirect_uri=' + quote(self.my_url) + \ '&state=' + self.session.get('state') #Redirect to the Facebook Page (Please note the Redirection URL must #be updated on Facebook App's Site URL or Canvas URL self.redirect(dialog_url) else: #If state variable is already set then set the class variable self.state = self.session.get('state') #Check whether the State Variable is same as that returned by Facebook #Else report the CSRF Violation if self.request.get('state') == self.session.get('state'): #The token URL for generation of OAuth Access Token for Graph API #Requests from your application token_url = 'https://graph.facebook.com/oauth/access_token?client_id=' + \ self.APP_ID + '&redirect_uri=' + quote(self.my_url) + \ '&client_secret=' + self.APP_SECRET + '&code=' + self.request.get('code') #Get the token from the Token URL token_response = urlfetch.fetch(token_url) #Parse the string to get the Access token params = parse_qs(token_response.content) #Now params['access_token'][0] has the access_token for use #Requesting Facebook Graph API #the Graph URL graph_url = u'https://graph.facebook.com' #The API String for example /me or /me/movies etc. api_string = u'/me' #The concatenated URL for Graph API Request api_request_url = graph_url + api_string + u'?access_token=' + params['access_token'][0] #Fetch the Response from Graph API Request api_response = urlfetch.fetch(api_request_url) #Get the contents of the Response json_response = api_response.content #Convert the JSON String into a dictionary api_answer = loads(json_response) #Print your name on the screen! self.response.out.write('Hello ' + api_answer['name']) else: #CSRF Violation Response (if the state variables don't match) self.response.out.write('The states dont match. You may a victim of CSRF') #End of MainHandler Class #The WebApp2 WSGI Application definition app = webapp2.WSGIApplication([('/', MainHandler)], debug=True, config = session_module.session_config)
    mit
    jamison904/T999_minimum_kernel
    tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
    11088
    3246
    # Core.py - Python extension for perf script, core functions # # Copyright (C) 2010 by Tom Zanussi <[email protected]> # # This software may be distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. from collections import defaultdict def autodict(): return defaultdict(autodict) flag_fields = autodict() symbolic_fields = autodict() def define_flag_field(event_name, field_name, delim): flag_fields[event_name][field_name]['delim'] = delim def define_flag_value(event_name, field_name, value, field_str): flag_fields[event_name][field_name]['values'][value] = field_str def define_symbolic_field(event_name, field_name): # nothing to do, really pass def define_symbolic_value(event_name, field_name, value, field_str): symbolic_fields[event_name][field_name]['values'][value] = field_str def flag_str(event_name, field_name, value): string = "" if flag_fields[event_name][field_name]: print_delim = 0 keys = flag_fields[event_name][field_name]['values'].keys() keys.sort() for idx in keys: if not value and not idx: string += flag_fields[event_name][field_name]['values'][idx] break if idx and (value & idx) == idx: if print_delim and flag_fields[event_name][field_name]['delim']: string += " " + flag_fields[event_name][field_name]['delim'] + " " string += flag_fields[event_name][field_name]['values'][idx] print_delim = 1 value &= ~idx return string def symbol_str(event_name, field_name, value): string = "" if symbolic_fields[event_name][field_name]: keys = symbolic_fields[event_name][field_name]['values'].keys() keys.sort() for idx in keys: if not value and not idx: string = symbolic_fields[event_name][field_name]['values'][idx] break if (value == idx): string = symbolic_fields[event_name][field_name]['values'][idx] break return string trace_flags = { 0x00: "NONE", \ 0x01: "IRQS_OFF", \ 0x02: "IRQS_NOSUPPORT", \ 0x04: "NEED_RESCHED", \ 0x08: "HARDIRQ", \ 0x10: "SOFTIRQ" } def trace_flag_str(value): string = "" print_delim = 0 keys = trace_flags.keys() for idx in keys: if not value and not idx: string += "NONE" break if idx and (value & idx) == idx: if print_delim: string += " | "; string += trace_flags[idx] print_delim = 1 value &= ~idx return string def taskState(state): states = { 0 : "R", 1 : "S", 2 : "D", 64: "DEAD" } if state not in states: return "Unknown" return states[state] class EventHeaders: def __init__(self, common_cpu, common_secs, common_nsecs, common_pid, common_comm): self.cpu = common_cpu self.secs = common_secs self.nsecs = common_nsecs self.pid = common_pid self.comm = common_comm def ts(self): return (self.secs * (10 ** 9)) + self.nsecs def ts_format(self): return "%d.%d" % (self.secs, int(self.nsecs / 1000))
    gpl-2.0
    erdanieee/imagePicker
    node_modules/dmg-builder/vendor/dmgbuild/colors.py
    12
    13054
    # -*- coding: utf-8 -*- from __future__ import unicode_literals import re import math class Color (object): def to_rgb(self): raise Exception('Must implement to_rgb() in subclasses') class RGB (Color): def __init__(self, r, g, b): self.r = r self.g = g self.b = b def to_rgb(self): return self class HSL (Color): def __init__(self, h, s, l): self.h = h self.s = s self.l = l @staticmethod def _hue_to_rgb(t1, t2, hue): if hue < 0: hue += 6 elif hue >= 6: hue -= 6 if hue < 1: return (t2 - t1) * hue + t1 elif hue < 3: return t2 elif hue < 4: return (t2 - t1) * (4 - hue) + t1 else: return t1 def to_rgb(self): hue = self.h / 60.0 if self.l <= 0.5: t2 = self.l * (self.s + 1) else: t2 = self.l + self.s - (self.l * self.s) t1 = self.l * 2 - t2 r = self._hue_to_rgb(t1, t2, hue + 2) g = self._hue_to_rgb(t1, t2, hue) b = self._hue_to_rgb(t1, t2, hue - 2) return RGB(r, g, b) class HWB (Color): def __init__(self, h, w, b): self.h = h self.w = w self.b = b @staticmethod def _hue_to_rgb(hue): if hue < 0: hue += 6 elif hue >= 6: hue -= 6 if hue < 1: return hue elif hue < 3: return 1 elif hue < 4: return (4 - hue) else: return 0 def to_rgb(self): hue = self.h / 60.0 t1 = 1 - self.w - self.b r = self._hue_to_rgb(hue + 2) * t1 + self.w g = self._hue_to_rgb(hue) * t1 + self.w b = self._hue_to_rgb(hue - 2) * t1 + self.w return RGB(r, g, b) class CMYK (Color): def __init__(self, c, m, y, k): self.c = c self.m = m self.y = y self.k = k def to_rgb(self): r = 1.0 - min(1.0, self.c + self.k) g = 1.0 - min(1.0, self.m + self.k) b = 1.0 - min(1.0, self.y + self.k) return RGB(r, g, b) class Gray (Color): def __init__(self, g): self.g = g def to_rgb(self): return RGB(g, g, g) _x11_colors = { 'aliceblue': (240, 248, 255), 'antiquewhite': (250, 235, 215), 'aqua': ( 0, 255, 255), 'aquamarine': (127, 255, 212), 'azure': (240, 255, 255), 'beige': (245, 245, 220), 'bisque': (255, 228, 196), 'black': ( 0, 0, 0), 'blanchedalmond': (255, 235, 205), 'blue': ( 0, 0, 255), 'blueviolet': (138, 43, 226), 'brown': (165, 42, 42), 'burlywood': (222, 184, 135), 'cadetblue': ( 95, 158, 160), 'chartreuse': (127, 255, 0), 'chocolate': (210, 105, 30), 'coral': (255, 127, 80), 'cornflowerblue': (100, 149, 237), 'cornsilk': (255, 248, 220), 'crimson': (220, 20, 60), 'cyan': ( 0, 255, 255), 'darkblue': ( 0, 0, 139), 'darkcyan': ( 0, 139, 139), 'darkgoldenrod': (184, 134, 11), 'darkgray': (169, 169, 169), 'darkgreen': ( 0, 100, 0), 'darkgrey': (169, 169, 169), 'darkkhaki': (189, 183, 107), 'darkmagenta': (139, 0, 139), 'darkolivegreen': ( 85, 107, 47), 'darkorange': (255, 140, 0), 'darkorchid': (153, 50, 204), 'darkred': (139, 0, 0), 'darksalmon': (233, 150, 122), 'darkseagreen': (143, 188, 143), 'darkslateblue': ( 72, 61, 139), 'darkslategray': ( 47, 79, 79), 'darkslategrey': ( 47, 79, 79), 'darkturquoise': ( 0, 206, 209), 'darkviolet': (148, 0, 211), 'deeppink': (255, 20, 147), 'deepskyblue': ( 0, 191, 255), 'dimgray': (105, 105, 105), 'dimgrey': (105, 105, 105), 'dodgerblue': ( 30, 144, 255), 'firebrick': (178, 34, 34), 'floralwhite': (255, 250, 240), 'forestgreen': ( 34, 139, 34), 'fuchsia': (255, 0, 255), 'gainsboro': (220, 220, 220), 'ghostwhite': (248, 248, 255), 'gold': (255, 215, 0), 'goldenrod': (218, 165, 32), 'gray': (128, 128, 128), 'grey': (128, 128, 128), 'green': ( 0, 128, 0), 'greenyellow': (173, 255, 47), 'honeydew': (240, 255, 240), 'hotpink': (255, 105, 180), 'indianred': (205, 92, 92), 'indigo': ( 75, 0, 130), 'ivory': (255, 255, 240), 'khaki': (240, 230, 140), 'lavender': (230, 230, 250), 'lavenderblush': (255, 240, 245), 'lawngreen': (124, 252, 0), 'lemonchiffon': (255, 250, 205), 'lightblue': (173, 216, 230), 'lightcoral': (240, 128, 128), 'lightcyan': (224, 255, 255), 'lightgoldenrodyellow': (250, 250, 210), 'lightgray': (211, 211, 211), 'lightgreen': (144, 238, 144), 'lightgrey': (211, 211, 211), 'lightpink': (255, 182, 193), 'lightsalmon': (255, 160, 122), 'lightseagreen': ( 32, 178, 170), 'lightskyblue': (135, 206, 250), 'lightslategray': (119, 136, 153), 'lightslategrey': (119, 136, 153), 'lightsteelblue': (176, 196, 222), 'lightyellow': (255, 255, 224), 'lime': ( 0, 255, 0), 'limegreen': ( 50, 205, 50), 'linen': (250, 240, 230), 'magenta': (255, 0, 255), 'maroon': (128, 0, 0), 'mediumaquamarine': (102, 205, 170), 'mediumblue': ( 0, 0, 205), 'mediumorchid': (186, 85, 211), 'mediumpurple': (147, 112, 219), 'mediumseagreen': ( 60, 179, 113), 'mediumslateblue': (123, 104, 238), 'mediumspringgreen': ( 0, 250, 154), 'mediumturquoise': ( 72, 209, 204), 'mediumvioletred': (199, 21, 133), 'midnightblue': ( 25, 25, 112), 'mintcream': (245, 255, 250), 'mistyrose': (255, 228, 225), 'moccasin': (255, 228, 181), 'navajowhite': (255, 222, 173), 'navy': ( 0, 0, 128), 'oldlace': (253, 245, 230), 'olive': (128, 128, 0), 'olivedrab': (107, 142, 35), 'orange': (255, 165, 0), 'orangered': (255, 69, 0), 'orchid': (218, 112, 214), 'palegoldenrod': (238, 232, 170), 'palegreen': (152, 251, 152), 'paleturquoise': (175, 238, 238), 'palevioletred': (219, 112, 147), 'papayawhip': (255, 239, 213), 'peachpuff': (255, 218, 185), 'peru': (205, 133, 63), 'pink': (255, 192, 203), 'plum': (221, 160, 221), 'powderblue': (176, 224, 230), 'purple': (128, 0, 128), 'red': (255, 0, 0), 'rosybrown': (188, 143, 143), 'royalblue': ( 65, 105, 225), 'saddlebrown': (139, 69, 19), 'salmon': (250, 128, 114), 'sandybrown': (244, 164, 96), 'seagreen': ( 46, 139, 87), 'seashell': (255, 245, 238), 'sienna': (160, 82, 45), 'silver': (192, 192, 192), 'skyblue': (135, 206, 235), 'slateblue': (106, 90, 205), 'slategray': (112, 128, 144), 'slategrey': (112, 128, 144), 'snow': (255, 250, 250), 'springgreen': ( 0, 255, 127), 'steelblue': ( 70, 130, 180), 'tan': (210, 180, 140), 'teal': ( 0, 128, 128), 'thistle': (216, 191, 216), 'tomato': (255, 99, 71), 'turquoise': ( 64, 224, 208), 'violet': (238, 130, 238), 'wheat': (245, 222, 179), 'white': (255, 255, 255), 'whitesmoke': (245, 245, 245), 'yellow': (255, 255, 0), 'yellowgreen': (154, 205, 50) } _ws_re = re.compile('\s+') _token_re = re.compile('[A-Za-z_][A-Za-z0-9_]*') _hex_re = re.compile('#([0-9a-f]{3}(?:[0-9a-f]{3})?)$') _number_re = re.compile('[0-9]*(\.[0-9]*)') class ColorParser (object): def __init__(self, s): self._string = s self._pos = 0 def skipws(self): m = _ws_re.match(self._string, self._pos) if m: self._pos = m.end(0) def expect(self, s, context=''): if len(self._string) - self._pos < len(s) \ or self._string[self._pos:self._pos + len(s)] != s: raise ValueError('bad color "%s" - expected "%s"%s' % (self._string, s, context)) self._pos += len(s) def expectEnd(self): if self._pos != len(self._string): raise ValueError('junk at end of color "%s"' % self._string) def getToken(self): m = _token_re.match(self._string, self._pos) if m: token = m.group(0) self._pos = m.end(0) return token return None def parseNumber(self, context=''): m = _number_re.match(self._string, self._pos) if m: self._pos = m.end(0) return float(m.group(0)) raise ValueError('bad color "%s" - expected a number%s' % (self._string, context)) def parseColor(self): self.skipws() token = self.getToken() if token: if token == 'rgb': return self.parseRGB() elif token == 'hsl': return self.parseHSL() elif token == 'hwb': return self.parseHWB() elif token == 'cmyk': return self.parseCMYK() elif token == 'gray' or token == 'grey': return self.parseGray() try: r, g, b = _x11_colors[token] except KeyError: raise ValueError('unknown color name "%s"' % token) self.expectEnd() return RGB(r / 255.0, g / 255.0, b / 255.0) m = _hex_re.match(self._string, self._pos) if m: hrgb = m.group(1) if len(hrgb) == 3: r = int('0x' + 2 * hrgb[0], 16) g = int('0x' + 2 * hrgb[1], 16) b = int('0x' + 2 * hrgb[2], 16) else: r = int('0x' + hrgb[0:2], 16) g = int('0x' + hrgb[2:4], 16) b = int('0x' + hrgb[4:6], 16) self._pos = m.end(0) self.skipws() self.expectEnd() return RGB(r / 255.0, g / 255.0, b / 255.0) raise ValueError('bad color syntax "%s"' % self._string) def parseRGB(self): self.expect('(', 'after "rgb"') self.skipws() r = self.parseValue() self.skipws() self.expect(',', 'in "rgb"') self.skipws() g = self.parseValue() self.skipws() self.expect(',', 'in "rgb"') self.skipws() b = self.parseValue() self.skipws() self.expect(')', 'at end of "rgb"') self.skipws() self.expectEnd() return RGB(r, g, b) def parseHSL(self): self.expect('(', 'after "hsl"') self.skipws() h = self.parseAngle() self.skipws() self.expect(',', 'in "hsl"') self.skipws() s = self.parseValue() self.skipws() self.expect(',', 'in "hsl"') self.skipws() l = self.parseValue() self.skipws() self.expect(')', 'at end of "hsl"') self.skipws() self.expectEnd() return HSL(h, s, l) def parseHWB(self): self.expect('(', 'after "hwb"') self.skipws() h = self.parseAngle() self.skipws() self.expect(',', 'in "hwb"') self.skipws() w = self.parseValue() self.skipws() self.expect(',', 'in "hwb"') self.skipws() b = self.parseValue() self.skipws() self.expect(')', 'at end of "hwb"') self.skipws() self.expectEnd() return HWB(h, w, b) def parseCMYK(self): self.expect('(', 'after "cmyk"') self.skipws() c = self.parseValue() self.skipws() self.expect(',', 'in "cmyk"') self.skipws() m = self.parseValue() self.skipws() self.expect(',', 'in "cmyk"') self.skipws() y = self.parseValue() self.skipws() self.expect(',', 'in "cmyk"') self.skipws() k = self.parseValue() self.skipws() self.expect(')', 'at end of "cmyk"') self.skipws() self.expectEnd() return CMYK(c, m, y, k) def parseGray(self): self.expect('(', 'after "gray"') self.skipws() g = self.parseValue() self.skipws() self.expect(')', 'at end of "gray') self.skipws() self.expectEnd() return Gray(g) def parseValue(self): n = self.parseNumber() self.skipws() if self._string[self._pos] == '%': n = n / 100.0 self.pos += 1 return n def parseAngle(self): n = self.parseNumber() self.skipws() tok = self.getToken() if tok == 'rad': n = n * 180.0 / math.pi elif tok == 'grad' or tok == 'gon': n = n * 0.9 elif tok != 'deg': raise ValueError('bad angle unit "%s"' % tok) return n _color_re = re.compile('\s*(#|rgb|hsl|hwb|cmyk|gray|grey|%s)' % '|'.join(_x11_colors.keys())) def isAColor(s): return _color_re.match(s) def parseColor(s): return ColorParser(s).parseColor()
    mit
    Gregory-Howard/spaCy
    spacy/tests/en/test_punct.py
    4
    4576
    # coding: utf-8 """Test that open, closed and paired punctuation is split off correctly.""" from __future__ import unicode_literals import pytest from ...util import compile_prefix_regex from ...language_data import TOKENIZER_PREFIXES PUNCT_OPEN = ['(', '[', '{', '*'] PUNCT_CLOSE = [')', ']', '}', '*'] PUNCT_PAIRED = [('(', ')'), ('[', ']'), ('{', '}'), ('*', '*')] @pytest.mark.parametrize('text', ["(", "((", "<"]) def test_tokenizer_handles_only_punct(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == len(text) @pytest.mark.parametrize('punct', PUNCT_OPEN) @pytest.mark.parametrize('text', ["Hello"]) def test_tokenizer_splits_open_punct(en_tokenizer, punct, text): tokens = en_tokenizer(punct + text) assert len(tokens) == 2 assert tokens[0].text == punct assert tokens[1].text == text @pytest.mark.parametrize('punct', PUNCT_CLOSE) @pytest.mark.parametrize('text', ["Hello"]) def test_tokenizer_splits_close_punct(en_tokenizer, punct, text): tokens = en_tokenizer(text + punct) assert len(tokens) == 2 assert tokens[0].text == text assert tokens[1].text == punct @pytest.mark.parametrize('punct', PUNCT_OPEN) @pytest.mark.parametrize('punct_add', ["`"]) @pytest.mark.parametrize('text', ["Hello"]) def test_tokenizer_splits_two_diff_open_punct(en_tokenizer, punct, punct_add, text): tokens = en_tokenizer(punct + punct_add + text) assert len(tokens) == 3 assert tokens[0].text == punct assert tokens[1].text == punct_add assert tokens[2].text == text @pytest.mark.parametrize('punct', PUNCT_CLOSE) @pytest.mark.parametrize('punct_add', ["'"]) @pytest.mark.parametrize('text', ["Hello"]) def test_tokenizer_splits_two_diff_close_punct(en_tokenizer, punct, punct_add, text): tokens = en_tokenizer(text + punct + punct_add) assert len(tokens) == 3 assert tokens[0].text == text assert tokens[1].text == punct assert tokens[2].text == punct_add @pytest.mark.parametrize('punct', PUNCT_OPEN) @pytest.mark.parametrize('text', ["Hello"]) def test_tokenizer_splits_same_open_punct(en_tokenizer, punct, text): tokens = en_tokenizer(punct + punct + punct + text) assert len(tokens) == 4 assert tokens[0].text == punct assert tokens[3].text == text @pytest.mark.parametrize('punct', PUNCT_CLOSE) @pytest.mark.parametrize('text', ["Hello"]) def test_tokenizer_splits_same_close_punct(en_tokenizer, punct, text): tokens = en_tokenizer(text + punct + punct + punct) assert len(tokens) == 4 assert tokens[0].text == text assert tokens[1].text == punct @pytest.mark.parametrize('text', ["'The"]) def test_tokenizer_splits_open_appostrophe(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 2 assert tokens[0].text == "'" @pytest.mark.parametrize('text', ["Hello''"]) def test_tokenizer_splits_double_end_quote(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 2 tokens_punct = en_tokenizer("''") assert len(tokens_punct) == 1 @pytest.mark.parametrize('punct_open,punct_close', PUNCT_PAIRED) @pytest.mark.parametrize('text', ["Hello"]) def test_tokenizer_splits_open_close_punct(en_tokenizer, punct_open, punct_close, text): tokens = en_tokenizer(punct_open + text + punct_close) assert len(tokens) == 3 assert tokens[0].text == punct_open assert tokens[1].text == text assert tokens[2].text == punct_close @pytest.mark.parametrize('punct_open,punct_close', PUNCT_PAIRED) @pytest.mark.parametrize('punct_open2,punct_close2', [("`", "'")]) @pytest.mark.parametrize('text', ["Hello"]) def test_tokenizer_two_diff_punct(en_tokenizer, punct_open, punct_close, punct_open2, punct_close2, text): tokens = en_tokenizer(punct_open2 + punct_open + text + punct_close + punct_close2) assert len(tokens) == 5 assert tokens[0].text == punct_open2 assert tokens[1].text == punct_open assert tokens[2].text == text assert tokens[3].text == punct_close assert tokens[4].text == punct_close2 @pytest.mark.parametrize('text,punct', [("(can't", "(")]) def test_tokenizer_splits_pre_punct_regex(text, punct): en_search_prefixes = compile_prefix_regex(TOKENIZER_PREFIXES).search match = en_search_prefixes(text) assert match.group() == punct def test_tokenizer_splits_bracket_period(en_tokenizer): text = "(And a 6a.m. run through Washington Park)." tokens = en_tokenizer(text) assert tokens[len(tokens) - 1].text == "."
    mit
    apache/airflow
    airflow/hooks/subprocess.py
    2
    3589
    # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import signal from collections import namedtuple from subprocess import PIPE, STDOUT, Popen from tempfile import TemporaryDirectory, gettempdir from typing import Dict, List, Optional from airflow.hooks.base import BaseHook SubprocessResult = namedtuple('SubprocessResult', ['exit_code', 'output']) class SubprocessHook(BaseHook): """Hook for running processes with the ``subprocess`` module""" def __init__(self) -> None: self.sub_process = None super().__init__() def run_command( self, command: List[str], env: Optional[Dict[str, str]] = None, output_encoding: str = 'utf-8' ) -> SubprocessResult: """ Execute the command in a temporary directory which will be cleaned afterwards If ``env`` is not supplied, ``os.environ`` is passed :param command: the command to run :param env: Optional dict containing environment variables to be made available to the shell environment in which ``command`` will be executed. If omitted, ``os.environ`` will be used. :param output_encoding: encoding to use for decoding stdout :return: :class:`namedtuple` containing ``exit_code`` and ``output``, the last line from stderr or stdout """ self.log.info('Tmp dir root location: \n %s', gettempdir()) with TemporaryDirectory(prefix='airflowtmp') as tmp_dir: def pre_exec(): # Restore default signal disposition and invoke setsid for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'): if hasattr(signal, sig): signal.signal(getattr(signal, sig), signal.SIG_DFL) os.setsid() self.log.info('Running command: %s', command) self.sub_process = Popen( command, stdout=PIPE, stderr=STDOUT, cwd=tmp_dir, env=env if env or env == {} else os.environ, preexec_fn=pre_exec, ) self.log.info('Output:') line = '' for raw_line in iter(self.sub_process.stdout.readline, b''): line = raw_line.decode(output_encoding).rstrip() self.log.info("%s", line) self.sub_process.wait() self.log.info('Command exited with return code %s', self.sub_process.returncode) return SubprocessResult(exit_code=self.sub_process.returncode, output=line) def send_sigterm(self): """Sends SIGTERM signal to ``self.sub_process`` if one exists.""" self.log.info('Sending SIGTERM signal to process group') if self.sub_process and hasattr(self.sub_process, 'pid'): os.killpg(os.getpgid(self.sub_process.pid), signal.SIGTERM)
    apache-2.0
    mitchrule/Miscellaneous
    Django_Project/django/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py
    469
    4196
    import functools from pip._vendor.requests.adapters import HTTPAdapter from .controller import CacheController from .cache import DictCache from .filewrapper import CallbackFileWrapper class CacheControlAdapter(HTTPAdapter): invalidating_methods = set(['PUT', 'DELETE']) def __init__(self, cache=None, cache_etags=True, controller_class=None, serializer=None, heuristic=None, *args, **kw): super(CacheControlAdapter, self).__init__(*args, **kw) self.cache = cache or DictCache() self.heuristic = heuristic controller_factory = controller_class or CacheController self.controller = controller_factory( self.cache, cache_etags=cache_etags, serializer=serializer, ) def send(self, request, **kw): """ Send a request. Use the request information to see if it exists in the cache and cache the response if we need to and can. """ if request.method == 'GET': cached_response = self.controller.cached_request(request) if cached_response: return self.build_response(request, cached_response, from_cache=True) # check for etags and add headers if appropriate request.headers.update( self.controller.conditional_headers(request) ) resp = super(CacheControlAdapter, self).send(request, **kw) return resp def build_response(self, request, response, from_cache=False): """ Build a response by making a request or using the cache. This will end up calling send and returning a potentially cached response """ if not from_cache and request.method == 'GET': # apply any expiration heuristics if response.status == 304: # We must have sent an ETag request. This could mean # that we've been expired already or that we simply # have an etag. In either case, we want to try and # update the cache if that is the case. cached_response = self.controller.update_cached_response( request, response ) if cached_response is not response: from_cache = True # We are done with the server response, read a # possible response body (compliant servers will # not return one, but we cannot be 100% sure) and # release the connection back to the pool. response.read(decode_content=False) response.release_conn() response = cached_response # We always cache the 301 responses elif response.status == 301: self.controller.cache_response(request, response) else: # Check for any heuristics that might update headers # before trying to cache. if self.heuristic: response = self.heuristic.apply(response) # Wrap the response file with a wrapper that will cache the # response when the stream has been consumed. response._fp = CallbackFileWrapper( response._fp, functools.partial( self.controller.cache_response, request, response, ) ) resp = super(CacheControlAdapter, self).build_response( request, response ) # See if we should invalidate the cache. if request.method in self.invalidating_methods and resp.ok: cache_url = self.controller.cache_url(request.url) self.cache.delete(cache_url) # Give the request a from_cache attr to let people use it resp.from_cache = from_cache return resp def close(self): self.cache.close() super(CacheControlAdapter, self).close()
    mit
    jaharkes/home-assistant
    homeassistant/components/switch/mfi.py
    25
    3613
    """ Support for Ubiquiti mFi switches. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/switch.mfi/ """ import logging import requests import voluptuous as vol from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA) from homeassistant.const import ( CONF_HOST, CONF_PORT, CONF_PASSWORD, CONF_USERNAME, CONF_SSL, CONF_VERIFY_SSL) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['mficlient==0.3.0'] _LOGGER = logging.getLogger(__name__) DEFAULT_SSL = True DEFAULT_VERIFY_SSL = True SWITCH_MODELS = [ 'Outlet', 'Output 5v', 'Output 12v', 'Output 24v', ] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_PORT): cv.port, vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean, vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean, }) # pylint: disable=unused-variable def setup_platform(hass, config, add_devices, discovery_info=None): """Setup mFi sensors.""" host = config.get(CONF_HOST) username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) use_tls = config.get(CONF_SSL) verify_tls = config.get(CONF_VERIFY_SSL) default_port = use_tls and 6443 or 6080 port = int(config.get(CONF_PORT, default_port)) from mficlient.client import FailedToLogin, MFiClient try: client = MFiClient(host, username, password, port=port, use_tls=use_tls, verify=verify_tls) except (FailedToLogin, requests.exceptions.ConnectionError) as ex: _LOGGER.error('Unable to connect to mFi: %s', str(ex)) return False add_devices(MfiSwitch(port) for device in client.get_devices() for port in device.ports.values() if port.model in SWITCH_MODELS) class MfiSwitch(SwitchDevice): """Representation of an mFi switch-able device.""" def __init__(self, port): """Initialize the mFi device.""" self._port = port self._target_state = None @property def should_poll(self): """Polling is needed.""" return True @property def unique_id(self): """Return the unique ID of the device.""" return self._port.ident @property def name(self): """Return the name of the device.""" return self._port.label @property def is_on(self): """Return true if the device is on.""" return self._port.output def update(self): """Get the latest state and update the state.""" self._port.refresh() if self._target_state is not None: self._port.data['output'] = float(self._target_state) self._target_state = None def turn_on(self): """Turn the switch on.""" self._port.control(True) self._target_state = True def turn_off(self): """Turn the switch off.""" self._port.control(False) self._target_state = False @property def current_power_mwh(self): """Return the current power usage in mWh.""" return int(self._port.data.get('active_pwr', 0) * 1000) @property def device_state_attributes(self): """Return the state attributes fof the device.""" attr = {} attr['volts'] = round(self._port.data.get('v_rms', 0), 1) attr['amps'] = round(self._port.data.get('i_rms', 0), 1) return attr
    mit
    fernandopinhati/oppia
    core/platform/users/gae_current_user_services.py
    30
    2532
    # coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides a seam for user-related services.""" __author__ = 'Sean Lip' import feconf import logging import utils from google.appengine.api import users from google.appengine.ext import ndb def create_login_url(slug): """Creates a login url.""" return users.create_login_url(utils.set_url_query_parameter( feconf.SIGNUP_URL, 'return_url', slug)) def create_logout_url(slug): """Creates a logout url.""" logout_url = utils.set_url_query_parameter('/logout', 'return_url', slug) return logout_url def get_current_user(request): """Returns the current user.""" return users.get_current_user() def is_super_admin(user_id, request): """Checks whether the user with the given user_id owns this app. For GAE, the user in question is also required to be the current user. """ user = users.get_current_user() if user is None: return False return user.user_id() == user_id and users.is_current_user_admin() def get_user_id_from_email(email): """Given an email address, returns a user id. Returns None if the email address does not correspond to a valid user id. """ class _FakeUser(ndb.Model): _use_memcache = False _use_cache = False user = ndb.UserProperty(required=True) try: u = users.User(email) except users.UserNotFoundError: logging.error( 'The email address %s does not correspond to a valid user_id' % email) return None key = _FakeUser(id=email, user=u).put() obj = _FakeUser.get_by_id(key.id()) user_id = obj.user.user_id() if user_id: return unicode(user_id) else: return None def get_user_id(user): """ Given an user object, get the user id. """ return user.user_id() def get_user_email(user): """ Given an user object, get the user's email. """ return user.email()
    apache-2.0
    tammoippen/nest-simulator
    pynest/nest/tests/test_connect_pairwise_bernoulli.py
    11
    3393
    # -*- coding: utf-8 -*- # # test_connect_pairwise_bernoulli.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. import numpy as np import unittest import scipy.stats from . import test_connect_helpers as hf from .test_connect_parameters import TestParams class TestPairwiseBernoulli(TestParams): # specify connection pattern and specific params rule = 'pairwise_bernoulli' p = 0.5 conn_dict = {'rule': rule, 'p': p} # sizes of source-, target-population and connection probability for # statistical test N_s = 50 N_t = 50 # Critical values and number of iterations of two level test stat_dict = {'alpha2': 0.05, 'n_runs': 20} def testStatistics(self): for fan in ['in', 'out']: expected = hf.get_expected_degrees_bernoulli( self.p, fan, self.N_s, self.N_t) pvalues = [] for i in range(self.stat_dict['n_runs']): hf.reset_seed(i, self.nr_threads) self.setUpNetwork(conn_dict=self.conn_dict, N1=self.N_s, N2=self.N_t) degrees = hf.get_degrees(fan, self.pop1, self.pop2) degrees = hf.gather_data(degrees) # degrees = self.comm.gather(degrees, root=0) # if self.rank == 0: if degrees is not None: chi, p = hf.chi_squared_check(degrees, expected, self.rule) pvalues.append(p) hf.mpi_barrier() if degrees is not None: ks, p = scipy.stats.kstest(pvalues, 'uniform') self.assertTrue(p > self.stat_dict['alpha2']) def testAutapses(self): conn_params = self.conn_dict.copy() N = 10 conn_params['multapses'] = False # test that autapses exist conn_params['p'] = 1. conn_params['autapses'] = True pop = hf.nest.Create('iaf_neuron', N) hf.nest.Connect(pop, pop, conn_params) # make sure all connections do exist M = hf.get_connectivity_matrix(pop, pop) hf.mpi_assert(np.diag(M), np.ones(N), self) hf.nest.ResetKernel() # test that autapses were excluded conn_params['p'] = 1. conn_params['autapses'] = False pop = hf.nest.Create('iaf_neuron', N) hf.nest.Connect(pop, pop, conn_params) # make sure all connections do exist M = hf.get_connectivity_matrix(pop, pop) hf.mpi_assert(np.diag(M), np.zeros(N), self) def suite(): suite = unittest.TestLoader().loadTestsFromTestCase(TestPairwiseBernoulli) return suite def run(): runner = unittest.TextTestRunner(verbosity=2) runner.run(suite()) if __name__ == '__main__': run()
    gpl-2.0
    trankmichael/scikit-learn
    sklearn/datasets/svmlight_format.py
    114
    15826
    """This module implements a loader and dumper for the svmlight format This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. This format is used as the default format for both svmlight and the libsvm command line programs. """ # Authors: Mathieu Blondel <[email protected]> # Lars Buitinck <[email protected]> # Olivier Grisel <[email protected]> # License: BSD 3 clause from contextlib import closing import io import os.path import numpy as np import scipy.sparse as sp from ._svmlight_format import _load_svmlight_file from .. import __version__ from ..externals import six from ..externals.six import u, b from ..externals.six.moves import range, zip from ..utils import check_array from ..utils.fixes import frombuffer_empty def load_svmlight_file(f, n_features=None, dtype=np.float64, multilabel=False, zero_based="auto", query_id=False): """Load datasets in the svmlight / libsvm format into sparse CSR matrix This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. This format is used as the default format for both svmlight and the libsvm command line programs. Parsing a text based source can be expensive. When working on repeatedly on the same dataset, it is recommended to wrap this loader with joblib.Memory.cache to store a memmapped backup of the CSR results of the first call and benefit from the near instantaneous loading of memmapped structures for the subsequent calls. In case the file contains a pairwise preference constraint (known as "qid" in the svmlight format) these are ignored unless the query_id parameter is set to True. These pairwise preference constraints can be used to constraint the combination of samples when using pairwise loss functions (as is the case in some learning to rank problems) so that only pairs with the same query_id value are considered. This implementation is written in Cython and is reasonably fast. However, a faster API-compatible loader is also available at: https://github.com/mblondel/svmlight-loader Parameters ---------- f : {str, file-like, int} (Path to) a file to load. If a path ends in ".gz" or ".bz2", it will be uncompressed on the fly. If an integer is passed, it is assumed to be a file descriptor. A file-like or file descriptor will not be closed by this function. A file-like object must be opened in binary mode. n_features : int or None The number of features to use. If None, it will be inferred. This argument is useful to load several files that are subsets of a bigger sliced dataset: each subset might not have examples of every feature, hence the inferred shape might vary from one slice to another. multilabel : boolean, optional, default False Samples may have several labels each (see http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html) zero_based : boolean or "auto", optional, default "auto" Whether column indices in f are zero-based (True) or one-based (False). If column indices are one-based, they are transformed to zero-based to match Python/NumPy conventions. If set to "auto", a heuristic check is applied to determine this from the file contents. Both kinds of files occur "in the wild", but they are unfortunately not self-identifying. Using "auto" or True should always be safe. query_id : boolean, default False If True, will return the query_id array for each file. dtype : numpy data type, default np.float64 Data type of dataset to be loaded. This will be the data type of the output numpy arrays ``X`` and ``y``. Returns ------- X: scipy.sparse matrix of shape (n_samples, n_features) y: ndarray of shape (n_samples,), or, in the multilabel a list of tuples of length n_samples. query_id: array of shape (n_samples,) query_id for each sample. Only returned when query_id is set to True. See also -------- load_svmlight_files: similar function for loading multiple files in this format, enforcing the same number of features/columns on all of them. Examples -------- To use joblib.Memory to cache the svmlight file:: from sklearn.externals.joblib import Memory from sklearn.datasets import load_svmlight_file mem = Memory("./mycache") @mem.cache def get_data(): data = load_svmlight_file("mysvmlightfile") return data[0], data[1] X, y = get_data() """ return tuple(load_svmlight_files([f], n_features, dtype, multilabel, zero_based, query_id)) def _gen_open(f): if isinstance(f, int): # file descriptor return io.open(f, "rb", closefd=False) elif not isinstance(f, six.string_types): raise TypeError("expected {str, int, file-like}, got %s" % type(f)) _, ext = os.path.splitext(f) if ext == ".gz": import gzip return gzip.open(f, "rb") elif ext == ".bz2": from bz2 import BZ2File return BZ2File(f, "rb") else: return open(f, "rb") def _open_and_load(f, dtype, multilabel, zero_based, query_id): if hasattr(f, "read"): actual_dtype, data, ind, indptr, labels, query = \ _load_svmlight_file(f, dtype, multilabel, zero_based, query_id) # XXX remove closing when Python 2.7+/3.1+ required else: with closing(_gen_open(f)) as f: actual_dtype, data, ind, indptr, labels, query = \ _load_svmlight_file(f, dtype, multilabel, zero_based, query_id) # convert from array.array, give data the right dtype if not multilabel: labels = frombuffer_empty(labels, np.float64) data = frombuffer_empty(data, actual_dtype) indices = frombuffer_empty(ind, np.intc) indptr = np.frombuffer(indptr, dtype=np.intc) # never empty query = frombuffer_empty(query, np.intc) data = np.asarray(data, dtype=dtype) # no-op for float{32,64} return data, indices, indptr, labels, query def load_svmlight_files(files, n_features=None, dtype=np.float64, multilabel=False, zero_based="auto", query_id=False): """Load dataset from multiple files in SVMlight format This function is equivalent to mapping load_svmlight_file over a list of files, except that the results are concatenated into a single, flat list and the samples vectors are constrained to all have the same number of features. In case the file contains a pairwise preference constraint (known as "qid" in the svmlight format) these are ignored unless the query_id parameter is set to True. These pairwise preference constraints can be used to constraint the combination of samples when using pairwise loss functions (as is the case in some learning to rank problems) so that only pairs with the same query_id value are considered. Parameters ---------- files : iterable over {str, file-like, int} (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will be uncompressed on the fly. If an integer is passed, it is assumed to be a file descriptor. File-likes and file descriptors will not be closed by this function. File-like objects must be opened in binary mode. n_features: int or None The number of features to use. If None, it will be inferred from the maximum column index occurring in any of the files. This can be set to a higher value than the actual number of features in any of the input files, but setting it to a lower value will cause an exception to be raised. multilabel: boolean, optional Samples may have several labels each (see http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html) zero_based: boolean or "auto", optional Whether column indices in f are zero-based (True) or one-based (False). If column indices are one-based, they are transformed to zero-based to match Python/NumPy conventions. If set to "auto", a heuristic check is applied to determine this from the file contents. Both kinds of files occur "in the wild", but they are unfortunately not self-identifying. Using "auto" or True should always be safe. query_id: boolean, defaults to False If True, will return the query_id array for each file. dtype : numpy data type, default np.float64 Data type of dataset to be loaded. This will be the data type of the output numpy arrays ``X`` and ``y``. Returns ------- [X1, y1, ..., Xn, yn] where each (Xi, yi) pair is the result from load_svmlight_file(files[i]). If query_id is set to True, this will return instead [X1, y1, q1, ..., Xn, yn, qn] where (Xi, yi, qi) is the result from load_svmlight_file(files[i]) Notes ----- When fitting a model to a matrix X_train and evaluating it against a matrix X_test, it is essential that X_train and X_test have the same number of features (X_train.shape[1] == X_test.shape[1]). This may not be the case if you load the files individually with load_svmlight_file. See also -------- load_svmlight_file """ r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id)) for f in files] if (zero_based is False or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)): for ind in r: indices = ind[1] indices -= 1 n_f = max(ind[1].max() for ind in r) + 1 if n_features is None: n_features = n_f elif n_features < n_f: raise ValueError("n_features was set to {}," " but input file contains {} features" .format(n_features, n_f)) result = [] for data, indices, indptr, y, query_values in r: shape = (indptr.shape[0] - 1, n_features) X = sp.csr_matrix((data, indices, indptr), shape) X.sort_indices() result += X, y if query_id: result.append(query_values) return result def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id): is_sp = int(hasattr(X, "tocsr")) if X.dtype.kind == 'i': value_pattern = u("%d:%d") else: value_pattern = u("%d:%.16g") if y.dtype.kind == 'i': label_pattern = u("%d") else: label_pattern = u("%.16g") line_pattern = u("%s") if query_id is not None: line_pattern += u(" qid:%d") line_pattern += u(" %s\n") if comment: f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n" % __version__)) f.write(b("# Column indices are %s-based\n" % ["zero", "one"][one_based])) f.write(b("#\n")) f.writelines(b("# %s\n" % line) for line in comment.splitlines()) for i in range(X.shape[0]): if is_sp: span = slice(X.indptr[i], X.indptr[i + 1]) row = zip(X.indices[span], X.data[span]) else: nz = X[i] != 0 row = zip(np.where(nz)[0], X[i, nz]) s = " ".join(value_pattern % (j + one_based, x) for j, x in row) if multilabel: nz_labels = np.where(y[i] != 0)[0] labels_str = ",".join(label_pattern % j for j in nz_labels) else: labels_str = label_pattern % y[i] if query_id is not None: feat = (labels_str, query_id[i], s) else: feat = (labels_str, s) f.write((line_pattern % feat).encode('ascii')) def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None, multilabel=False): """Dump the dataset in svmlight / libsvm file format. This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. f : string or file-like in binary mode If string, specifies the path that will contain the data. If file-like, data will be written to f. f should be opened in binary mode. zero_based : boolean, optional Whether column indices should be written zero-based (True) or one-based (False). comment : string, optional Comment to insert at the top of the file. This should be either a Unicode string, which will be encoded as UTF-8, or an ASCII byte string. If a comment is given, then it will be preceded by one that identifies the file as having been dumped by scikit-learn. Note that not all tools grok comments in SVMlight files. query_id : array-like, shape = [n_samples] Array containing pairwise preference constraints (qid in svmlight format). multilabel: boolean, optional Samples may have several labels each (see http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html) """ if comment is not None: # Convert comment string to list of lines in UTF-8. # If a byte string is passed, then check whether it's ASCII; # if a user wants to get fancy, they'll have to decode themselves. # Avoid mention of str and unicode types for Python 3.x compat. if isinstance(comment, bytes): comment.decode("ascii") # just for the exception else: comment = comment.encode("utf-8") if six.b("\0") in comment: raise ValueError("comment string contains NUL byte") y = np.asarray(y) if y.ndim != 1 and not multilabel: raise ValueError("expected y of shape (n_samples,), got %r" % (y.shape,)) Xval = check_array(X, accept_sparse='csr') if Xval.shape[0] != y.shape[0]: raise ValueError("X.shape[0] and y.shape[0] should be the same, got" " %r and %r instead." % (Xval.shape[0], y.shape[0])) # We had some issues with CSR matrices with unsorted indices (e.g. #1501), # so sort them here, but first make sure we don't modify the user's X. # TODO We can do this cheaper; sorted_indices copies the whole matrix. if Xval is X and hasattr(Xval, "sorted_indices"): X = Xval.sorted_indices() else: X = Xval if hasattr(X, "sort_indices"): X.sort_indices() if query_id is not None: query_id = np.asarray(query_id) if query_id.shape[0] != y.shape[0]: raise ValueError("expected query_id of shape (n_samples,), got %r" % (query_id.shape,)) one_based = not zero_based if hasattr(f, "write"): _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id) else: with open(f, "wb") as f: _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
    bsd-3-clause
    ubiar/odoo
    addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/logreport.py
    386
    1736
    # -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import tempfile LOG_DEBUG='debug' LOG_INFO='info' LOG_WARNING='warn' LOG_ERROR='error' LOG_CRITICAL='critical' _logger = logging.getLogger(__name__) def log_detail(self): import os logfile_name = os.path.join(tempfile.gettempdir(), "openerp_report_designer.log") hdlr = logging.FileHandler(logfile_name) formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) _logger.addHandler(hdlr) _logger.setLevel(logging.INFO) class Logger(object): def log_write(self, name, level, msg): getattr(_logger,level)(msg) def shutdown(self): logging.shutdown() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
    agpl-3.0
    noinil/pinang
    share/figures/energy_function/native.py
    1
    1420
    #!/usr/bin/env python from pylab import * figure(figsize=(8,5), dpi=80) subplot(111) X = np.linspace(0.5, 2, 256,endpoint=False) C = 5*pow((1/X),12) - 6*pow((1/X),10) # C1 = 5*pow((0.5/X),12) - 6*pow((0.5/X),10) A = 5*pow((1/X),12) R = - 6*pow((1/X),10) plot(X, C, color="green", linewidth=2.5, linestyle="-", label=r"$V_{native} = \varepsilon [5(\frac{\sigma}{r})^{12} - 6(\frac{\sigma}{r})^{10} ]$") plot(X, A, color="blue", linewidth=1.5, linestyle="--", label=r"$V_{repulsion} = 5 \varepsilon (\frac{\sigma}{r})^{12}$") plot(X, R, color="red", linewidth=1.5, linestyle="--", label=r"$V_{attraction} = - 6 \varepsilon (\frac{\sigma}{r})^{10}$") # plot(X, C1, color="black", linewidth=1, linestyle="-.", # label=r"$V_{native} = \varepsilon [5(\frac{\sigma}{r})^{12} - 6(\frac{\sigma}{r})^{10} ]$") ax = gca() ax.spines['right'].set_color('none') ax.spines['bottom'].set_color('none') ax.xaxis.set_ticks_position('top') ax.spines['top'].set_position(('data',0)) ax.yaxis.set_ticks_position('left') ax.spines['left'].set_position(('data',0.8)) xlim(0.6, X.max()*1.01) xticks([0.8,1], [r'$0.8\sigma$', r'$\sigma$']) ylim(-3,5) yticks([-1], [r'$-\varepsilon$']) t = 1 plot([t,t],[0,-1], color ='green', linewidth=1., linestyle="--") plot([0.8,t],[-1,-1], color ='green', linewidth=1., linestyle="--") legend(loc='upper right') savefig("native.svg", dpi=72) show()
    gpl-2.0
    shivam1111/odoo
    addons/hr_holidays/tests/common.py
    389
    4347
    # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.tests import common class TestHrHolidaysBase(common.TransactionCase): def setUp(self): super(TestHrHolidaysBase, self).setUp() cr, uid = self.cr, self.uid # Usefull models self.hr_employee = self.registry('hr.employee') self.hr_holidays = self.registry('hr.holidays') self.hr_holidays_status = self.registry('hr.holidays.status') self.res_users = self.registry('res.users') self.res_partner = self.registry('res.partner') # Find Employee group group_employee_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user') self.group_employee_id = group_employee_ref and group_employee_ref[1] or False # Find Hr User group group_hr_user_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_user') self.group_hr_user_ref_id = group_hr_user_ref and group_hr_user_ref[1] or False # Find Hr Manager group group_hr_manager_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_manager') self.group_hr_manager_ref_id = group_hr_manager_ref and group_hr_manager_ref[1] or False # Test partners to use through the various tests self.hr_partner_id = self.res_partner.create(cr, uid, { 'name': 'Gertrude AgrolaitPartner', 'email': '[email protected]', }) self.email_partner_id = self.res_partner.create(cr, uid, { 'name': 'Patrick Ratatouille', 'email': '[email protected]', }) # Test users to use through the various tests self.user_hruser_id = self.res_users.create(cr, uid, { 'name': 'Armande HrUser', 'login': 'Armande', 'alias_name': 'armande', 'email': '[email protected]', 'groups_id': [(6, 0, [self.group_employee_id, self.group_hr_user_ref_id])] }, {'no_reset_password': True}) self.user_hrmanager_id = self.res_users.create(cr, uid, { 'name': 'Bastien HrManager', 'login': 'bastien', 'alias_name': 'bastien', 'email': '[email protected]', 'groups_id': [(6, 0, [self.group_employee_id, self.group_hr_manager_ref_id])] }, {'no_reset_password': True}) self.user_none_id = self.res_users.create(cr, uid, { 'name': 'Charlie Avotbonkeur', 'login': 'charlie', 'alias_name': 'charlie', 'email': '[email protected]', 'groups_id': [(6, 0, [])] }, {'no_reset_password': True}) self.user_employee_id = self.res_users.create(cr, uid, { 'name': 'David Employee', 'login': 'david', 'alias_name': 'david', 'email': '[email protected]', 'groups_id': [(6, 0, [self.group_employee_id])] }, {'no_reset_password': True}) # Hr Data self.employee_emp_id = self.hr_employee.create(cr, uid, { 'name': 'David Employee', 'user_id': self.user_employee_id, }) self.employee_hruser_id = self.hr_employee.create(cr, uid, { 'name': 'Armande HrUser', 'user_id': self.user_hruser_id, })
    agpl-3.0
    2014c2g4/2015cda0623
    static/Brython3.1.0-20150301-090019/Lib/browser/markdown.py
    623
    13060
    # -*- coding: utf-8 -*- try: import _jsre as re except: import re import random import time letters = 'abcdefghijklmnopqrstuvwxyz' letters += letters.upper()+'0123456789' class URL: def __init__(self,src): elts = src.split(maxsplit=1) self.href = elts[0] self.alt = '' if len(elts)==2: alt = elts[1] if alt[0]=='"' and alt[-1]=='"':self.alt=alt[1:-1] elif alt[0]=="'" and alt[-1]=="'":self.alt=alt[1:-1] elif alt[0]=="(" and alt[-1]==")":self.alt=alt[1:-1] class CodeBlock: def __init__(self,line): self.lines = [line] if line.startswith("```") and len(line)>3: self.info = line[3:] else: self.info = None def to_html(self): if self.lines[0].startswith("`"): self.lines.pop(0) res = escape('\n'.join(self.lines)) res = unmark(res) _class = self.info or "marked" res = '<pre class="%s">%s</pre>\n' %(_class, res) return res,[] class HtmlBlock: def __init__(self, src): self.src = src def to_html(self): return self.src class Marked: def __init__(self, line=''): self.line = line self.children = [] def to_html(self): return apply_markdown(self.line) # get references refs = {} ref_pattern = r"^\[(.*)\]:\s+(.*)" def mark(src): global refs t0 = time.time() refs = {} # split source in sections # sections can be : # - a block-level HTML element (markdown syntax will not be processed) # - a script # - a span-level HTML tag (markdown syntax will be processed) # - a code block # normalise line feeds src = src.replace('\r\n','\n') # lines followed by dashes src = re.sub(r'(.*?)\n=+\n', '\n# \\1\n', src) src = re.sub(r'(.*?)\n-+\n', '\n## \\1\n', src) lines = src.split('\n')+[''] i = bq = 0 ul = ol = 0 while i<len(lines): # enclose lines starting by > in a blockquote if lines[i].startswith('>'): nb = 1 while nb<len(lines[i]) and lines[i][nb]=='>': nb += 1 lines[i] = lines[i][nb:] if nb>bq: lines.insert(i,'<blockquote>'*(nb-bq)) i += 1 bq = nb elif nb<bq: lines.insert(i,'</blockquote>'*(bq-nb)) i += 1 bq = nb elif bq>0: lines.insert(i,'</blockquote>'*bq) i += 1 bq = 0 # unordered lists if lines[i].strip() and lines[i].lstrip()[0] in '-+*' \ and len(lines[i].lstrip())>1 \ and lines[i].lstrip()[1]==' ' \ and (i==0 or ul or not lines[i-1].strip()): # line indentation indicates nesting level nb = 1+len(lines[i])-len(lines[i].lstrip()) lines[i] = '<li>'+lines[i][nb:] if nb>ul: lines.insert(i,'<ul>'*(nb-ul)) i += 1 elif nb<ul: lines.insert(i,'</ul>'*(ul-nb)) i += 1 ul = nb elif ul and not lines[i].strip(): if i<len(lines)-1 and lines[i+1].strip() \ and not lines[i+1].startswith(' '): nline = lines[i+1].lstrip() if nline[0] in '-+*' and len(nline)>1 and nline[1]==' ': pass else: lines.insert(i,'</ul>'*ul) i += 1 ul = 0 # ordered lists mo = re.search(r'^(\d+\.)',lines[i]) if mo: if not ol: lines.insert(i,'<ol>') i += 1 lines[i] = '<li>'+lines[i][len(mo.groups()[0]):] ol = 1 elif ol and not lines[i].strip() and i<len(lines)-1 \ and not lines[i+1].startswith(' ') \ and not re.search(r'^(\d+\.)',lines[i+1]): lines.insert(i,'</ol>') i += 1 ol = 0 i += 1 if ul: lines.append('</ul>'*ul) if ol: lines.append('</ol>'*ol) if bq: lines.append('</blockquote>'*bq) t1 = time.time() #print('part 1', t1-t0) sections = [] scripts = [] section = Marked() i = 0 while i<len(lines): line = lines[i] if line.strip() and line.startswith(' '): if isinstance(section,Marked) and section.line: sections.append(section) section = CodeBlock(line[4:]) j = i+1 while j<len(lines) and lines[j].startswith(' '): section.lines.append(lines[j][4:]) j += 1 sections.append(section) section = Marked() i = j continue elif line.strip() and line.startswith("```"): # fenced code blocks à la Github Flavoured Markdown if isinstance(section,Marked) and section.line: sections.append(section) section = CodeBlock(line) j = i+1 while j<len(lines) and not lines[j].startswith("```"): section.lines.append(lines[j]) j += 1 sections.append(section) section = Marked() i = j+1 continue elif line.lower().startswith('<script'): if isinstance(section,Marked) and section.line: sections.append(section) section = Marked() j = i+1 while j<len(lines): if lines[j].lower().startswith('</script>'): scripts.append('\n'.join(lines[i+1:j])) for k in range(i,j+1): lines[k] = '' break j += 1 i = j continue # atext header elif line.startswith('#'): level = 1 line = lines[i] while level<len(line) and line[level]=='#' and level<=6: level += 1 if not line[level+1:].strip(): if level==1: i += 1 continue else: lines[i] = '<H%s>%s</H%s>\n' %(level-1,'#',level-1) else: lines[i] = '<H%s>%s</H%s>\n' %(level,line[level+1:],level) else: mo = re.search(ref_pattern,line) if mo is not None: if isinstance(section,Marked) and section.line: sections.append(section) section = Marked() key = mo.groups()[0] value = URL(mo.groups()[1]) refs[key.lower()] = value else: if not line.strip(): line = '<p></p>' if section.line: section.line += '\n' section.line += line i += 1 t2 = time.time() #print('section 2', t2-t1) if isinstance(section,Marked) and section.line: sections.append(section) res = '' for section in sections: mk,_scripts = section.to_html() res += mk scripts += _scripts #print('end mark', time.time()-t2) return res,scripts def escape(czone): czone = czone.replace('&','&amp;') czone = czone.replace('<','&lt;') czone = czone.replace('>','&gt;') czone = czone.replace('_','&#95;') czone = czone.replace('*','&#42;') return czone def s_escape(mo): # used in re.sub czone = mo.string[mo.start():mo.end()] return escape(czone) def unmark(code_zone): # convert _ to &#95; inside inline code code_zone = code_zone.replace('_','&#95;') return code_zone def s_unmark(mo): # convert _ to &#95; inside inline code code_zone = mo.string[mo.start():mo.end()] code_zone = code_zone.replace('_','&#95;') return code_zone def apply_markdown(src): scripts = [] key = None t0 = time.time() i = 0 while i<len(src): if src[i]=='[': start_a = i+1 while True: end_a = src.find(']',i) if end_a == -1: break if src[end_a-1]=='\\': i = end_a+1 else: break if end_a>-1 and src[start_a:end_a].find('\n')==-1: link = src[start_a:end_a] rest = src[end_a+1:].lstrip() if rest and rest[0]=='(': j = 0 while True: end_href = rest.find(')',j) if end_href == -1: break if rest[end_href-1]=='\\': j = end_href+1 else: break if end_href>-1 and rest[:end_href].find('\n')==-1: tag = '<a href="'+rest[1:end_href]+'">'+link+'</a>' src = src[:start_a-1]+tag+rest[end_href+1:] i = start_a+len(tag) elif rest and rest[0]=='[': j = 0 while True: end_key = rest.find(']',j) if end_key == -1: break if rest[end_key-1]=='\\': j = end_key+1 else: break if end_key>-1 and rest[:end_key].find('\n')==-1: if not key: key = link if key.lower() not in refs: raise KeyError('unknown reference %s' %key) url = refs[key.lower()] tag = '<a href="'+url+'">'+link+'</a>' src = src[:start_a-1]+tag+rest[end_key+1:] i = start_a+len(tag) i += 1 t1 = time.time() #print('apply markdown 1', t1-t0) # before applying the markup with _ and *, isolate HTML tags because # they can contain these characters # We replace them temporarily by a random string rstr = ''.join(random.choice(letters) for i in range(16)) i = 0 state = None start = -1 data = '' tags = [] while i<len(src): if src[i]=='<': j = i+1 while j<len(src): if src[j]=='"' or src[j]=="'": if state==src[j] and src[j-1]!='\\': state = None j = start+len(data)+1 data = '' elif state==None: state = src[j] start = j else: data += src[j] elif src[j]=='>' and state is None: tags.append(src[i:j+1]) src = src[:i]+rstr+src[j+1:] i += len(rstr) break elif state=='"' or state=="'": data += src[j] elif src[j]=='\n': # if a sign < is not followed by > in the same ligne, it # is the sign "lesser than" src = src[:i]+'&lt;'+src[i+1:] j=i+4 break j += 1 elif src[i]=='`' and i>0 and src[i-1]!='\\': # ignore the content of inline code j = i+1 while j<len(src): if src[j]=='`' and src[j-1]!='\\': break j += 1 i = j i += 1 t2 = time.time() #print('apply markdown 2', len(src), t2-t1) # escape "<", ">", "&" and "_" in inline code code_pattern = r'\`(.*?)\`' src = re.sub(code_pattern,s_escape,src) # replace escaped ` _ * by HTML characters src = src.replace(r'\\`','&#96;') src = src.replace(r'\_','&#95;') src = src.replace(r'\*','&#42;') # emphasis strong_patterns = [('STRONG',r'\*\*(.*?)\*\*'),('B',r'__(.*?)__')] for tag,strong_pattern in strong_patterns: src = re.sub(strong_pattern,r'<%s>\1</%s>' %(tag,tag),src) em_patterns = [('EM',r'\*(.*?)\*'),('I',r'\_(.*?)\_')] for tag,em_pattern in em_patterns: src = re.sub(em_pattern,r'<%s>\1</%s>' %(tag,tag),src) # inline code code_pattern = r'\`(.*?)\`' src = re.sub(code_pattern,r'<code>\1</code>',src) # restore tags while True: pos = src.rfind(rstr) if pos==-1: break repl = tags.pop() src = src[:pos]+repl+src[pos+len(rstr):] src = '<p>'+src+'</p>' t3 = time.time() #print('apply markdown 3', t3-t2) return src,scripts
    gpl-3.0
    Fujin-Suzukaze/GT-I9505-Kernel-JB-4.3
    tools/perf/scripts/python/futex-contention.py
    11261
    1486
    # futex contention # (c) 2010, Arnaldo Carvalho de Melo <[email protected]> # Licensed under the terms of the GNU GPL License version 2 # # Translation of: # # http://sourceware.org/systemtap/wiki/WSFutexContention # # to perf python scripting. # # Measures futex contention import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from Util import * process_names = {} thread_thislock = {} thread_blocktime = {} lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time process_names = {} # long-lived pid-to-execname mapping def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, nr, uaddr, op, val, utime, uaddr2, val3): cmd = op & FUTEX_CMD_MASK if cmd != FUTEX_WAIT: return # we don't care about originators of WAKE events process_names[tid] = comm thread_thislock[tid] = uaddr thread_blocktime[tid] = nsecs(s, ns) def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, nr, ret): if thread_blocktime.has_key(tid): elapsed = nsecs(s, ns) - thread_blocktime[tid] add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed) del thread_blocktime[tid] del thread_thislock[tid] def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): for (tid, lock) in lock_waits: min, max, avg, count = lock_waits[tid, lock] print "%s[%d] lock %x contended %d times, %d avg ns" % \ (process_names[tid], tid, lock, count, avg)
    gpl-2.0
    gpkulkarni/linux-arm64
    scripts/checkkconfigsymbols.py
    88
    15783
    #!/usr/bin/env python2 """Find Kconfig symbols that are referenced but not defined.""" # (c) 2014-2015 Valentin Rothberg <[email protected]> # (c) 2014 Stefan Hengelein <[email protected]> # # Licensed under the terms of the GNU GPL License version 2 import difflib import os import re import signal import sys from multiprocessing import Pool, cpu_count from optparse import OptionParser from subprocess import Popen, PIPE, STDOUT # regex expressions OPERATORS = r"&|\(|\)|\||\!" FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}" DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*" EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+" DEFAULT = r"default\s+.*?(?:if\s.+){,1}" STMT = r"^\s*(?:if|select|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")" # regex objects REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$") REGEX_FEATURE = re.compile(r'(?!\B)' + FEATURE + r'(?!\B)') REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE) REGEX_KCONFIG_DEF = re.compile(DEF) REGEX_KCONFIG_EXPR = re.compile(EXPR) REGEX_KCONFIG_STMT = re.compile(STMT) REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$") REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$") REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+") REGEX_QUOTES = re.compile("(\"(.*?)\")") def parse_options(): """The user interface of this module.""" usage = "%prog [options]\n\n" \ "Run this tool to detect Kconfig symbols that are referenced but " \ "not defined in\nKconfig. The output of this tool has the " \ "format \'Undefined symbol\\tFile list\'\n\n" \ "If no option is specified, %prog will default to check your\n" \ "current tree. Please note that specifying commits will " \ "\'git reset --hard\'\nyour current tree! You may save " \ "uncommitted changes to avoid losing data." parser = OptionParser(usage=usage) parser.add_option('-c', '--commit', dest='commit', action='store', default="", help="Check if the specified commit (hash) introduces " "undefined Kconfig symbols.") parser.add_option('-d', '--diff', dest='diff', action='store', default="", help="Diff undefined symbols between two commits. The " "input format bases on Git log's " "\'commmit1..commit2\'.") parser.add_option('-f', '--find', dest='find', action='store_true', default=False, help="Find and show commits that may cause symbols to be " "missing. Required to run with --diff.") parser.add_option('-i', '--ignore', dest='ignore', action='store', default="", help="Ignore files matching this pattern. Note that " "the pattern needs to be a Python regex. To " "ignore defconfigs, specify -i '.*defconfig'.") parser.add_option('-s', '--sim', dest='sim', action='store', default="", help="Print a list of maximum 10 string-similar symbols.") parser.add_option('', '--force', dest='force', action='store_true', default=False, help="Reset current Git tree even when it's dirty.") (opts, _) = parser.parse_args() if opts.commit and opts.diff: sys.exit("Please specify only one option at once.") if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff): sys.exit("Please specify valid input in the following format: " "\'commmit1..commit2\'") if opts.commit or opts.diff: if not opts.force and tree_is_dirty(): sys.exit("The current Git tree is dirty (see 'git status'). " "Running this script may\ndelete important data since it " "calls 'git reset --hard' for some performance\nreasons. " " Please run this script in a clean Git tree or pass " "'--force' if you\nwant to ignore this warning and " "continue.") if opts.commit: opts.find = False if opts.ignore: try: re.match(opts.ignore, "this/is/just/a/test.c") except: sys.exit("Please specify a valid Python regex.") return opts def main(): """Main function of this module.""" opts = parse_options() if opts.sim and not opts.commit and not opts.diff: sims = find_sims(opts.sim, opts.ignore) if sims: print "%s: %s" % (yel("Similar symbols"), ', '.join(sims)) else: print "%s: no similar symbols found" % yel("Similar symbols") sys.exit(0) # dictionary of (un)defined symbols defined = {} undefined = {} if opts.commit or opts.diff: head = get_head() # get commit range commit_a = None commit_b = None if opts.commit: commit_a = opts.commit + "~" commit_b = opts.commit elif opts.diff: split = opts.diff.split("..") commit_a = split[0] commit_b = split[1] undefined_a = {} undefined_b = {} # get undefined items before the commit execute("git reset --hard %s" % commit_a) undefined_a, _ = check_symbols(opts.ignore) # get undefined items for the commit execute("git reset --hard %s" % commit_b) undefined_b, defined = check_symbols(opts.ignore) # report cases that are present for the commit but not before for feature in sorted(undefined_b): # feature has not been undefined before if not feature in undefined_a: files = sorted(undefined_b.get(feature)) undefined[feature] = files # check if there are new files that reference the undefined feature else: files = sorted(undefined_b.get(feature) - undefined_a.get(feature)) if files: undefined[feature] = files # reset to head execute("git reset --hard %s" % head) # default to check the entire tree else: undefined, defined = check_symbols(opts.ignore) # now print the output for feature in sorted(undefined): print red(feature) files = sorted(undefined.get(feature)) print "%s: %s" % (yel("Referencing files"), ", ".join(files)) sims = find_sims(feature, opts.ignore, defined) sims_out = yel("Similar symbols") if sims: print "%s: %s" % (sims_out, ', '.join(sims)) else: print "%s: %s" % (sims_out, "no similar symbols found") if opts.find: print "%s:" % yel("Commits changing symbol") commits = find_commits(feature, opts.diff) if commits: for commit in commits: commit = commit.split(" ", 1) print "\t- %s (\"%s\")" % (yel(commit[0]), commit[1]) else: print "\t- no commit found" print # new line def yel(string): """ Color %string yellow. """ return "\033[33m%s\033[0m" % string def red(string): """ Color %string red. """ return "\033[31m%s\033[0m" % string def execute(cmd): """Execute %cmd and return stdout. Exit in case of error.""" pop = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True) (stdout, _) = pop.communicate() # wait until finished if pop.returncode != 0: sys.exit(stdout) return stdout def find_commits(symbol, diff): """Find commits changing %symbol in the given range of %diff.""" commits = execute("git log --pretty=oneline --abbrev-commit -G %s %s" % (symbol, diff)) return [x for x in commits.split("\n") if x] def tree_is_dirty(): """Return true if the current working tree is dirty (i.e., if any file has been added, deleted, modified, renamed or copied but not committed).""" stdout = execute("git status --porcelain") for line in stdout: if re.findall(r"[URMADC]{1}", line[:2]): return True return False def get_head(): """Return commit hash of current HEAD.""" stdout = execute("git rev-parse HEAD") return stdout.strip('\n') def partition(lst, size): """Partition list @lst into eveni-sized lists of size @size.""" return [lst[i::size] for i in xrange(size)] def init_worker(): """Set signal handler to ignore SIGINT.""" signal.signal(signal.SIGINT, signal.SIG_IGN) def find_sims(symbol, ignore, defined = []): """Return a list of max. ten Kconfig symbols that are string-similar to @symbol.""" if defined: return sorted(difflib.get_close_matches(symbol, set(defined), 10)) pool = Pool(cpu_count(), init_worker) kfiles = [] for gitfile in get_files(): if REGEX_FILE_KCONFIG.match(gitfile): kfiles.append(gitfile) arglist = [] for part in partition(kfiles, cpu_count()): arglist.append((part, ignore)) for res in pool.map(parse_kconfig_files, arglist): defined.extend(res[0]) return sorted(difflib.get_close_matches(symbol, set(defined), 10)) def get_files(): """Return a list of all files in the current git directory.""" # use 'git ls-files' to get the worklist stdout = execute("git ls-files") if len(stdout) > 0 and stdout[-1] == "\n": stdout = stdout[:-1] files = [] for gitfile in stdout.rsplit("\n"): if ".git" in gitfile or "ChangeLog" in gitfile or \ ".log" in gitfile or os.path.isdir(gitfile) or \ gitfile.startswith("tools/"): continue files.append(gitfile) return files def check_symbols(ignore): """Find undefined Kconfig symbols and return a dict with the symbol as key and a list of referencing files as value. Files matching %ignore are not checked for undefined symbols.""" pool = Pool(cpu_count(), init_worker) try: return check_symbols_helper(pool, ignore) except KeyboardInterrupt: pool.terminate() pool.join() sys.exit(1) def check_symbols_helper(pool, ignore): """Helper method for check_symbols(). Used to catch keyboard interrupts in check_symbols() in order to properly terminate running worker processes.""" source_files = [] kconfig_files = [] defined_features = [] referenced_features = dict() # {file: [features]} for gitfile in get_files(): if REGEX_FILE_KCONFIG.match(gitfile): kconfig_files.append(gitfile) else: if ignore and not re.match(ignore, gitfile): continue # add source files that do not match the ignore pattern source_files.append(gitfile) # parse source files arglist = partition(source_files, cpu_count()) for res in pool.map(parse_source_files, arglist): referenced_features.update(res) # parse kconfig files arglist = [] for part in partition(kconfig_files, cpu_count()): arglist.append((part, ignore)) for res in pool.map(parse_kconfig_files, arglist): defined_features.extend(res[0]) referenced_features.update(res[1]) defined_features = set(defined_features) # inverse mapping of referenced_features to dict(feature: [files]) inv_map = dict() for _file, features in referenced_features.iteritems(): for feature in features: inv_map[feature] = inv_map.get(feature, set()) inv_map[feature].add(_file) referenced_features = inv_map undefined = {} # {feature: [files]} for feature in sorted(referenced_features): # filter some false positives if feature == "FOO" or feature == "BAR" or \ feature == "FOO_BAR" or feature == "XXX": continue if feature not in defined_features: if feature.endswith("_MODULE"): # avoid false positives for kernel modules if feature[:-len("_MODULE")] in defined_features: continue undefined[feature] = referenced_features.get(feature) return undefined, defined_features def parse_source_files(source_files): """Parse each source file in @source_files and return dictionary with source files as keys and lists of references Kconfig symbols as values.""" referenced_features = dict() for sfile in source_files: referenced_features[sfile] = parse_source_file(sfile) return referenced_features def parse_source_file(sfile): """Parse @sfile and return a list of referenced Kconfig features.""" lines = [] references = [] if not os.path.exists(sfile): return references with open(sfile, "r") as stream: lines = stream.readlines() for line in lines: if not "CONFIG_" in line: continue features = REGEX_SOURCE_FEATURE.findall(line) for feature in features: if not REGEX_FILTER_FEATURES.search(feature): continue references.append(feature) return references def get_features_in_line(line): """Return mentioned Kconfig features in @line.""" return REGEX_FEATURE.findall(line) def parse_kconfig_files(args): """Parse kconfig files and return tuple of defined and references Kconfig symbols. Note, @args is a tuple of a list of files and the @ignore pattern.""" kconfig_files = args[0] ignore = args[1] defined_features = [] referenced_features = dict() for kfile in kconfig_files: defined, references = parse_kconfig_file(kfile) defined_features.extend(defined) if ignore and re.match(ignore, kfile): # do not collect references for files that match the ignore pattern continue referenced_features[kfile] = references return (defined_features, referenced_features) def parse_kconfig_file(kfile): """Parse @kfile and update feature definitions and references.""" lines = [] defined = [] references = [] skip = False if not os.path.exists(kfile): return defined, references with open(kfile, "r") as stream: lines = stream.readlines() for i in range(len(lines)): line = lines[i] line = line.strip('\n') line = line.split("#")[0] # ignore comments if REGEX_KCONFIG_DEF.match(line): feature_def = REGEX_KCONFIG_DEF.findall(line) defined.append(feature_def[0]) skip = False elif REGEX_KCONFIG_HELP.match(line): skip = True elif skip: # ignore content of help messages pass elif REGEX_KCONFIG_STMT.match(line): line = REGEX_QUOTES.sub("", line) features = get_features_in_line(line) # multi-line statements while line.endswith("\\"): i += 1 line = lines[i] line = line.strip('\n') features.extend(get_features_in_line(line)) for feature in set(features): if REGEX_NUMERIC.match(feature): # ignore numeric values continue references.append(feature) return defined, references if __name__ == "__main__": main()
    gpl-2.0
    denisff/python-for-android
    python-build/python-libs/gdata/src/gdata/tlslite/integration/TLSSocketServerMixIn.py
    320
    2203
    """TLS Lite + SocketServer.""" from gdata.tlslite.TLSConnection import TLSConnection class TLSSocketServerMixIn: """ This class can be mixed in with any L{SocketServer.TCPServer} to add TLS support. To use this class, define a new class that inherits from it and some L{SocketServer.TCPServer} (with the mix-in first). Then implement the handshake() method, doing some sort of server handshake on the connection argument. If the handshake method returns True, the RequestHandler will be triggered. Below is a complete example of a threaded HTTPS server:: from SocketServer import * from BaseHTTPServer import * from SimpleHTTPServer import * from tlslite.api import * s = open("./serverX509Cert.pem").read() x509 = X509() x509.parse(s) certChain = X509CertChain([x509]) s = open("./serverX509Key.pem").read() privateKey = parsePEMKey(s, private=True) sessionCache = SessionCache() class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn, HTTPServer): def handshake(self, tlsConnection): try: tlsConnection.handshakeServer(certChain=certChain, privateKey=privateKey, sessionCache=sessionCache) tlsConnection.ignoreAbruptClose = True return True except TLSError, error: print "Handshake failure:", str(error) return False httpd = MyHTTPServer(('localhost', 443), SimpleHTTPRequestHandler) httpd.serve_forever() """ def finish_request(self, sock, client_address): tlsConnection = TLSConnection(sock) if self.handshake(tlsConnection) == True: self.RequestHandlerClass(tlsConnection, client_address, self) tlsConnection.close() #Implement this method to do some form of handshaking. Return True #if the handshake finishes properly and the request is authorized. def handshake(self, tlsConnection): raise NotImplementedError()
    apache-2.0
    JohnGeorgiadis/invenio
    invenio/modules/jsonalchemy/jsonext/readers/json_reader.py
    17
    3532
    # -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2013 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Json Reader.""" import re from invenio.modules.jsonalchemy.reader import ModelParser from invenio.modules.jsonalchemy.reader import Reader class JsonReader(Reader): """JSON reader.""" __master_format__ = 'json' @staticmethod def split_blob(blob, schema=None, **kwargs): """ In case of several objs inside the blob this method specify how to split then and work one by one afterwards. """ return blob.splitlines() def _prepare_blob(self): """ """ model_fields = ModelParser.resolve_models( self._json.model_info.names, self._json.additional_info.namespace).get('fields', {}) model_json_ids = list(model_fields.keys()) model_field_names = list(model_fields.values()) for key in list(self._blob.keys()): if key in model_field_names and key not in model_json_ids: _key = model_json_ids[model_field_names.index(key)] self._blob[_key] = self._blob[key] del self._blob[key] def _get_elements_from_blob(self, regex_key): if regex_key in ('entire_record', '*'): return self._blob elements = [] for k in regex_key: regex = re.compile(k) keys = filter(regex.match, self._blob.keys()) values = [] for key in keys: values.append(self._blob.get(key)) elements.extend(values) return elements def _unpack_rule(self, json_id, field_name=None): super(JsonReader, self)._unpack_rule(json_id, field_name) def _apply_virtual_rules(self, json_id, field_name, rule): """JSON if a bit special as you can set the value of this fields""" if json_id in self._blob: field_defs = [] field_defs.append(('calculated', rule['rules'].get('calculated', []))) field_defs.append(('derived', rule['rules'].get('derived', []))) for (field_type, ffield_def) in field_defs: for field_def in ffield_def: info = self._find_field_metadata(json_id, field_name, field_type, field_def) self._json['__meta_metadata__'][field_name] = info self._json.__setitem__(field_name, self._blob[json_id], extend=False, exclude=['decorators', 'extensions']) return else: super(JsonReader, self)._apply_virtual_rules(json_id, field_name, rule) reader = JsonReader
    gpl-2.0
    bollwyvl/nbviewer
    tasks.py
    3
    2576
    #!/usr/bin/env python # -*- coding: utf-8 -*- import os import json import shutil import tempfile import invoke from notebook import DEFAULT_STATIC_FILES_PATH APP_ROOT = os.path.dirname(__file__) NPM_BIN = os.path.join(APP_ROOT, "node_modules", ".bin") @invoke.task def test(): invoke.run("nosetests -v") @invoke.task def bower(): invoke.run( "cd {}/nbviewer/static &&".format(APP_ROOT) + " {}/bower install".format(NPM_BIN) + " --config.interactive=false --allow-root" ) @invoke.task def less(debug=False): if debug: extra = "--source-map" else: extra = " --clean-css='--s1 --advanced --compatibility=ie8'" tmpl = ( "cd {}/nbviewer/static/less ".format(APP_ROOT) + " && {}/lessc".format(NPM_BIN) + " {1} " " --include-path={2}" " --autoprefix='> 1%'" " {0}.less ../build/{0}.css" ) args = (extra, DEFAULT_STATIC_FILES_PATH) [ invoke.run(tmpl.format(less_file, *args)) for less_file in ["styles", "notebook", "slides"] ] @invoke.task def screenshots(root="http://localhost:5000/", dest="./screenshots"): dest = os.path.abspath(dest) script = """ root = "{root}" urls = ({{name, url}} for name, url of {{ home: "" dir: "github/ipython/ipython/tree/3.x/examples/" user: "github/ipython/" gists: "gist/fperez/" notebook: "github/ipython/ipython/blob/3.x/examples/Notebook/Notebook%20Basics.ipynb"}}) screens = ({{name, w, h}} for name, [w, h] of {{ smartphone_portrait: [320, 480] smartphone_landscape: [480, 320] tablet_portrait: [768, 1024] tablet_landscape: [1024, 768] desktop_standard: [1280, 1024] desktop_1080p: [1920, 1080] }}) casper.start root casper.each screens, (_, screen) -> @then -> @viewport screen.w, screen.h, -> _.each urls, (_, page) -> @thenOpen root + page.url, -> @wait 1000 @then -> @echo "#{{page.name}} #{{screen.name}}" @capture "{dest}/#{{page.name}}-#{{screen.name}}.png" casper.run() """.format(root=root, dest=dest) tmpdir = tempfile.mkdtemp() tmpfile = os.path.join(tmpdir, "screenshots.coffee") with open(tmpfile, "w+") as f: f.write(script) invoke.run("casperjs test {script}".format(script=tmpfile)) shutil.rmtree(tmpdir)
    bsd-3-clause
    scattering/ipeek
    server/plot_dcs.py
    1
    3478
    # -*- coding: utf-8 -*- import h5py import simplejson import os import numpy as np #import matplotlib.pyplot as plt from time import time def Elam(lam): """ convert wavelength in angstroms to energy in meV """ return 81.81/lam**2 def Ek(k): """ convert wave-vector in inver angstroms to energy in meV """ return 2.072*k**2 def kE(E): return np.sqrt(E/2.072) def Qfunc(ki, kf, theta): """ evaluate the magnitude of Q from ki, kf, and theta theta is the angle between kf and ki, sometimes called 2 theta, units of degrees """ return np.sqrt( ki**2 + kf**2 - 2*ki*kf*np.cos(theta*np.pi/180) ) def Ef_from_timechannel(timeChannel, t_SD_min, speedRatDenom, masterSpeed): """ using the parameters t_SD_min = minimum sample to detector time speedRatDenom = to set FOL chopper speed masterSpeed = chopper speed (except for FOL chopper) using the variabl timeChannel, where I am numbering from 1 <be careful of this convention> """ return 8.41e7 / (t_SD_min + (timeChannel+1)* (6e4 *(speedRatDenom/masterSpeed)) )**2 def process_raw_dcs(data_path): # t0 = time() os.chdir(data_path) # change working directory detInfo = np.genfromtxt('dcs_detector_info.txt', skip_header=1, skip_footer=17) detToTwoTheta = detInfo[:,9] # 10th column os.system('gzip -dc livedata.dcs.gz > livedata.dcs') #os.system('C:\\Software\\Octave-3.6.4\\bin\\octave --eval "load livedata.dcs; save -hdf5 livedata.hdf;"') os.system('octave --eval "load livedata.dcs; save -hdf5 livedata.hdf;"') f = h5py.File('livedata.hdf') data = f['histodata']['value'].value ch_wl = f['ch_wl']['value'].value Ei = Elam(ch_wl) ki = kE(Ei) dE =0.5*(-0.10395+0.05616 *Ei+0.00108 *Ei**2) #take the putative resolution and halve it masterSpeed = f['ch_ms']['value'].value speedRatDenom = f['ch_srdenom']['value'].value t_SD_min = f['tsdmin']['value'].value Q_max = Qfunc(ki,ki,150) Q_min = 0 E_bins = np.linspace(-Ei, Ei, int(2*Ei/dE) ) Q_bins = np.linspace(Q_min,Q_max,301) #for every point in {timechannel, detectorchannel} space, map into a bin of {E,Q} space #remember, data is organized as data[detectorchannel][timechannel] i,j = np.indices(data.shape) ef = Ef_from_timechannel(j, t_SD_min, speedRatDenom, masterSpeed) Q_ = Qfunc(ki, kE(ef), detToTwoTheta[:, None]) E_transfer = Ei-ef E_mask = (E_transfer > -Ei) EQ_data, xedges, yedges = np.histogram2d(Q_[E_mask], E_transfer[E_mask], bins=(Q_bins, E_bins), range=([Q_min,Q_max], [-Ei, Ei]), weights=data[E_mask]) stop_date = ''.join(chr(a) for a in f['stop_date']['value'].value.flatten()) start_date = ''.join(chr(a) for a in f['start_date']['value'].value.flatten()) output = { "title": "DCS snapshot", "dims": { "ymin": -Ei, "ymax": Ei, "ydim": EQ_data.shape[1], "xmin": 0, "xmax": Q_max, "xdim": EQ_data.shape[0], "zmin": EQ_data.min(), "zmax": EQ_data.max() }, "type": "2d", "ylabel": "Ei-Ef [meV]", "xlabel": "|Q| [Å⁻¹]", "z": [EQ_data.T.tolist()], "options": {}, "metadata": { "stop_date": stop_date, "start_date": start_date } } #print time()-t0 return simplejson.dumps([output])
    unlicense
    autodrive/utils3
    utils3/tests_remote/tests_remote.py
    1
    4137
    import os import unittest from .. import git_util class TestGitUtilRemotes(unittest.TestCase): def test_get_remote_branch_list(self): # function under test result_set = set(git_util.get_remote_branch_list()) # sample file in the test script folder filename = os.path.join(os.path.split(__file__)[0], 'remote_branch_list.txt') pattern_str = 'heads' # get sample file if os.path.exists(filename): with open(filename, 'r') as f: tags_list = [tag_str.strip() for tag_str in f.readlines()] else: # if file missing # make a list from git ls-remote Warning('''file %s might be missing make a list from git ls-remote''' % (filename)) result_txt = git_util.git('ls-remote --%s' % pattern_str) result_line_list = result_txt.splitlines() if result_line_list[0].startswith('From '): result_line_list.pop(0) tags_list = [] with open(filename, 'w') as f_out: # build list of expected tags for line_txt in result_line_list: line_split_list = line_txt.split() # filter remote tags filtered_line_split_list = [txt for txt in line_split_list if txt.startswith('refs/%s/' % pattern_str) and (not txt.endswith('^{}'))] if filtered_line_split_list: for tag_item in filtered_line_split_list: tag_items = tag_item.split('/')[2:] tag_txt = '/'.join(tag_items) f_out.write(tag_txt + '\n') tags_list.append(tag_txt) # finished making a list from git ls-remote expected_set = set(tags_list) self.assertFalse(expected_set - result_set, msg=''' expected set = %r result_set = %r '''%(expected_set, result_set)) def test_get_remote_tag_list(self): result_list = git_util.get_remote_tag_list() result_set = set(result_list) input_file_name = os.path.join(os.path.split(__file__)[0], 'tags_list.txt') if os.path.exists(input_file_name): with open(input_file_name, 'r') as f: tags_list = [tag_str.strip() for tag_str in f.readlines()] else: print('''test_get_remote_tag_list() : file %s might be missing make a list from git ls-remote''' % (input_file_name)) result_txt = git_util.git('ls-remote --tags') result_line_list = result_txt.splitlines() if result_line_list[0].startswith('From '): result_line_list.pop(0) tags_list = [] with open(input_file_name, 'w') as f_out: # build list of expected tags for line_txt in result_line_list: line_split_list = line_txt.split() # filter remote tags filtered_line_split_list = [txt for txt in line_split_list if txt.startswith('refs/tags/') and (not txt.endswith('^{}'))] if filtered_line_split_list: for tag_item in filtered_line_split_list: tag_items = tag_item.split('/')[2:] tag_txt = '/'.join(tag_items) f_out.write(tag_txt + '\n') tags_list.append(tag_txt) # finished making a list from git ls-remote expected_set = set(tags_list) self.assertFalse(expected_set - result_set, msg=''' expected set = %r result_set = %r '''%(expected_set, result_set)) def test_is_branch_in_remote_branch_list(self): self.assertTrue(git_util.is_branch_in_remote_branch_list('master', 'origin', False)) self.assertFalse(git_util.is_branch_in_remote_branch_list('__m_a_s_t_e_r__', 'origin', False)) if __name__ == '__main__': unittest.main()
    apache-2.0
    ravindrasingh22/ansible
    v1/ansible/runner/connection_plugins/local.py
    110
    5581
    # (c) 2012, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. import traceback import os import pipes import shutil import subprocess import select import fcntl from ansible import errors from ansible import utils from ansible.callbacks import vvv class Connection(object): ''' Local based connections ''' def __init__(self, runner, host, port, *args, **kwargs): self.runner = runner self.host = host # port is unused, since this is local self.port = port self.has_pipelining = False # TODO: add su(needs tty), pbrun, pfexec self.become_methods_supported=['sudo'] def connect(self, port=None): ''' connect to the local host; nothing to do here ''' return self def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the local host ''' # su requires to be run from a terminal, and therefore isn't supported here (yet?) if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") if self.runner.become and sudoable: local_cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '-H', self.runner.become_exe) else: if executable: local_cmd = executable.split() + ['-c', cmd] else: local_cmd = cmd executable = executable.split()[0] if executable else None vvv("EXEC %s" % (local_cmd), host=self.host) p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring), cwd=self.runner.basedir, executable=executable, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if self.runner.become and sudoable and self.runner.become_pass: fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) become_output = '' while success_key not in become_output: if prompt and become_output.endswith(prompt): break if utils.su_prompts.check_su_prompt(become_output): break rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self.runner.timeout) if p.stdout in rfd: chunk = p.stdout.read() elif p.stderr in rfd: chunk = p.stderr.read() else: stdout, stderr = p.communicate() raise errors.AnsibleError('timeout waiting for %s password prompt:\n' % self.runner.become_method + become_output) if not chunk: stdout, stderr = p.communicate() raise errors.AnsibleError('%s output closed while waiting for password prompt:\n' % self.runner.become_method + become_output) become_output += chunk if success_key not in become_output: p.stdin.write(self.runner.become_pass + '\n') fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) stdout, stderr = p.communicate() return (p.returncode, '', stdout, stderr) def put_file(self, in_path, out_path): ''' transfer a file from local to local ''' vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) if not os.path.exists(in_path): raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path) try: shutil.copyfile(in_path, out_path) except shutil.Error: traceback.print_exc() raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path)) except IOError: traceback.print_exc() raise errors.AnsibleError("failed to transfer file to %s" % out_path) def fetch_file(self, in_path, out_path): vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) ''' fetch a file from local to local -- for copatibility ''' self.put_file(in_path, out_path) def close(self): ''' terminate the connection; nothing to do here ''' pass
    gpl-3.0
    rg3915/django-basic-apps
    basic/music/urls.py
    10
    1276
    from django.conf.urls.defaults import * urlpatterns = patterns('basic.music.views', url(r'^genres/(?P<slug>[-\w]+)/$', view='genre_detail', name='music_genre_detail', ), url (r'^genres/$', view='genre_list', name='music_genre_list', ), url(r'^labels/(?P<slug>[-\w]+)/$', view='label_detail', name='music_label_detail', ), url (r'^labels/$', view='label_list', name='music_label_list', ), url(r'^bands/(?P<slug>[-\w]+)/$', view='band_detail', name='music_band_detail', ), url (r'^bands/$', view='band_list', name='music_band_list', ), url(r'^albums/(?P<slug>[-\w]+)/$', view='album_detail', name='music_album_detail', ), url (r'^albums/$', view='album_list', name='music_album_list', ), url(r'^tracks/(?P<slug>[-\w]+)/$', view='track_detail', name='music_track_detail', ), url (r'^tracks/$', view='track_list', name='music_track_list', ), ) urlpatterns += patterns('', url (r'^$', view='django.views.generic.simple.direct_to_template', kwargs={'template': 'music/index.html'}, name='music_index', ), )
    bsd-3-clause
    pycepa/pycepa
    modules/Tor/cell/cell.py
    1
    11202
    from cryptography import x509 from cryptography.hazmat.backends import default_backend from time import time from datetime import datetime import struct import os import socket import logging log = logging.getLogger(__name__) # default protocol version before negotiation proto_version = 3 class CellError(Exception): """ Generic cell error. """ pass class TorError(Exception): """ Generic tor protocol error. """ pass class FixedCell(object): """ Fixed length cell. """ cell_type = -1 def __init__(self, circuit_id=None): self.fixed = True self.circuit_id = circuit_id or 0 def unpack(self, data): self.data = data def pack(self, data): """ Pack the circuit id, cell type, and data. """ if proto_version < 4: data = struct.pack('>HB509s', self.circuit_id, self.cell_type, data) else: data = struct.pack('>IB509s', self.circuit_id, self.cell_type, data) return data class VariableCell(object): """ Variable lengthed cell. """ cell_type = -1 def __init__(self, circuit_id=None): self.fixed = False self.circuit_id = circuit_id or 0 def has_len(self): """ Returns true if the length header has been parsed. """ return hasattr(self, 'length') def len(self, data=None): """ Get or set the length of the cell. """ if data: self.length = struct.unpack('>H', data[:2])[0] elif self.has_len(): return self.length def unpack(self, data): self.data = data[:self.length] def pack(self, data): """ Pack the circuit id, cell type, length, and data. """ if proto_version < 4: header = struct.pack('>HBH', self.circuit_id, self.cell_type, len(data)) else: header = struct.pack('>IBH', self.circuit_id, self.cell_type, len(data)) return header + data class Relay(FixedCell): """ Relay cell. """ cell_type = 3 def get_str(self, include_digest=True): """ Returns the packed data without sending so that it can be encrypted. """ if isinstance(self.data, str): return self.data if not self.data['data']: self.data['data'] = '' if include_digest: digest = self.data['digest'] else: digest = '\x00' * 4 return struct.pack('>BHH4sH498s', self.data['command'], 0, self.data['stream_id'], digest, len(self.data['data']), self.data['data']) def parse(self): """ Parse a received relay cell after decryption. This currently can't be implemented as a part of the unpack() function because the data must first be decrypted. """ headers = struct.unpack('>BHH4sH', self.data[:11]) self.data = self.data[11:] if len(self.data) < headers[4] or headers[1]: raise CellError('Invalid relay packet (possibly not from this OR).') try: text = relay_commands[headers[0]] except IndexError: raise CellError('Invalid relay packet command.') self.data = { 'command': headers[0], 'command_text': text, 'recognized': headers[1], 'stream_id': headers[2], 'digest': headers[3], 'length': headers[4], 'data': self.data[:headers[4]] } def pack(self, data): return super(Relay, self).pack(self.data) def init_relay(self, data): """ Set the relay cell data. """ self.data = data class Padding(FixedCell): """ Padding cell. """ cell_type = 0 class Destroy(FixedCell): """ Destroy cell. """ cell_type = 4 def unpack(self, data): super(Destroy, self).unpack(data) reason = struct.unpack('>B', self.data[0])[0] reasons = [ 'No reason given.', 'Tor protocol violation.', 'Internal error.', 'A client sent a TRUNCATE command.', 'Not currently operating; trying to save bandwidth.', 'Out of memory, sockets, or circuit IDs.', 'Unable to reach relay.', 'Connected to relay, but its OR identity was not as expected.', 'The OR connection that was carrying this circuit died.', 'The circuit has expired for being dirty or old.' 'Circuit construction took too long.', 'The circuit was destroyed w/o client TRUNCATE.', 'Request for unknown hidden service.' ] raise TorError('Circuit closed: %s' % reasons[reason]) class CreateFast(FixedCell): """ CreateFast cell. """ cell_type = 5 def __init__(self, circuit_id=None): super(CreateFast, self).__init__(circuit_id=circuit_id) self.key_material = os.urandom(20) def pack(self, data): data = struct.pack('>20s', self.key_material) return super(CreateFast, self).pack(data) class CreatedFast(FixedCell): """ CreatedFast cell. """ cell_type = 6 def unpack(self, data): """ Unpack the key material. """ super(CreatedFast, self).unpack(data) self.key_material, self.derivative_key = struct.unpack('>20s20s', self.data[:40]) class Versions(VariableCell): """ Versions cell. """ cell_type = 7 def unpack(self, data): """ Parse the received versions. """ super(Versions, self).unpack(data) self.versions = struct.unpack('>' + 'H' * int(len(self.data) / 2), self.data) def pack(self, data): """ Pack our known versions. """ data = struct.pack('>HH', 3,4) return super(Versions, self).pack(data) class Netinfo(FixedCell): """ Netinfo cell. """ cell_type = 8 def unpack(self, data): """ Parse out netinfo. """ super(Netinfo, self).unpack(data) data = self.data time = struct.unpack('>I', data[:4])[0] data = data[4:] # decode our IP address host_type, address, data = self.decode_ip(data) self.our_address = address self.router_addresses = [] # iterate over OR addresses. num_addresses = data[0] if not isinstance(num_addresses, int): num_addresses = struct.unpack('B', num_addresses)[0] data = data[1:] for _ in range(num_addresses): host_type, address, data = self.decode_ip(data) self.router_addresses.append(address) def decode_ip(self, data): """ Decode IPv4 and IPv6 addresses. """ host_type, size = struct.unpack('>BB', data[:2]) data = data[2:] address = struct.unpack('>%ds' % size, data[:size])[0] data = data[size:] if host_type == 4: address = socket.inet_ntop(socket.AF_INET, address) elif host_type == 6: address = socket.inet_ntop(socket.AF_INET6, address) else: raise CellError('Do we allow hostnames in NETINFO?') return host_type, address, data def pack(self, data): """ Pack our own netinfo. """ ips = data data = struct.pack('>I', int(time())) data += self.encode_ip(ips['other']) data += struct.pack('>B', 1) data += self.encode_ip(ips['me']) return super(Netinfo, self).pack(data) def encode_ip(self, ip): """ Encode an IPv4 address. """ return struct.pack('>BB', 4, 4) + socket.inet_aton(ip) class RelayEarly(Relay): """ RelayEarly cell. """ cell_type = 9 class Create2(FixedCell): """ Create2 cell. """ cell_type = 10 def pack(self, data): data = struct.pack('>HH', 0x2, len(data)) + data return super(Create2, self).pack(data) class Created2(FixedCell): """ Created2 cell. """ cell_type = 11 def unpack(self, data): super(Created2, self).unpack(data) length, self.Y, self.auth = struct.unpack('>H32s32s', data[:66]) class Certs(VariableCell): """ Certs cell. """ cell_type = 129 def unpack(self, data): """ Unpack a certs cell. Parses out all of the send certs and does *very* basic validation. """ super(Certs, self).unpack(data) data = self.data num_certs = data[0] if not isinstance(num_certs, int): num_certs = struct.unpack('>B', num_certs)[0] data = data[1:] now = datetime.now().strftime('%Y%m%d%H%M%S%z') self.certs = {} for _ in range(num_certs): # get cert type and length cert_info = struct.unpack('>BH', data[:3]) data = data[3:] # unpack the cert cert_type = cert_info[0] cert = struct.unpack('>%ds' % cert_info[1], data[:cert_info[1]])[0] data = data[cert_info[1]:] # we only want one of each certificate type if cert_type in self.certs or int(cert_type) > 3: raise CellError('Duplicate or invalid certificate received.') # load the certificate and check expiration. # cert = crypto.load_certificate(crypto.FILETYPE_ASN1, cert) cert = x509.load_der_x509_certificate(cert, default_backend()) now = datetime.now() if cert.not_valid_before > now or cert.not_valid_after < now: log.error('got invalid certificate date.') raise CellError('Certificate expired.') self.certs[cert_type] = cert log.info('got cert type %d, hash: %s' % (cert_type, cert)) class AuthChallenge(VariableCell): """ AuthChallenge cell. """ cell_type = 130 def unpack(self, data): """ Unpack the auth challenge. Currently not doing anything with it. """ super(AuthChallenge, self).unpack(data) struct.unpack('>32sH', self.data[:34]) def cell_type_to_name(cell_type): """ Convert a cell type to its name. """ if cell_type in cell_types: return cell_types[cell_type].__name__ else: return '' def relay_name_to_command(name): """ Converts relay name to a command. """ if name in relay_commands: return relay_commands.index(name) else: return -1 # List of cell types. cell_types = { 0: Padding, 3: Relay, 4: Destroy, 5: CreateFast, 6: CreatedFast, 7: Versions, 8: Netinfo, 9: RelayEarly, 10: Create2, 11: Created2, 129: Certs, 130: AuthChallenge } # List of relay commnads. relay_commands = [ '', 'RELAY_BEGIN', 'RELAY_DATA', 'RELAY_END', 'RELAY_CONNECTED', 'RELAY_SENDME', 'RELAY_EXTEND', 'RELAY_EXTENDED', 'RELAY_TRUNCATE', 'RELAY_TRUNCATED', 'RELAY_DROP', 'RELAY_RESOLVE', 'RELAY_RESOLVED', 'RELAY_BEGIN_DIR', 'RELAY_EXTEND2', 'RELAY_EXTENDED2' ]
    gpl-3.0
    adsorensen/girder
    plugins/oauth/server/providers/globus.py
    3
    3567
    #!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # Copyright Kitware Inc. # # Licensed under the Apache License, Version 2.0 ( the "License" ); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### from six.moves import urllib from girder.api.rest import getApiUrl, RestException from .base import ProviderBase from .. import constants class Globus(ProviderBase): _AUTH_URL = 'https://auth.globus.org/v2/oauth2/authorize' _AUTH_SCOPES = ['urn:globus:auth:scope:auth.globus.org:view_identities', 'openid', 'profile', 'email'] _TOKEN_URL = 'https://auth.globus.org/v2/oauth2/token' _API_USER_URL = 'https://auth.globus.org/v2/oauth2/userinfo' def getClientIdSetting(self): return self.model('setting').get( constants.PluginSettings.GLOBUS_CLIENT_ID) def getClientSecretSetting(self): return self.model('setting').get( constants.PluginSettings.GLOBUS_CLIENT_SECRET) @classmethod def getUrl(cls, state): clientId = cls.model('setting').get( constants.PluginSettings.GLOBUS_CLIENT_ID) if clientId is None: raise Exception('No Globus client ID setting is present.') callbackUrl = '/'.join((getApiUrl(), 'oauth', 'globus', 'callback')) query = urllib.parse.urlencode({ 'response_type': 'code', 'access_type': 'online', 'client_id': clientId, 'redirect_uri': callbackUrl, 'state': state, 'scope': ' '.join(cls._AUTH_SCOPES) }) return '%s?%s' % (cls._AUTH_URL, query) def getToken(self, code): params = { 'grant_type': 'authorization_code', 'code': code, 'client_id': self.clientId, 'client_secret': self.clientSecret, 'redirect_uri': self.redirectUri } resp = self._getJson(method='POST', url=self._TOKEN_URL, data=params, headers={'Accept': 'application/json'}) if 'error' in resp: raise RestException( 'Got an error exchanging token from provider: "%s".' % resp, code=502) return resp def getUser(self, token): headers = { 'Authorization': 'Bearer {}'.format(token['access_token']) } resp = self._getJson(method='GET', url=self._API_USER_URL, headers=headers) oauthId = resp.get('sub') if not oauthId: raise RestException( 'Globus identity did not return a valid ID.', code=502) email = resp.get('email') if not email: raise RestException( 'Globus identity did not return a valid email.', code=502) name = resp['name'].split() firstName = name[0] lastName = name[-1] return self._createOrReuseUser(oauthId, email, firstName, lastName)
    apache-2.0
    ebernhardson/l2r
    code/bench_formats.py
    1
    1186
    import pandas as pd import feather import os import timeit import config from utils import table_utils df = table_utils._read(config.ALL_DATA) FILE_HDF = os.path.join(config.TMP_DIR, 'test.h5') FILE_PICKLE = os.path.join(config.TMP_DIR, 'test.pkl') FILE_FEATHER = os.path.join(config.TMP_DIR, 'test.feather') def test_hdf_write(): df.to_hdf(FILE_HDF, 'test', mode='w') def test_hdf_read(): pd.read_hdf(FILE_HDF, 'test') def test_pickle_write(): df.to_pickle(FILE_PICKLE) def test_pickle_read(): pd.read_pickle(FILE_PICKLE) def test_feather_write(): feather.write_dataframe(df.copy(), FILE_FEATHER) def test_feather_read(): feather.read_dataframe(FILE_FEATHER) def test(func): took = timeit.timeit("%s()" % (func.__name__), setup="from __main__ import %s" % (func.__name__), number=3) print "%s: %.3f" % (func.__name__, took) if __name__ == "__main__": res = [] res.append(test(test_hdf_write)) res.append(test(test_hdf_read)) res.append(test(test_pickle_write)) res.append(test(test_pickle_read)) res.append(test(test_feather_write)) res.append(test(test_feather_read)) print "\n\n\n" print "\n".join(res)
    mit
    manishpatell/erpcustomizationssaiimpex123qwe
    addons/gamification/models/__init__.py
    389
    1038
    # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013 OpenERP SA (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import goal import challenge import res_users import badge
    agpl-3.0
    mattesno1/Sick-Beard
    lib/requests/packages/charade/jpcntx.py
    151
    19323
    ######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .compat import wrap_ord NUM_OF_CATEGORY = 6 DONT_KNOW = -1 ENOUGH_REL_THRESHOLD = 100 MAX_REL_THRESHOLD = 1000 MINIMUM_DATA_THRESHOLD = 4 # This is hiragana 2-char sequence table, the number in each cell represents its frequency category jp2CharContext = ( (0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1), (2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4), (0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2), (0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4), (1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4), (0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3), (0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3), (0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3), (0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4), (0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3), (2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4), (0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3), (0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5), (0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3), (2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5), (0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4), (1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4), (0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3), (0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3), (0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3), (0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5), (0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4), (0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5), (0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3), (0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4), (0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4), (0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4), (0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1), (0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0), (1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3), (0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0), (0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3), (0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3), (0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5), (0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4), (2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5), (0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3), (0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3), (0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3), (0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3), (0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4), (0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4), (0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2), (0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3), (0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3), (0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3), (0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3), (0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4), (0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3), (0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4), (0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3), (0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3), (0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4), (0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4), (0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3), (2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4), (0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4), (0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3), (0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4), (0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4), (1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4), (0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3), (0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2), (0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2), (0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3), (0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3), (0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5), (0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3), (0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4), (1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4), (0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1), (0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2), (0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3), (0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), ) class JapaneseContextAnalysis: def __init__(self): self.reset() def reset(self): self._mTotalRel = 0 # total sequence received # category counters, each interger counts sequence in its category self._mRelSample = [0] * NUM_OF_CATEGORY # if last byte in current buffer is not the last byte of a character, # we need to know how many bytes to skip in next buffer self._mNeedToSkipCharNum = 0 self._mLastCharOrder = -1 # The order of previous char # If this flag is set to True, detection is done and conclusion has # been made self._mDone = False def feed(self, aBuf, aLen): if self._mDone: return # The buffer we got is byte oriented, and a character may span in more than one # buffers. In case the last one or two byte in last buffer is not # complete, we record how many byte needed to complete that character # and skip these bytes here. We can choose to record those bytes as # well and analyse the character once it is complete, but since a # character will not make much difference, by simply skipping # this character will simply our logic and improve performance. i = self._mNeedToSkipCharNum while i < aLen: order, charLen = self.get_order(aBuf[i:i + 2]) i += charLen if i > aLen: self._mNeedToSkipCharNum = i - aLen self._mLastCharOrder = -1 else: if (order != -1) and (self._mLastCharOrder != -1): self._mTotalRel += 1 if self._mTotalRel > MAX_REL_THRESHOLD: self._mDone = True break self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1 self._mLastCharOrder = order def got_enough_data(self): return self._mTotalRel > ENOUGH_REL_THRESHOLD def get_confidence(self): # This is just one way to calculate confidence. It works well for me. if self._mTotalRel > MINIMUM_DATA_THRESHOLD: return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel else: return DONT_KNOW def get_order(self, aBuf): return -1, 1 class SJISContextAnalysis(JapaneseContextAnalysis): def get_order(self, aBuf): if not aBuf: return -1, 1 # find out current char's byte length first_char = wrap_ord(aBuf[0]) if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)): charLen = 2 else: charLen = 1 # return its order if it is hiragana if len(aBuf) > 1: second_char = wrap_ord(aBuf[1]) if (first_char == 202) and (0x9F <= second_char <= 0xF1): return second_char - 0x9F, charLen return -1, charLen class EUCJPContextAnalysis(JapaneseContextAnalysis): def get_order(self, aBuf): if not aBuf: return -1, 1 # find out current char's byte length first_char = wrap_ord(aBuf[0]) if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): charLen = 2 elif first_char == 0x8F: charLen = 3 else: charLen = 1 # return its order if it is hiragana if len(aBuf) > 1: second_char = wrap_ord(aBuf[1]) if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3): return second_char - 0xA1, charLen return -1, charLen # flake8: noqa
    gpl-3.0
    mdhaber/scipy
    scipy/sparse/linalg/eigen/arpack/setup.py
    18
    1845
    from os.path import join def configuration(parent_package='',top_path=None): from scipy._build_utils.system_info import get_info from numpy.distutils.misc_util import Configuration from scipy._build_utils import (get_g77_abi_wrappers, gfortran_legacy_flag_hook, blas_ilp64_pre_build_hook, uses_blas64, get_f2py_int64_options) if uses_blas64(): lapack_opt = get_info('lapack_ilp64_opt', 2) pre_build_hook = (gfortran_legacy_flag_hook, blas_ilp64_pre_build_hook(lapack_opt)) f2py_options = get_f2py_int64_options() else: lapack_opt = get_info('lapack_opt') pre_build_hook = gfortran_legacy_flag_hook f2py_options = None config = Configuration('arpack', parent_package, top_path) arpack_sources = [join('ARPACK','SRC', '*.f')] arpack_sources.extend([join('ARPACK','UTIL', '*.f')]) arpack_sources += get_g77_abi_wrappers(lapack_opt) config.add_library('arpack_scipy', sources=arpack_sources, include_dirs=[join('ARPACK', 'SRC')], _pre_build_hook=pre_build_hook) ext = config.add_extension('_arpack', sources=['arpack.pyf.src'], libraries=['arpack_scipy'], f2py_options=f2py_options, extra_info=lapack_opt, depends=arpack_sources) ext._pre_build_hook = pre_build_hook config.add_data_dir('tests') # Add license files config.add_data_files('ARPACK/COPYING') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
    bsd-3-clause
    dropbox/changes
    tests/changes/jobs/test_sync_build.py
    1
    3772
    from __future__ import absolute_import from datetime import datetime from mock import patch from changes.constants import Status, Result from changes.config import db from changes.models.build import Build from changes.models.itemstat import ItemStat from changes.jobs.sync_build import sync_build from changes.testutils import TestCase class SyncBuildTest(TestCase): @patch('changes.config.queue.delay') def test_simple(self, queue_delay): project = self.create_project() build = self.create_build( project=project, status=Status.unknown, result=Result.unknown, ) job_a = self.create_job( build=build, status=Status.finished, result=Result.failed, duration=5000, date_started=datetime(2013, 9, 19, 22, 15, 22), date_finished=datetime(2013, 9, 19, 22, 15, 25), ) job_b = self.create_job( build=build, status=Status.in_progress, result=Result.passed, duration=5000, date_started=datetime(2013, 9, 19, 22, 15, 23), date_finished=datetime(2013, 9, 19, 22, 15, 26), ) self.create_task( task_name='sync_job', parent_id=build.id, task_id=job_a.id, status=Status.finished, ) task_b = self.create_task( task_name='sync_job', parent_id=build.id, task_id=job_b.id, status=Status.in_progress, ) db.session.add(ItemStat(item_id=job_a.id, name='tests_missing', value=1)) db.session.add(ItemStat(item_id=job_b.id, name='tests_missing', value=0)) db.session.commit() with patch.object(sync_build, 'allow_absent_from_db', True): sync_build(build_id=build.id.hex, task_id=build.id.hex) build = Build.query.get(build.id) assert build.status == Status.in_progress assert build.result == Result.failed task_b.status = Status.finished db.session.add(task_b) job_b.status = Status.finished db.session.add(job_b) db.session.commit() with patch.object(sync_build, 'allow_absent_from_db', True): sync_build(build_id=build.id.hex, task_id=build.id.hex) build = Build.query.get(build.id) assert build.status == Status.finished assert build.result == Result.failed assert build.duration == 4000 assert build.date_started == datetime(2013, 9, 19, 22, 15, 22) assert build.date_finished == datetime(2013, 9, 19, 22, 15, 26) queue_delay.assert_any_call('update_project_stats', kwargs={ 'project_id': project.id.hex, }, countdown=1) stat = ItemStat.query.filter( ItemStat.item_id == build.id, ItemStat.name == 'tests_missing', ).first() assert stat.value == 1 @patch('changes.jobs.sync_build.datetime') def test_finished_no_jobs(self, sync_build_datetime): project = self.create_project() build = self.create_build( project=project, status=Status.unknown, result=Result.unknown, ) sync_build_datetime.utcnow.return_value = datetime(2013, 9, 19, 22, 15, 22) with patch.object(sync_build, 'allow_absent_from_db', True): sync_build(build_id=build.id.hex, task_id=build.id.hex) build = Build.query.get(build.id) assert build.status == Status.finished assert build.result == Result.unknown assert build.date_finished == sync_build_datetime.utcnow.return_value assert build.date_decided == sync_build_datetime.utcnow.return_value
    apache-2.0
    manishpatell/erpcustomizationssaiimpex123qwe
    addons/purchase/__init__.py
    439
    1185
    # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import purchase import partner import stock import wizard import report import stock import company import edi import res_config # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
    agpl-3.0
    signed/intellij-community
    python/lib/Lib/site-packages/django/template/smartif.py
    331
    6261
    """ Parser and utilities for the smart 'if' tag """ import operator # Using a simple top down parser, as described here: # http://effbot.org/zone/simple-top-down-parsing.htm. # 'led' = left denotation # 'nud' = null denotation # 'bp' = binding power (left = lbp, right = rbp) class TokenBase(object): """ Base class for operators and literals, mainly for debugging and for throwing syntax errors. """ id = None # node/token type name value = None # used by literals first = second = None # used by tree nodes def nud(self, parser): # Null denotation - called in prefix context raise parser.error_class( "Not expecting '%s' in this position in if tag." % self.id ) def led(self, left, parser): # Left denotation - called in infix context raise parser.error_class( "Not expecting '%s' as infix operator in if tag." % self.id ) def display(self): """ Returns what to display in error messages for this node """ return self.id def __repr__(self): out = [str(x) for x in [self.id, self.first, self.second] if x is not None] return "(" + " ".join(out) + ")" def infix(bp, func): """ Creates an infix operator, given a binding power and a function that evaluates the node """ class Operator(TokenBase): lbp = bp def led(self, left, parser): self.first = left self.second = parser.expression(bp) return self def eval(self, context): try: return func(context, self.first, self.second) except Exception: # Templates shouldn't throw exceptions when rendering. We are # most likely to get exceptions for things like {% if foo in bar # %} where 'bar' does not support 'in', so default to False return False return Operator def prefix(bp, func): """ Creates a prefix operator, given a binding power and a function that evaluates the node. """ class Operator(TokenBase): lbp = bp def nud(self, parser): self.first = parser.expression(bp) self.second = None return self def eval(self, context): try: return func(context, self.first) except Exception: return False return Operator # Operator precedence follows Python. # NB - we can get slightly more accurate syntax error messages by not using the # same object for '==' and '='. # We defer variable evaluation to the lambda to ensure that terms are # lazily evaluated using Python's boolean parsing logic. OPERATORS = { 'or': infix(6, lambda context, x, y: x.eval(context) or y.eval(context)), 'and': infix(7, lambda context, x, y: x.eval(context) and y.eval(context)), 'not': prefix(8, lambda context, x: not x.eval(context)), 'in': infix(9, lambda context, x, y: x.eval(context) in y.eval(context)), 'not in': infix(9, lambda context, x, y: x.eval(context) not in y.eval(context)), '=': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)), '==': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)), '!=': infix(10, lambda context, x, y: x.eval(context) != y.eval(context)), '>': infix(10, lambda context, x, y: x.eval(context) > y.eval(context)), '>=': infix(10, lambda context, x, y: x.eval(context) >= y.eval(context)), '<': infix(10, lambda context, x, y: x.eval(context) < y.eval(context)), '<=': infix(10, lambda context, x, y: x.eval(context) <= y.eval(context)), } # Assign 'id' to each: for key, op in OPERATORS.items(): op.id = key class Literal(TokenBase): """ A basic self-resolvable object similar to a Django template variable. """ # IfParser uses Literal in create_var, but TemplateIfParser overrides # create_var so that a proper implementation that actually resolves # variables, filters etc is used. id = "literal" lbp = 0 def __init__(self, value): self.value = value def display(self): return repr(self.value) def nud(self, parser): return self def eval(self, context): return self.value def __repr__(self): return "(%s %r)" % (self.id, self.value) class EndToken(TokenBase): lbp = 0 def nud(self, parser): raise parser.error_class("Unexpected end of expression in if tag.") EndToken = EndToken() class IfParser(object): error_class = ValueError def __init__(self, tokens): # pre-pass necessary to turn 'not','in' into single token l = len(tokens) mapped_tokens = [] i = 0 while i < l: token = tokens[i] if token == "not" and i + 1 < l and tokens[i+1] == "in": token = "not in" i += 1 # skip 'in' mapped_tokens.append(self.translate_token(token)) i += 1 self.tokens = mapped_tokens self.pos = 0 self.current_token = self.next() def translate_token(self, token): try: op = OPERATORS[token] except (KeyError, TypeError): return self.create_var(token) else: return op() def next(self): if self.pos >= len(self.tokens): return EndToken else: retval = self.tokens[self.pos] self.pos += 1 return retval def parse(self): retval = self.expression() # Check that we have exhausted all the tokens if self.current_token is not EndToken: raise self.error_class("Unused '%s' at end of if expression." % self.current_token.display()) return retval def expression(self, rbp=0): t = self.current_token self.current_token = self.next() left = t.nud(self) while rbp < self.current_token.lbp: t = self.current_token self.current_token = self.next() left = t.led(left, self) return left def create_var(self, value): return Literal(value)
    apache-2.0
    sorki/rosdep
    test/test_rosdep_osx.py
    6
    7167
    # -*- coding: utf-8 -*- # Copyright (c) 2011, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # Author Ken Conley/[email protected] import os import traceback from mock import call from mock import Mock from mock import patch def get_test_dir(): return os.path.abspath(os.path.join(os.path.dirname(__file__), 'osx')) def is_port_installed_tripwire(): # don't know the correct answer, but make sure this does not throw from rosdep2.platforms.osx import is_port_installed assert is_port_installed() in [True, False] def is_brew_installed_tripwire(): # don't know the correct answer, but make sure this does not throw from rosdep2.platforms.osx import is_brew_installed assert is_brew_installed() in [True, False] def make_resolutions(package_list): from rosdep2.platforms.osx import HomebrewResolution return list(map(lambda pkg: HomebrewResolution(pkg, [], []), package_list)) def make_resolutions_options(package_list): from rosdep2.platforms.osx import HomebrewResolution return list(map(lambda pkg: HomebrewResolution(pkg[0], pkg[1], pkg[2]), package_list)) def brew_command(command): if command[1] == "list": with open(os.path.join(get_test_dir(), 'brew-list-output'), 'r') as f: return f.read() elif command[1] == "info": pkg = command[2] with open(os.path.join(get_test_dir(), 'brew-info-output'), 'r') as f: output = f.readlines() for line in output: res = line.split(":", 1) if res[0] == pkg: return res[1] return '' def test_brew_detect(): from rosdep2.platforms.osx import brew_detect m = Mock() m.return_value = '' val = brew_detect([], exec_fn=m) assert val == [], val m = Mock() m.return_value = '' val = brew_detect(make_resolutions(['tinyxml']), exec_fn=m) assert val == [], val # make sure our test harness is based on the same implementation m.assert_called_with(['brew', 'list']) assert m.call_args_list == [call(['brew', 'list'])], m.call_args_list m = Mock() m.side_effect = brew_command val = brew_detect(make_resolutions(['apt', 'subversion', 'python', 'bazaar']), exec_fn=m) # make sure it preserves order expected = make_resolutions(['subversion', 'bazaar']) assert set(val) == set(expected), val assert val == expected, val assert len(val) == len(set(val)), val def test_HomebrewInstaller(): from rosdep2.platforms.osx import HomebrewInstaller @patch('rosdep2.platforms.osx.is_brew_installed') @patch.object(HomebrewInstaller, 'remove_duplicate_dependencies') @patch.object(HomebrewInstaller, 'get_packages_to_install') def test(mock_get_packages_to_install, mock_remove_duplicate_dependencies, mock_brew_installed): mock_brew_installed.return_value = True installer = HomebrewInstaller() mock_get_packages_to_install.return_value = [] mock_remove_duplicate_dependencies.return_value = mock_get_packages_to_install.return_value assert [] == installer.get_install_command(make_resolutions(['fake'])) mock_get_packages_to_install.return_value = make_resolutions(['subversion', 'bazaar']) mock_remove_duplicate_dependencies.return_value = mock_get_packages_to_install.return_value expected = [['brew', 'install', 'subversion'], ['brew', 'install', 'bazaar']] # brew is always non-interactive for interactive in [True, False]: val = installer.get_install_command(['whatever'], interactive=interactive) assert val == expected, val expected = [['brew', 'uninstall', '--force', 'subversion'], ['brew', 'install', 'subversion'], ['brew', 'uninstall', '--force', 'bazaar'], ['brew', 'install', 'bazaar']] val = installer.get_install_command(['whatever'], reinstall=True) assert val == expected, val mock_get_packages_to_install.return_value = make_resolutions_options( [('subversion', ['foo', 'bar'], ['baz']), ('bazaar', [], ['--with-quux'])]) mock_remove_duplicate_dependencies.return_value = mock_get_packages_to_install.return_value expected = [['brew', 'install', 'subversion', 'foo', 'bar', 'baz'], ['brew', 'install', 'bazaar', '--with-quux']] val = installer.get_install_command(['whatever']) assert val == expected, val try: mock_get_packages_to_install.return_value = eval("make_resolutions_options([('subversion', [u'f´´ßß', u'öäö'], []), (u'bazaar', [], [u'tüü'])])") except SyntaxError: # Python 3.2, u'...' is not allowed, but string literals are unicode mock_get_packages_to_install.return_value = make_resolutions_options( [('subversion', ['f´´ßß', 'öäö'], []), ('bazaar', [], ["tüü"])]) mock_remove_duplicate_dependencies.return_value = mock_get_packages_to_install.return_value try: expected = eval("[['brew', 'install', 'subversion', u'f´´ßß', u'öäö'], ['brew', 'install', 'bazaar', u'tüü']]") except SyntaxError: # Python 3.2, u'...' is not allowed, but string literals are unicode expected = [['brew', 'install', 'subversion', 'f´´ßß', 'öäö'], ['brew', 'install', 'bazaar', "tüü"]] val = installer.get_install_command(['whatever']) assert val == expected, val try: test() except AssertionError: traceback.print_exc() raise
    bsd-3-clause
    lancezlin/pyjs
    pygtkweb/library/browser.py
    6
    4255
    from __pyjamas__ import JS, doc, wnd, get_main_frame listeners = {} def mash_attrib(name, joiner='-'): return name def get_listener(item): pass def set_listener(item, listener): pass def round_val(val, digits): return JS('@{{val}}.toFixed(@{{digits}});') class Element: def __init__(self, tag=None, element=None): if tag is not None: JS(''' this.element = $doc.createElement(@{{tag}}); ''') elif element is not None: self.element = element else: raise Exception("Cannot create Element without tag or element") self.element.__ref = self; self.activeEvents = [] def append(self, element): JS(''' this.element.appendChild(@{{element}}.element); ''') def prepend(self, element): JS(''' this.element.insertBefore(@{{element}}.element, @{{self}}.element.firstChild); ''') def getX(self): JS(''' var obj = this.element; var curleft = 0; if (obj.offsetParent) { curleft = obj.offsetLeft while (obj = obj.offsetParent) { curleft += obj.offsetLeft } } return curleft; ''') def getY(self): JS(''' var obj = this.element; var curtop = 0; if (obj.offsetParent) { curtop = obj.offsetTop while (obj = obj.offsetParent) { curtop += obj.offsetTop } } return curtop; ''') def getWidth(self): JS(''' return this.element.offsetWidth; ''') def getHeight(self): JS(''' return this.element.offsetHeight; ''') def setWidth(self, width): self.setStyle('width',width) def setHeight(self, height): self.setStyle('height',height) def setStyle(self, property, value): JS(''' this.element.style[@{{property}}] = @{{value}}; ''') def setPxStyle(self, property, value): self.setStyle(property, "%dpx" % value) def setPercentStyle(self, property, value): self.setStyle(property, "%d%%" % value) def getStyle(self, property): JS(''' return this.element.style[@{{property}}]; ''') def setProperty(self, property, value): JS(''' //this.element.setAttribute(@{{property}},@{{value}}); this.element[@{{property}}] = @{{value}}; ''') def getProperty(self, property): JS(''' //return this.element.getAttribute(@{{property}}); return this.element[@{{property}}]; ''') def setHTML(self, content): JS(''' this.element.innerHTML = @{{content}}; ''') def getHTML(self): JS(''' return this.element.innerHTML; ''') def on_browser_event(self, view, e, ignore): pass def catchEvents(self, name, object): JS(''' var tmp = function(e) { var targ; if (!e) var e = $wnd.event; if (e.target) targ = e.target; else if (e.srcElement) targ = e.srcElement; if (targ.nodeType == 3) targ = targ.parentNode; if (targ.__ref) @{{object}}.dom_event(e, targ.__ref); else @{{object}}.dom_event(e, null); }; ''') name = name[0] self.activeEvents.append((name, object)) JS(''' var old_callback = this.element['on'+@{{name}}]; this.element['on'+@{{name}}] = function(e){if(old_callback){old_callback(e);}@{{!tmp}}(e);}; ''') class Document: window = Element(element= JS('$wnd')) document = Element(element= JS('$doc')) body = Element(element= JS('$doc.body')) @staticmethod def createElement(tag): return Element(tag) @staticmethod def append(element): JS(''' $doc.body.appendChild(@{{element}}.element); ''') @staticmethod def setContent(message): JS(''' $doc.body.innerHTML = @{{message}}; ''') @staticmethod def setTitle(title): JS(''' $doc.title = @{{title}}; ''')
    apache-2.0
    xen0l/ansible
    lib/ansible/modules/cloud/google/gcp_compute_route_facts.py
    12
    7916
    #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_compute_route_facts description: - Gather facts for GCP Route short_description: Gather facts for GCP Route version_added: 2.7 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: filters: description: A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). Each additional filter in the list will act be added as an AND condition (filter1 and filter2) extends_documentation_fragment: gcp ''' EXAMPLES = ''' - name: a route facts gcp_compute_route_facts: filters: - name = test_object project: test_project auth_kind: service_account service_account_file: "/tmp/auth.pem" ''' RETURN = ''' items: description: List of items returned: always type: complex contains: dest_range: description: - The destination range of outgoing packets that this route applies to. - Only IPv4 is supported. returned: success type: str description: description: - An optional description of this resource. Provide this property when you create the resource. returned: success type: str name: description: - Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. returned: success type: str network: description: - The network that this route applies to. returned: success type: dict priority: description: - The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. - In the case of two routes with equal prefix length, the one with the lowest-numbered priority value wins. - Default value is 1000. Valid range is 0 through 65535. returned: success type: int tags: description: - A list of instance tags to which this route applies. returned: success type: list next_hop_gateway: description: - URL to a gateway that should handle matching packets. - 'Currently, you can only specify the internet gateway, using a full or partial valid URL: * U(https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway) * projects/project/global/gateways/default-internet-gateway * global/gateways/default-internet-gateway .' returned: success type: str next_hop_instance: description: - URL to an instance that should handle matching packets. - 'You can specify this as a full or partial URL. For example: * U(https://www.googleapis.com/compute/v1/projects/project/zones/zone/) instances/instance * projects/project/zones/zone/instances/instance * zones/zone/instances/instance .' returned: success type: str next_hop_ip: description: - Network IP address of an instance that should handle matching packets. returned: success type: str next_hop_vpn_tunnel: description: - URL to a VpnTunnel that should handle matching packets. returned: success type: str next_hop_network: description: - URL to a Network that should handle matching packets. returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest import json ################################################################################ # Main ################################################################################ def main(): module = GcpModule( argument_spec=dict( filters=dict(type='list', elements='str'), ) ) if 'scopes' not in module.params: module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] items = fetch_list(module, collection(module), query_options(module.params['filters'])) if items.get('items'): items = items.get('items') else: items = [] return_value = { 'items': items } module.exit_json(**return_value) def collection(module): return "https://www.googleapis.com/compute/v1/projects/{project}/global/routes".format(**module.params) def fetch_list(module, link, query): auth = GcpSession(module, 'compute') response = auth.get(link, params={'filter': query}) return return_if_object(module, response) def query_options(filters): if not filters: return '' if len(filters) == 1: return filters[0] else: queries = [] for f in filters: # For multiple queries, all queries should have () if f[0] != '(' and f[-1] != ')': queries.append("(%s)" % ''.join(f)) else: queries.append(f) return ' '.join(queries) def return_if_object(module, response): # If not found, return nothing. if response.status_code == 404: return None # If no content, return nothing. if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: module.fail_json(msg="Invalid JSON response with error: %s" % inst) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) return result if __name__ == "__main__": main()
    gpl-3.0
    gudcjfdldu/volatility
    volatility/plugins/linux/dentry_cache.py
    57
    2513
    # Volatility # Copyright (C) 2007-2013 Volatility Foundation # # This file is part of Volatility. # # Volatility is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Volatility is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Volatility. If not, see <http://www.gnu.org/licenses/>. # """ @author: Andrew Case @license: GNU General Public License 2.0 @contact: [email protected] @organization: """ import volatility.plugins.linux.common as linux_common from volatility.plugins.linux.slab_info import linux_slabinfo class linux_dentry_cache(linux_common.AbstractLinuxCommand): """Gather files from the dentry cache""" def __init__(self, config, *args, **kwargs): linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs) self._config.add_option('UNALLOCATED', short_option = 'u', default = False, help = 'Show unallocated', action = 'store_true') def make_body(self, dentry): """Create a pipe-delimited bodyfile from a dentry structure. MD5|name|inode|mode_as_string|UID|GID|size|atime|mtime|ctime|crtime """ path = dentry.get_partial_path() or "" i = dentry.d_inode if i: ret = [0, path, i.i_ino, 0, i.i_uid, i.i_gid, i.i_size, i.i_atime, i.i_mtime, 0, i.i_ctime] else: ret = [0, path] + [0] * 8 ret = "|".join([str(val) for val in ret]) return ret def calculate(self): linux_common.set_plugin_members(self) cache = linux_slabinfo(self._config).get_kmem_cache("dentry", self._config.UNALLOCATED) # support for old kernels if cache == []: cache = linux_slabinfo(self._config).get_kmem_cache("dentry_cache", self._config.UNALLOCATED, struct_name = "dentry") for dentry in cache: yield self.make_body(dentry) def render_text(self, outfd, data): for bodyline in data: outfd.write(bodyline + "\n")
    gpl-2.0
    scripnichenko/nova
    nova/tests/unit/compute/test_resources.py
    57
    11446
    # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for the compute extra resources framework.""" from oslo_config import cfg from stevedore import extension from stevedore import named from nova.compute import resources from nova.compute.resources import base from nova.compute.resources import vcpu from nova import context from nova.objects import flavor as flavor_obj from nova import test from nova.tests.unit import fake_instance CONF = cfg.CONF class FakeResourceHandler(resources.ResourceHandler): def __init__(self, extensions): self._mgr = \ named.NamedExtensionManager.make_test_instance(extensions) class FakeResource(base.Resource): def __init__(self): self.total_res = 0 self.used_res = 0 def _get_requested(self, usage): if 'extra_specs' not in usage: return if self.resource_name not in usage['extra_specs']: return req = usage['extra_specs'][self.resource_name] return int(req) def _get_limit(self, limits): if self.resource_name not in limits: return limit = limits[self.resource_name] return int(limit) def reset(self, resources, driver): self.total_res = 0 self.used_res = 0 def test(self, usage, limits): requested = self._get_requested(usage) if not requested: return limit = self._get_limit(limits) if not limit: return free = limit - self.used_res if requested <= free: return else: return ('Free %(free)d < requested %(requested)d ' % {'free': free, 'requested': requested}) def add_instance(self, usage): requested = self._get_requested(usage) if requested: self.used_res += requested def remove_instance(self, usage): requested = self._get_requested(usage) if requested: self.used_res -= requested def write(self, resources): pass def report_free(self): return "Free %s" % (self.total_res - self.used_res) class ResourceA(FakeResource): def reset(self, resources, driver): # ResourceA uses a configuration option self.total_res = int(CONF.resA) self.used_res = 0 self.resource_name = 'resource:resA' def write(self, resources): resources['resA'] = self.total_res resources['used_resA'] = self.used_res class ResourceB(FakeResource): def reset(self, resources, driver): # ResourceB uses resource details passed in parameter resources self.total_res = resources['resB'] self.used_res = 0 self.resource_name = 'resource:resB' def write(self, resources): resources['resB'] = self.total_res resources['used_resB'] = self.used_res def fake_flavor_obj(**updates): flavor = flavor_obj.Flavor() flavor.id = 1 flavor.name = 'fakeflavor' flavor.memory_mb = 8000 flavor.vcpus = 3 flavor.root_gb = 11 flavor.ephemeral_gb = 4 flavor.swap = 0 flavor.rxtx_factor = 1.0 flavor.vcpu_weight = 1 if updates: flavor.update(updates) return flavor class BaseTestCase(test.NoDBTestCase): def _initialize_used_res_counter(self): # Initialize the value for the used resource for ext in self.r_handler._mgr.extensions: ext.obj.used_res = 0 def setUp(self): super(BaseTestCase, self).setUp() # initialize flavors and stub get_by_id to # get flavors from here self._flavors = {} self.ctxt = context.get_admin_context() # Create a flavor without extra_specs defined _flavor_id = 1 _flavor = fake_flavor_obj(id=_flavor_id) self._flavors[_flavor_id] = _flavor # Create a flavor with extra_specs defined _flavor_id = 2 requested_resA = 5 requested_resB = 7 requested_resC = 7 _extra_specs = {'resource:resA': requested_resA, 'resource:resB': requested_resB, 'resource:resC': requested_resC} _flavor = fake_flavor_obj(id=_flavor_id, extra_specs=_extra_specs) self._flavors[_flavor_id] = _flavor # create fake resource extensions and resource handler _extensions = [ extension.Extension('resA', None, ResourceA, ResourceA()), extension.Extension('resB', None, ResourceB, ResourceB()), ] self.r_handler = FakeResourceHandler(_extensions) # Resources details can be passed to each plugin or can be specified as # configuration options driver_resources = {'resB': 5} CONF.resA = '10' # initialise the resources self.r_handler.reset_resources(driver_resources, None) def test_update_from_instance_with_extra_specs(self): # Flavor with extra_specs _flavor_id = 2 sign = 1 self.r_handler.update_from_instance(self._flavors[_flavor_id], sign) expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA'] expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB'] self.assertEqual(int(expected_resA), self.r_handler._mgr['resA'].obj.used_res) self.assertEqual(int(expected_resB), self.r_handler._mgr['resB'].obj.used_res) def test_update_from_instance_without_extra_specs(self): # Flavor id without extra spec _flavor_id = 1 self._initialize_used_res_counter() self.r_handler.resource_list = [] sign = 1 self.r_handler.update_from_instance(self._flavors[_flavor_id], sign) self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res) self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res) def test_write_resources(self): self._initialize_used_res_counter() extra_resources = {} expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0} self.r_handler.write_resources(extra_resources) self.assertEqual(expected, extra_resources) def test_test_resources_without_extra_specs(self): limits = {} # Flavor id without extra_specs flavor = self._flavors[1] result = self.r_handler.test_resources(flavor, limits) self.assertEqual([None, None], result) def test_test_resources_with_limits_for_different_resource(self): limits = {'resource:resC': 20} # Flavor id with extra_specs flavor = self._flavors[2] result = self.r_handler.test_resources(flavor, limits) self.assertEqual([None, None], result) def test_passing_test_resources(self): limits = {'resource:resA': 10, 'resource:resB': 20} # Flavor id with extra_specs flavor = self._flavors[2] self._initialize_used_res_counter() result = self.r_handler.test_resources(flavor, limits) self.assertEqual([None, None], result) def test_failing_test_resources_for_single_resource(self): limits = {'resource:resA': 4, 'resource:resB': 20} # Flavor id with extra_specs flavor = self._flavors[2] self._initialize_used_res_counter() result = self.r_handler.test_resources(flavor, limits) expected = ['Free 4 < requested 5 ', None] self.assertEqual(sorted(expected), sorted(result)) def test_empty_resource_handler(self): """An empty resource handler has no resource extensions, should have no effect, and should raise no exceptions. """ empty_r_handler = FakeResourceHandler([]) resources = {} empty_r_handler.reset_resources(resources, None) flavor = self._flavors[1] sign = 1 empty_r_handler.update_from_instance(flavor, sign) limits = {} test_result = empty_r_handler.test_resources(flavor, limits) self.assertEqual([], test_result) sign = -1 empty_r_handler.update_from_instance(flavor, sign) extra_resources = {} expected_extra_resources = extra_resources empty_r_handler.write_resources(extra_resources) self.assertEqual(expected_extra_resources, extra_resources) empty_r_handler.report_free_resources() def test_vcpu_resource_load(self): # load the vcpu example names = ['vcpu'] real_r_handler = resources.ResourceHandler(names) ext_names = real_r_handler._mgr.names() self.assertEqual(names, ext_names) # check the extension loaded is the one we expect # and an instance of the object has been created ext = real_r_handler._mgr['vcpu'] self.assertIsInstance(ext.obj, vcpu.VCPU) class TestVCPU(test.NoDBTestCase): def setUp(self): super(TestVCPU, self).setUp() self._vcpu = vcpu.VCPU() self._vcpu._total = 10 self._vcpu._used = 0 self._flavor = fake_flavor_obj(vcpus=5) self._big_flavor = fake_flavor_obj(vcpus=20) self._instance = fake_instance.fake_instance_obj(None) def test_reset(self): # set vcpu values to something different to test reset self._vcpu._total = 10 self._vcpu._used = 5 driver_resources = {'vcpus': 20} self._vcpu.reset(driver_resources, None) self.assertEqual(20, self._vcpu._total) self.assertEqual(0, self._vcpu._used) def test_add_and_remove_instance(self): self._vcpu.add_instance(self._flavor) self.assertEqual(10, self._vcpu._total) self.assertEqual(5, self._vcpu._used) self._vcpu.remove_instance(self._flavor) self.assertEqual(10, self._vcpu._total) self.assertEqual(0, self._vcpu._used) def test_test_pass_limited(self): result = self._vcpu.test(self._flavor, {'vcpu': 10}) self.assertIsNone(result, 'vcpu test failed when it should pass') def test_test_pass_unlimited(self): result = self._vcpu.test(self._big_flavor, {}) self.assertIsNone(result, 'vcpu test failed when it should pass') def test_test_fail(self): result = self._vcpu.test(self._flavor, {'vcpu': 2}) expected = 'Free CPUs 2.00 VCPUs < requested 5 VCPUs' self.assertEqual(expected, result) def test_write(self): resources = {'stats': {}} self._vcpu.write(resources) expected = { 'vcpus': 10, 'vcpus_used': 0, 'stats': { 'num_vcpus': 10, 'num_vcpus_used': 0 } } self.assertEqual(sorted(expected), sorted(resources))
    apache-2.0
    PierreRust/beets
    extra/release.py
    24
    8554
    #!/usr/bin/env python3 """A utility script for automating the beets release process. """ import click import os import re import subprocess from contextlib import contextmanager import datetime BASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) CHANGELOG = os.path.join(BASE, 'docs', 'changelog.rst') @contextmanager def chdir(d): """A context manager that temporary changes the working directory. """ olddir = os.getcwd() os.chdir(d) yield os.chdir(olddir) @click.group() def release(): pass # Locations (filenames and patterns) of the version number. VERSION_LOCS = [ ( os.path.join(BASE, 'beets', '__init__.py'), [ ( r'__version__\s*=\s*[\'"]([0-9\.]+)[\'"]', "__version__ = '{version}'", ) ] ), ( os.path.join(BASE, 'docs', 'conf.py'), [ ( r'version\s*=\s*[\'"]([0-9\.]+)[\'"]', "version = '{minor}'", ), ( r'release\s*=\s*[\'"]([0-9\.]+)[\'"]', "release = '{version}'", ), ] ), ( os.path.join(BASE, 'setup.py'), [ ( r'\s*version\s*=\s*[\'"]([0-9\.]+)[\'"]', " version='{version}',", ) ] ), ] def bump_version(version): """Update the version number in setup.py, docs config, changelog, and root module. """ version_parts = [int(p) for p in version.split('.')] assert len(version_parts) == 3, "invalid version number" minor = '{}.{}'.format(*version_parts) major = '{}'.format(*version_parts) # Replace the version each place where it lives. for filename, locations in VERSION_LOCS: # Read and transform the file. out_lines = [] with open(filename) as f: found = False for line in f: for pattern, template in locations: match = re.match(pattern, line) if match: # Check that this version is actually newer. old_version = match.group(1) old_parts = [int(p) for p in old_version.split('.')] assert version_parts > old_parts, \ "version must be newer than {}".format( old_version ) # Insert the new version. out_lines.append(template.format( version=version, major=major, minor=minor, ) + '\n') found = True break else: # Normal line. out_lines.append(line) if not found: print("No pattern found in {}".format(filename)) # Write the file back. with open(filename, 'w') as f: f.write(''.join(out_lines)) # Generate bits to insert into changelog. header_line = '{} (in development)'.format(version) header = '\n\n' + header_line + '\n' + '-' * len(header_line) + '\n\n' header += 'Changelog goes here!\n' # Insert into the right place. with open(CHANGELOG) as f: contents = f.read() location = contents.find('\n\n') # First blank line. contents = contents[:location] + header + contents[location:] # Write back. with open(CHANGELOG, 'w') as f: f.write(contents) @release.command() @click.argument('version') def bump(version): """Bump the version number. """ bump_version(version) def get_latest_changelog(): """Extract the first section of the changelog. """ started = False lines = [] with open(CHANGELOG) as f: for line in f: if re.match(r'^--+$', line.strip()): # Section boundary. Start or end. if started: # Remove last line, which is the header of the next # section. del lines[-1] break else: started = True elif started: lines.append(line) return ''.join(lines).strip() def rst2md(text): """Use Pandoc to convert text from ReST to Markdown. """ pandoc = subprocess.Popen( ['pandoc', '--from=rst', '--to=markdown', '--no-wrap'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, _ = pandoc.communicate(text.encode('utf8')) md = stdout.decode('utf8').strip() # Fix up odd spacing in lists. return re.sub(r'^- ', '- ', md, flags=re.M) def changelog_as_markdown(): """Get the latest changelog entry as hacked up Markdown. """ rst = get_latest_changelog() # Replace plugin links with plugin names. rst = re.sub(r':doc:`/plugins/(\w+)`', r'``\1``', rst) # References with text. rst = re.sub(r':ref:`([^<]+)(<[^>]+>)`', r'\1', rst) # Other backslashes with verbatim ranges. rst = re.sub(r'(\s)`([^`]+)`([^_])', r'\1``\2``\3', rst) # Command links with command names. rst = re.sub(r':ref:`(\w+)-cmd`', r'``\1``', rst) # Bug numbers. rst = re.sub(r':bug:`(\d+)`', r'#\1', rst) # Users. rst = re.sub(r':user:`(\w+)`', r'@\1', rst) # Convert with Pandoc. md = rst2md(rst) # Restore escaped issue numbers. md = re.sub(r'\\#(\d+)\b', r'#\1', md) return md @release.command() def changelog(): """Get the most recent version's changelog as Markdown. """ print(changelog_as_markdown()) def get_version(index=0): """Read the current version from the changelog. """ with open(CHANGELOG) as f: cur_index = 0 for line in f: match = re.search(r'^\d+\.\d+\.\d+', line) if match: if cur_index == index: return match.group(0) else: cur_index += 1 @release.command() def version(): """Display the current version. """ print(get_version()) @release.command() def datestamp(): """Enter today's date as the release date in the changelog. """ dt = datetime.datetime.now() stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year) marker = '(in development)' lines = [] underline_length = None with open(CHANGELOG) as f: for line in f: if marker in line: # The header line. line = line.replace(marker, stamp) lines.append(line) underline_length = len(line.strip()) elif underline_length: # This is the line after the header. Rewrite the dashes. lines.append('-' * underline_length + '\n') underline_length = None else: lines.append(line) with open(CHANGELOG, 'w') as f: for line in lines: f.write(line) @release.command() def prep(): """Run all steps to prepare a release. - Tag the commit. - Build the sdist package. - Generate the Markdown changelog to ``changelog.md``. - Bump the version number to the next version. """ cur_version = get_version() # Tag. subprocess.check_output(['git', 'tag', 'v{}'.format(cur_version)]) # Build. with chdir(BASE): subprocess.check_call(['python2', 'setup.py', 'sdist']) # Generate Markdown changelog. cl = changelog_as_markdown() with open(os.path.join(BASE, 'changelog.md'), 'w') as f: f.write(cl) # Version number bump. # FIXME It should be possible to specify this as an argument. version_parts = [int(n) for n in cur_version.split('.')] version_parts[-1] += 1 next_version = u'.'.join(map(str, version_parts)) bump_version(next_version) @release.command() def publish(): """Unleash a release unto the world. - Push the tag to GitHub. - Upload to PyPI. """ version = get_version(1) # Push to GitHub. with chdir(BASE): subprocess.check_call(['git', 'push']) subprocess.check_call(['git', 'push', '--tags']) # Upload to PyPI. path = os.path.join(BASE, 'dist', 'beets-{}.tar.gz'.format(version)) subprocess.check_call(['twine', 'upload', path]) if __name__ == '__main__': release()
    mit
    romankagan/DDBWorkbench
    python/lib/Lib/site-packages/django/contrib/gis/geos/prototypes/predicates.py
    623
    1777
    """ This module houses the GEOS ctypes prototype functions for the unary and binary predicate operations on geometries. """ from ctypes import c_char, c_char_p, c_double from django.contrib.gis.geos.libgeos import GEOM_PTR from django.contrib.gis.geos.prototypes.errcheck import check_predicate from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc ## Binary & unary predicate functions ## def binary_predicate(func, *args): "For GEOS binary predicate functions." argtypes = [GEOM_PTR, GEOM_PTR] if args: argtypes += args func.argtypes = argtypes func.restype = c_char func.errcheck = check_predicate return func def unary_predicate(func): "For GEOS unary predicate functions." func.argtypes = [GEOM_PTR] func.restype = c_char func.errcheck = check_predicate return func ## Unary Predicates ## geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ')) geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty')) geos_isring = unary_predicate(GEOSFunc('GEOSisRing')) geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple')) geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid')) ## Binary Predicates ## geos_contains = binary_predicate(GEOSFunc('GEOSContains')) geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses')) geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint')) geos_equals = binary_predicate(GEOSFunc('GEOSEquals')) geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double) geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects')) geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps')) geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p) geos_touches = binary_predicate(GEOSFunc('GEOSTouches')) geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
    apache-2.0
    RomainBrault/scikit-learn
    sklearn/externals/joblib/numpy_pickle_compat.py
    78
    8439
    """Numpy pickle compatibility functions.""" import pickle import os import zlib from io import BytesIO from ._compat import PY3_OR_LATER from .numpy_pickle_utils import _ZFILE_PREFIX from .numpy_pickle_utils import Unpickler def hex_str(an_int): """Convert an int to an hexadecimal string.""" return '{:#x}'.format(an_int) if PY3_OR_LATER: def asbytes(s): if isinstance(s, bytes): return s return s.encode('latin1') else: asbytes = str _MAX_LEN = len(hex_str(2 ** 64)) _CHUNK_SIZE = 64 * 1024 def read_zfile(file_handle): """Read the z-file and return the content as a string. Z-files are raw data compressed with zlib used internally by joblib for persistence. Backward compatibility is not guaranteed. Do not use for external purposes. """ file_handle.seek(0) header_length = len(_ZFILE_PREFIX) + _MAX_LEN length = file_handle.read(header_length) length = length[len(_ZFILE_PREFIX):] length = int(length, 16) # With python2 and joblib version <= 0.8.4 compressed pickle header is one # character wider so we need to ignore an additional space if present. # Note: the first byte of the zlib data is guaranteed not to be a # space according to # https://tools.ietf.org/html/rfc6713#section-2.1 next_byte = file_handle.read(1) if next_byte != b' ': # The zlib compressed data has started and we need to go back # one byte file_handle.seek(header_length) # We use the known length of the data to tell Zlib the size of the # buffer to allocate. data = zlib.decompress(file_handle.read(), 15, length) assert len(data) == length, ( "Incorrect data length while decompressing %s." "The file could be corrupted." % file_handle) return data def write_zfile(file_handle, data, compress=1): """Write the data in the given file as a Z-file. Z-files are raw data compressed with zlib used internally by joblib for persistence. Backward compatibility is not guarantied. Do not use for external purposes. """ file_handle.write(_ZFILE_PREFIX) length = hex_str(len(data)) # Store the length of the data file_handle.write(asbytes(length.ljust(_MAX_LEN))) file_handle.write(zlib.compress(asbytes(data), compress)) ############################################################################### # Utility objects for persistence. class NDArrayWrapper(object): """An object to be persisted instead of numpy arrays. The only thing this object does, is to carry the filename in which the array has been persisted, and the array subclass. """ def __init__(self, filename, subclass, allow_mmap=True): """Constructor. Store the useful information for later.""" self.filename = filename self.subclass = subclass self.allow_mmap = allow_mmap def read(self, unpickler): """Reconstruct the array.""" filename = os.path.join(unpickler._dirname, self.filename) # Load the array from the disk # use getattr instead of self.allow_mmap to ensure backward compat # with NDArrayWrapper instances pickled with joblib < 0.9.0 allow_mmap = getattr(self, 'allow_mmap', True) memmap_kwargs = ({} if not allow_mmap else {'mmap_mode': unpickler.mmap_mode}) array = unpickler.np.load(filename, **memmap_kwargs) # Reconstruct subclasses. This does not work with old # versions of numpy if (hasattr(array, '__array_prepare__') and self.subclass not in (unpickler.np.ndarray, unpickler.np.memmap)): # We need to reconstruct another subclass new_array = unpickler.np.core.multiarray._reconstruct( self.subclass, (0,), 'b') return new_array.__array_prepare__(array) else: return array class ZNDArrayWrapper(NDArrayWrapper): """An object to be persisted instead of numpy arrays. This object store the Zfile filename in which the data array has been persisted, and the meta information to retrieve it. The reason that we store the raw buffer data of the array and the meta information, rather than array representation routine (tostring) is that it enables us to use completely the strided model to avoid memory copies (a and a.T store as fast). In addition saving the heavy information separately can avoid creating large temporary buffers when unpickling data with large arrays. """ def __init__(self, filename, init_args, state): """Constructor. Store the useful information for later.""" self.filename = filename self.state = state self.init_args = init_args def read(self, unpickler): """Reconstruct the array from the meta-information and the z-file.""" # Here we a simply reproducing the unpickling mechanism for numpy # arrays filename = os.path.join(unpickler._dirname, self.filename) array = unpickler.np.core.multiarray._reconstruct(*self.init_args) with open(filename, 'rb') as f: data = read_zfile(f) state = self.state + (data,) array.__setstate__(state) return array class ZipNumpyUnpickler(Unpickler): """A subclass of the Unpickler to unpickle our numpy pickles.""" dispatch = Unpickler.dispatch.copy() def __init__(self, filename, file_handle, mmap_mode=None): """Constructor.""" self._filename = os.path.basename(filename) self._dirname = os.path.dirname(filename) self.mmap_mode = mmap_mode self.file_handle = self._open_pickle(file_handle) Unpickler.__init__(self, self.file_handle) try: import numpy as np except ImportError: np = None self.np = np def _open_pickle(self, file_handle): return BytesIO(read_zfile(file_handle)) def load_build(self): """Set the state of a newly created object. We capture it to replace our place-holder objects, NDArrayWrapper, by the array we are interested in. We replace them directly in the stack of pickler. """ Unpickler.load_build(self) if isinstance(self.stack[-1], NDArrayWrapper): if self.np is None: raise ImportError("Trying to unpickle an ndarray, " "but numpy didn't import correctly") nd_array_wrapper = self.stack.pop() array = nd_array_wrapper.read(self) self.stack.append(array) # Be careful to register our new method. if PY3_OR_LATER: dispatch[pickle.BUILD[0]] = load_build else: dispatch[pickle.BUILD] = load_build def load_compatibility(filename): """Reconstruct a Python object from a file persisted with joblib.dump. This function ensures the compatibility with joblib old persistence format (<= 0.9.3). Parameters ----------- filename: string The name of the file from which to load the object Returns ------- result: any Python object The object stored in the file. See Also -------- joblib.dump : function to save an object Notes ----- This function can load numpy array files saved separately during the dump. """ with open(filename, 'rb') as file_handle: # We are careful to open the file handle early and keep it open to # avoid race-conditions on renames. That said, if data is stored in # companion files, moving the directory will create a race when # joblib tries to access the companion files. unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle) try: obj = unpickler.load() except UnicodeDecodeError as exc: # More user-friendly error message if PY3_OR_LATER: new_exc = ValueError( 'You may be trying to read with ' 'python 3 a joblib pickle generated with python 2. ' 'This feature is not supported by joblib.') new_exc.__cause__ = exc raise new_exc finally: if hasattr(unpickler, 'file_handle'): unpickler.file_handle.close() return obj
    bsd-3-clause
    patrickstocklin/chattR
    lib/python2.7/site-packages/nltk/parse/nonprojectivedependencyparser.py
    8
    29287
    # Natural Language Toolkit: Dependency Grammars # # Copyright (C) 2001-2015 NLTK Project # Author: Jason Narad <[email protected]> # # URL: <http://nltk.org/> # For license information, see LICENSE.TXT # from __future__ import print_function import math import logging from nltk.compat import xrange from nltk.parse.dependencygraph import DependencyGraph logger = logging.getLogger(__name__) ################################################################# # DependencyScorerI - Interface for Graph-Edge Weight Calculation ################################################################# class DependencyScorerI(object): """ A scorer for calculated the weights on the edges of a weighted dependency graph. This is used by a ``ProbabilisticNonprojectiveParser`` to initialize the edge weights of a ``DependencyGraph``. While typically this would be done by training a binary classifier, any class that can return a multidimensional list representation of the edge weights can implement this interface. As such, it has no necessary fields. """ def __init__(self): if self.__class__ == DependencyScorerI: raise TypeError('DependencyScorerI is an abstract interface') def train(self, graphs): """ :type graphs: list(DependencyGraph) :param graphs: A list of dependency graphs to train the scorer. Typically the edges present in the graphs can be used as positive training examples, and the edges not present as negative examples. """ raise NotImplementedError() def score(self, graph): """ :type graph: DependencyGraph :param graph: A dependency graph whose set of edges need to be scored. :rtype: A three-dimensional list of numbers. :return: The score is returned in a multidimensional(3) list, such that the outer-dimension refers to the head, and the inner-dimension refers to the dependencies. For instance, scores[0][1] would reference the list of scores corresponding to arcs from node 0 to node 1. The node's 'address' field can be used to determine its number identification. For further illustration, a score list corresponding to Fig.2 of Keith Hall's 'K-best Spanning Tree Parsing' paper: scores = [[[], [5], [1], [1]], [[], [], [11], [4]], [[], [10], [], [5]], [[], [8], [8], []]] When used in conjunction with a MaxEntClassifier, each score would correspond to the confidence of a particular edge being classified with the positive training examples. """ raise NotImplementedError() ################################################################# # NaiveBayesDependencyScorer ################################################################# class NaiveBayesDependencyScorer(DependencyScorerI): """ A dependency scorer built around a MaxEnt classifier. In this particular class that classifier is a ``NaiveBayesClassifier``. It uses head-word, head-tag, child-word, and child-tag features for classification. >>> from nltk.parse.dependencygraph import DependencyGraph, conll_data2 >>> graphs = [DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry] >>> npp = ProbabilisticNonprojectiveParser() >>> npp.train(graphs, NaiveBayesDependencyScorer()) >>> parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc']) >>> len(list(parses)) 1 """ def __init__(self): pass # Do nothing without throwing error def train(self, graphs): """ Trains a ``NaiveBayesClassifier`` using the edges present in graphs list as positive examples, the edges not present as negative examples. Uses a feature vector of head-word, head-tag, child-word, and child-tag. :type graphs: list(DependencyGraph) :param graphs: A list of dependency graphs to train the scorer. """ from nltk.classify import NaiveBayesClassifier # Create training labeled training examples labeled_examples = [] for graph in graphs: for head_node in graph.nodes.values(): for child_index, child_node in graph.nodes.items(): if child_index in head_node['deps']: label = "T" else: label = "F" labeled_examples.append( ( dict( a=head_node['word'], b=head_node['tag'], c=child_node['word'], d=child_node['tag'], ), label, ) ) self.classifier = NaiveBayesClassifier.train(labeled_examples) def score(self, graph): """ Converts the graph into a feature-based representation of each edge, and then assigns a score to each based on the confidence of the classifier in assigning it to the positive label. Scores are returned in a multidimensional list. :type graph: DependencyGraph :param graph: A dependency graph to score. :rtype: 3 dimensional list :return: Edge scores for the graph parameter. """ # Convert graph to feature representation edges = [] for head_node in graph.nodes.values(): for child_node in graph.nodes.values(): edges.append( ( dict( a=head_node['word'], b=head_node['tag'], c=child_node['word'], d=child_node['tag'], ) ) ) # Score edges edge_scores = [] row = [] count = 0 for pdist in self.classifier.prob_classify_many(edges): logger.debug('%.4f %.4f', pdist.prob('T'), pdist.prob('F')) # smoothing in case the probability = 0 row.append([math.log(pdist.prob("T")+0.00000000001)]) count += 1 if count == len(graph.nodes): edge_scores.append(row) row = [] count = 0 return edge_scores ################################################################# # A Scorer for Demo Purposes ################################################################# # A short class necessary to show parsing example from paper class DemoScorer(DependencyScorerI): def train(self, graphs): print('Training...') def score(self, graph): # scores for Keith Hall 'K-best Spanning Tree Parsing' paper return [[[], [5], [1], [1]], [[], [], [11], [4]], [[], [10], [], [5]], [[], [8], [8], []]] ################################################################# # Non-Projective Probabilistic Parsing ################################################################# class ProbabilisticNonprojectiveParser(object): """A probabilistic non-projective dependency parser. Nonprojective dependencies allows for "crossing branches" in the parse tree which is necessary for representing particular linguistic phenomena, or even typical parses in some languages. This parser follows the MST parsing algorithm, outlined in McDonald(2005), which likens the search for the best non-projective parse to finding the maximum spanning tree in a weighted directed graph. >>> class Scorer(DependencyScorerI): ... def train(self, graphs): ... pass ... ... def score(self, graph): ... return [ ... [[], [5], [1], [1]], ... [[], [], [11], [4]], ... [[], [10], [], [5]], ... [[], [8], [8], []], ... ] >>> npp = ProbabilisticNonprojectiveParser() >>> npp.train([], Scorer()) >>> parses = npp.parse(['v1', 'v2', 'v3'], [None, None, None]) >>> len(list(parses)) 1 Rule based example ------------------ >>> from nltk.grammar import DependencyGrammar >>> grammar = DependencyGrammar.fromstring(''' ... 'taught' -> 'play' | 'man' ... 'man' -> 'the' | 'in' ... 'in' -> 'corner' ... 'corner' -> 'the' ... 'play' -> 'golf' | 'dachshund' | 'to' ... 'dachshund' -> 'his' ... ''') >>> ndp = NonprojectiveDependencyParser(grammar) >>> parses = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf']) >>> len(list(parses)) 4 """ def __init__(self): """ Creates a new non-projective parser. """ logging.debug('initializing prob. nonprojective...') def train(self, graphs, dependency_scorer): """ Trains a ``DependencyScorerI`` from a set of ``DependencyGraph`` objects, and establishes this as the parser's scorer. This is used to initialize the scores on a ``DependencyGraph`` during the parsing procedure. :type graphs: list(DependencyGraph) :param graphs: A list of dependency graphs to train the scorer. :type dependency_scorer: DependencyScorerI :param dependency_scorer: A scorer which implements the ``DependencyScorerI`` interface. """ self._scorer = dependency_scorer self._scorer.train(graphs) def initialize_edge_scores(self, graph): """ Assigns a score to every edge in the ``DependencyGraph`` graph. These scores are generated via the parser's scorer which was assigned during the training process. :type graph: DependencyGraph :param graph: A dependency graph to assign scores to. """ self.scores = self._scorer.score(graph) def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph): """ Takes a list of nodes that have been identified to belong to a cycle, and collapses them into on larger node. The arcs of all nodes in the graph must be updated to account for this. :type new_node: Node. :param new_node: A Node (Dictionary) to collapse the cycle nodes into. :type cycle_path: A list of integers. :param cycle_path: A list of node addresses, each of which is in the cycle. :type g_graph, b_graph, c_graph: DependencyGraph :param g_graph, b_graph, c_graph: Graphs which need to be updated. """ logger.debug('Collapsing nodes...') # Collapse all cycle nodes into v_n+1 in G_Graph for cycle_node_index in cycle_path: g_graph.remove_by_address(cycle_node_index) g_graph.add_node(new_node) g_graph.redirect_arcs(cycle_path, new_node['address']) def update_edge_scores(self, new_node, cycle_path): """ Updates the edge scores to reflect a collapse operation into new_node. :type new_node: A Node. :param new_node: The node which cycle nodes are collapsed into. :type cycle_path: A list of integers. :param cycle_path: A list of node addresses that belong to the cycle. """ logger.debug('cycle %s', cycle_path) cycle_path = self.compute_original_indexes(cycle_path) logger.debug('old cycle %s', cycle_path) logger.debug('Prior to update: %s', self.scores) for i, row in enumerate(self.scores): for j, column in enumerate(self.scores[i]): logger.debug(self.scores[i][j]) if ( j in cycle_path and i not in cycle_path and self.scores[i][j] ): subtract_val = self.compute_max_subtract_score(j, cycle_path) logger.debug('%s - %s', self.scores[i][j], subtract_val) new_vals = [] for cur_val in self.scores[i][j]: new_vals.append(cur_val - subtract_val) self.scores[i][j] = new_vals for i, row in enumerate(self.scores): for j, cell in enumerate(self.scores[i]): if i in cycle_path and j in cycle_path: self.scores[i][j] = [] logger.debug('After update: %s', self.scores) def compute_original_indexes(self, new_indexes): """ As nodes are collapsed into others, they are replaced by the new node in the graph, but it's still necessary to keep track of what these original nodes were. This takes a list of node addresses and replaces any collapsed node addresses with their original addresses. :type new_indexes: A list of integers. :param new_indexes: A list of node addresses to check for subsumed nodes. """ swapped = True while swapped: originals = [] swapped = False for new_index in new_indexes: if new_index in self.inner_nodes: for old_val in self.inner_nodes[new_index]: if old_val not in originals: originals.append(old_val) swapped = True else: originals.append(new_index) new_indexes = originals return new_indexes def compute_max_subtract_score(self, column_index, cycle_indexes): """ When updating scores the score of the highest-weighted incoming arc is subtracted upon collapse. This returns the correct amount to subtract from that edge. :type column_index: integer. :param column_index: A index representing the column of incoming arcs to a particular node being updated :type cycle_indexes: A list of integers. :param cycle_indexes: Only arcs from cycle nodes are considered. This is a list of such nodes addresses. """ max_score = -100000 for row_index in cycle_indexes: for subtract_val in self.scores[row_index][column_index]: if subtract_val > max_score: max_score = subtract_val return max_score def best_incoming_arc(self, node_index): """ Returns the source of the best incoming arc to the node with address: node_index :type node_index: integer. :param node_index: The address of the 'destination' node, the node that is arced to. """ originals = self.compute_original_indexes([node_index]) logger.debug('originals: %s', originals) max_arc = None max_score = None for row_index in range(len(self.scores)): for col_index in range(len(self.scores[row_index])): # print self.scores[row_index][col_index] if col_index in originals and (max_score is None or self.scores[row_index][col_index] > max_score): max_score = self.scores[row_index][col_index] max_arc = row_index logger.debug('%s, %s', row_index, col_index) logger.debug(max_score) for key in self.inner_nodes: replaced_nodes = self.inner_nodes[key] if max_arc in replaced_nodes: return key return max_arc def original_best_arc(self, node_index): originals = self.compute_original_indexes([node_index]) max_arc = None max_score = None max_orig = None for row_index in range(len(self.scores)): for col_index in range(len(self.scores[row_index])): if col_index in originals and (max_score is None or self.scores[row_index][col_index] > max_score): max_score = self.scores[row_index][col_index] max_arc = row_index max_orig = col_index return [max_arc, max_orig] def parse(self, tokens, tags): """ Parses a list of tokens in accordance to the MST parsing algorithm for non-projective dependency parses. Assumes that the tokens to be parsed have already been tagged and those tags are provided. Various scoring methods can be used by implementing the ``DependencyScorerI`` interface and passing it to the training algorithm. :type tokens: list(str) :param tokens: A list of words or punctuation to be parsed. :type tags: list(str) :param tags: A list of tags corresponding by index to the words in the tokens list. :return: An iterator of non-projective parses. :rtype: iter(DependencyGraph) """ self.inner_nodes = {} # Initialize g_graph g_graph = DependencyGraph() for index, token in enumerate(tokens): g_graph.nodes[index + 1].update( { 'word': token, 'tag': tags[index], 'rel': 'NTOP', 'address': index + 1, } ) #print (g_graph.nodes) # Fully connect non-root nodes in g_graph g_graph.connect_graph() original_graph = DependencyGraph() for index, token in enumerate(tokens): original_graph.nodes[index + 1].update( { 'word': token, 'tag': tags[index], 'rel': 'NTOP', 'address': index+1, } ) b_graph = DependencyGraph() c_graph = DependencyGraph() for index, token in enumerate(tokens): c_graph.nodes[index + 1].update( { 'word': token, 'tag': tags[index], 'rel': 'NTOP', 'address': index + 1, } ) # Assign initial scores to g_graph edges self.initialize_edge_scores(g_graph) logger.debug(self.scores) # Initialize a list of unvisited vertices (by node address) unvisited_vertices = [ vertex['address'] for vertex in c_graph.nodes.values() ] # Iterate over unvisited vertices nr_vertices = len(tokens) betas = {} while unvisited_vertices: # Mark current node as visited current_vertex = unvisited_vertices.pop(0) logger.debug('current_vertex: %s', current_vertex) # Get corresponding node n_i to vertex v_i current_node = g_graph.get_by_address(current_vertex) logger.debug('current_node: %s', current_node) # Get best in-edge node b for current node best_in_edge = self.best_incoming_arc(current_vertex) betas[current_vertex] = self.original_best_arc(current_vertex) logger.debug('best in arc: %s --> %s', best_in_edge, current_vertex) # b_graph = Union(b_graph, b) for new_vertex in [current_vertex, best_in_edge]: b_graph.nodes[new_vertex].update( { 'word': 'TEMP', 'rel': 'NTOP', 'address': new_vertex, } ) b_graph.add_arc(best_in_edge, current_vertex) # Beta(current node) = b - stored for parse recovery # If b_graph contains a cycle, collapse it cycle_path = b_graph.contains_cycle() if cycle_path: # Create a new node v_n+1 with address = len(nodes) + 1 new_node = { 'word': 'NONE', 'rel': 'NTOP', 'address': nr_vertices + 1, } # c_graph = Union(c_graph, v_n+1) c_graph.add_node(new_node) # Collapse all nodes in cycle C into v_n+1 self.update_edge_scores(new_node, cycle_path) self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph) for cycle_index in cycle_path: c_graph.add_arc(new_node['address'], cycle_index) # self.replaced_by[cycle_index] = new_node['address'] self.inner_nodes[new_node['address']] = cycle_path # Add v_n+1 to list of unvisited vertices unvisited_vertices.insert(0, nr_vertices + 1) # increment # of nodes counter nr_vertices += 1 # Remove cycle nodes from b_graph; B = B - cycle c for cycle_node_address in cycle_path: b_graph.remove_by_address(cycle_node_address) logger.debug('g_graph: %s', g_graph) logger.debug('b_graph: %s', b_graph) logger.debug('c_graph: %s', c_graph) logger.debug('Betas: %s', betas) logger.debug('replaced nodes %s', self.inner_nodes) # Recover parse tree logger.debug('Final scores: %s', self.scores) logger.debug('Recovering parse...') for i in range(len(tokens) + 1, nr_vertices + 1): betas[betas[i][1]] = betas[i] logger.debug('Betas: %s', betas) for node in original_graph.nodes.values(): # TODO: It's dangerous to assume that deps it a dictionary # because it's a default dictionary. Ideally, here we should not # be concerned how dependencies are stored inside of a dependency # graph. node['deps'] = {} for i in range(1, len(tokens) + 1): original_graph.add_arc(betas[i][0], betas[i][1]) logger.debug('Done.') yield original_graph ################################################################# # Rule-based Non-Projective Parser ################################################################# class NonprojectiveDependencyParser(object): """ A non-projective, rule-based, dependency parser. This parser will return the set of all possible non-projective parses based on the word-to-word relations defined in the parser's dependency grammar, and will allow the branches of the parse tree to cross in order to capture a variety of linguistic phenomena that a projective parser will not. """ def __init__(self, dependency_grammar): """ Creates a new ``NonprojectiveDependencyParser``. :param dependency_grammar: a grammar of word-to-word relations. :type dependency_grammar: DependencyGrammar """ self._grammar = dependency_grammar def parse(self, tokens): """ Parses the input tokens with respect to the parser's grammar. Parsing is accomplished by representing the search-space of possible parses as a fully-connected directed graph. Arcs that would lead to ungrammatical parses are removed and a lattice is constructed of length n, where n is the number of input tokens, to represent all possible grammatical traversals. All possible paths through the lattice are then enumerated to produce the set of non-projective parses. param tokens: A list of tokens to parse. type tokens: list(str) return: An iterator of non-projective parses. rtype: iter(DependencyGraph) """ # Create graph representation of tokens self._graph = DependencyGraph() for index, token in enumerate(tokens): self._graph.nodes[index] = { 'word': token, 'deps': [], 'rel': 'NTOP', 'address': index, } for head_node in self._graph.nodes.values(): deps = [] for dep_node in self._graph.nodes.values() : if ( self._grammar.contains(head_node['word'], dep_node['word']) and head_node['word'] != dep_node['word'] ): deps.append(dep_node['address']) head_node['deps'] = deps # Create lattice of possible heads roots = [] possible_heads = [] for i, word in enumerate(tokens): heads = [] for j, head in enumerate(tokens): if (i != j) and self._grammar.contains(head, word): heads.append(j) if len(heads) == 0: roots.append(i) possible_heads.append(heads) # Set roots to attempt if len(roots) < 2: if len(roots) == 0: for i in range(len(tokens)): roots.append(i) # Traverse lattice analyses = [] for root in roots: stack = [] analysis = [[] for i in range(len(possible_heads))] i = 0 forward = True while i >= 0: if forward: if len(possible_heads[i]) == 1: analysis[i] = possible_heads[i][0] elif len(possible_heads[i]) == 0: analysis[i] = -1 else: head = possible_heads[i].pop() analysis[i] = head stack.append([i, head]) if not forward: index_on_stack = False for stack_item in stack: if stack_item[0] == i: index_on_stack = True orig_length = len(possible_heads[i]) if index_on_stack and orig_length == 0: for j in xrange(len(stack) - 1, -1, -1): stack_item = stack[j] if stack_item[0] == i: possible_heads[i].append(stack.pop(j)[1]) elif index_on_stack and orig_length > 0: head = possible_heads[i].pop() analysis[i] = head stack.append([i, head]) forward = True if i + 1 == len(possible_heads): analyses.append(analysis[:]) forward = False if forward: i += 1 else: i -= 1 # Filter parses # ensure 1 root, every thing has 1 head for analysis in analyses: if analysis.count(-1) > 1: # there are several root elements! continue graph = DependencyGraph() graph.root = graph.nodes[analysis.index(-1) + 1] for address, (token, head_index) in enumerate(zip(tokens, analysis), start=1): head_address = head_index + 1 node = graph.nodes[address] node.update( { 'word': token, 'address': address, } ) if head_address == 0: rel = 'ROOT' else: rel = '' graph.nodes[head_index + 1]['deps'][rel].append(address) # TODO: check for cycles yield graph ################################################################# # Demos ################################################################# def demo(): # hall_demo() nonprojective_conll_parse_demo() rule_based_demo() def hall_demo(): npp = ProbabilisticNonprojectiveParser() npp.train([], DemoScorer()) for parse_graph in npp.parse(['v1', 'v2', 'v3'], [None, None, None]): print(parse_graph) def nonprojective_conll_parse_demo(): from nltk.parse.dependencygraph import conll_data2 graphs = [ DependencyGraph(entry) for entry in conll_data2.split('\n\n') if entry ] npp = ProbabilisticNonprojectiveParser() npp.train(graphs, NaiveBayesDependencyScorer()) for parse_graph in npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc']): print(parse_graph) def rule_based_demo(): from nltk.grammar import DependencyGrammar grammar = DependencyGrammar.fromstring(""" 'taught' -> 'play' | 'man' 'man' -> 'the' | 'in' 'in' -> 'corner' 'corner' -> 'the' 'play' -> 'golf' | 'dachshund' | 'to' 'dachshund' -> 'his' """) print(grammar) ndp = NonprojectiveDependencyParser(grammar) graphs = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf']) print('Graphs:') for graph in graphs: print(graph) if __name__ == '__main__': demo()
    gpl-2.0